Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions cognite/client/_api/data_modeling/instances.py
Original file line number Diff line number Diff line change
Expand Up @@ -1630,8 +1630,9 @@ async def query(
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.query(query, debug=debug_params)
>>> print(res.debug)
Expand Down Expand Up @@ -1706,8 +1707,9 @@ async def sync(
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.sync(query, debug=debug_params)
>>> print(res.debug)
Expand Down Expand Up @@ -1877,8 +1879,9 @@ async def list(
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.list(debug=debug_params, sources=my_view)
>>> print(res.debug)
Expand Down
17 changes: 10 additions & 7 deletions cognite/client/_sync_api/data_modeling/instances.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""
===============================================================================
77ddb197a6a73314c704dafacbe56215
eaf67bcf675abbeb0949346ea1ef5d1f
This file is auto-generated from the Async API modules, - do not edit manually!
===============================================================================
"""
Expand Down Expand Up @@ -1147,8 +1147,9 @@ def query(self, query: Query, include_typing: bool = False, debug: DebugParamete
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.query(query, debug=debug_params)
>>> print(res.debug)
Expand Down Expand Up @@ -1224,8 +1225,9 @@ def sync(self, query: QuerySync, include_typing: bool = False, debug: DebugParam
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.sync(query, debug=debug_params)
>>> print(res.debug)
Expand Down Expand Up @@ -1367,8 +1369,9 @@ def list(
>>> debug_params = DebugParameters(
... emit_results=False,
... include_plan=True, # Include the postgres execution plan
... include_translated_query=True, # Include the internal representation of the query.
... profile=True,
... include_translated_query=True, # Include the internal representation of the query
... include_llm_prompt=True, # Include a prompt to ask an LLM to help debug the results
... profile=True, # Most thorough level of query analysis. Requires emit_results=False
... )
>>> res = client.data_modeling.instances.list(debug=debug_params, sources=my_view)
>>> print(res.debug)
Expand Down
10 changes: 10 additions & 0 deletions cognite/client/data_classes/data_modeling/debug.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,18 +25,21 @@ class DebugInfo(CogniteResource):
cursoring, to help identify areas for improvement.
translated_query (TranslatedQuery | None): The internal representation of the query.
plan (ExecutionPlan | None): The execution plan for the query.
llm_prompt (str | None): A prompt that can be used to ask a large language model (LLM) to help debug the query results.
"""

notices: DebugNoticeList | None = None
translated_query: TranslatedQuery | None = None
plan: ExecutionPlan | None = None
llm_prompt: str | None = None

@classmethod
def _load(cls, data: dict[str, Any]) -> DebugInfo:
    """Build a DebugInfo instance from a raw (camelCase-keyed) response dict.

    Each nested field is delegated to that type's ``_load_if`` helper;
    presumably ``_load_if`` returns ``None`` when its argument is ``None``
    (i.e. the key was absent from the payload) — defined elsewhere, confirm.
    ``llmPrompt`` is read directly since it is a plain string field.
    """
    return cls(
        notices=DebugNoticeList._load_if(data.get("notices")),
        translated_query=TranslatedQuery._load_if(data.get("translatedQuery")),
        plan=ExecutionPlan._load_if(data.get("plan")),
        # Plain string (or None) — no nested parsing needed.
        llm_prompt=data.get("llmPrompt"),
    )

def dump(self, camel_case: bool = True) -> dict[str, Any]:
Expand All @@ -48,6 +51,8 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]:
obj[key] = self.translated_query.dump(camel_case=camel_case)
if self.plan is not None:
obj["plan"] = self.plan.dump(camel_case=camel_case)
if self.llm_prompt is not None:
obj["llmPrompt" if camel_case else "llm_prompt"] = self.llm_prompt
return obj


Expand Down Expand Up @@ -113,13 +118,15 @@ class DebugParameters:
timeout (int | None): Query timeout in milliseconds. Can be used to override the default timeout when analysing queries. Requires emit_results=False.
include_translated_query (bool): Include the internal representation of the query.
include_plan (bool): Include the execution plan for the query.
include_llm_prompt (bool): Include a prompt that can be used to ask a large language model (LLM) to help debug the query results.
profile (bool): Most thorough level of query analysis. Requires emit_results=False.
"""

emit_results: bool = True
timeout: int | None = None
include_translated_query: bool = False
include_plan: bool = False
include_llm_prompt: bool = False
profile: bool = False

@property
Expand All @@ -139,6 +146,9 @@ def dump(self, camel_case: bool = True) -> dict[str, bool | int]:
if self.include_plan:
key = "includePlan" if camel_case else "include_plan"
res[key] = self.include_plan
if self.include_llm_prompt:
key = "includeLlmPrompt" if camel_case else "include_llm_prompt"
res[key] = self.include_llm_prompt
return res


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1439,7 +1439,7 @@ def test_instance_list__and___call___debug_info(self, cognite_client: CogniteCli
# Verify that debug info, which is returned at the root level of the response, is properly handled.
# We also test with and without 'include_typing' which is returned the same way.
debug_params = DebugParameters(
emit_results=False, profile=False, include_translated_query=True, include_plan=True
emit_results=False, profile=False, include_translated_query=True, include_plan=True, include_llm_prompt=True
)
# Sorting by this should return SortNotBackedByIndexNotice:
bad_sort = InstanceSort(CogniteAsset.get_source().as_property_ref("sourceCreatedTime"))
Expand Down Expand Up @@ -1483,14 +1483,15 @@ def test_instance_list__and___call___debug_info(self, cognite_client: CogniteCli
assert isinstance(res.debug.notices, DebugNoticeList)
assert len(res.debug.notices) == 1
assert isinstance(res.debug.notices[0], SortNotBackedByIndexNotice)
assert isinstance(res.debug.llm_prompt, str)

if include_typing:
assert res.typing is not None

@pytest.mark.usefixtures("cognite_asset_nodes")
def test_instance_query_and_sync_debug_info(self, cognite_client: CogniteClient) -> None:
debug_params = DebugParameters(
emit_results=False, profile=False, include_translated_query=True, include_plan=True
emit_results=False, profile=False, include_translated_query=True, include_plan=True, include_llm_prompt=True
)
rse_with_sort = NodeResultSetExpression(
sort=[InstanceSort(CogniteAsset.get_source().as_property_ref("sourceCreatedTime"))],
Expand All @@ -1510,6 +1511,7 @@ def test_instance_query_and_sync_debug_info(self, cognite_client: CogniteClient)
assert isinstance(res.debug.translated_query, TranslatedQuery)
assert isinstance(res.debug.plan, ExecutionPlan)
assert isinstance(res.debug.notices, DebugNoticeList)
assert isinstance(res.debug.llm_prompt, str)
if res is res_query: # Sort not allowed for /sync
assert len(res.debug.notices) == 1
# Since we specify both emit_results and timeout, we should get...:
Expand Down
Loading