diff --git a/stagehand/llm/client.py b/stagehand/llm/client.py
index 06dc959..48d83de 100644
--- a/stagehand/llm/client.py
+++ b/stagehand/llm/client.py
@@ -114,6 +114,13 @@ async def create_response(
         if "gpt-5" in completion_model:
             filtered_params["temperature"] = 1
 
+        # GPT-5.1 and GPT-5.2 don't support "minimal" reasoning_effort.
+        # Upgrade "minimal" to "low" for these models to avoid OpenAI API
+        # errors, without clobbering an explicitly requested higher effort
+        # or injecting an effort level the caller never set.
+        if "gpt-5.1" in completion_model or "gpt-5.2" in completion_model:
+            if filtered_params.get("reasoning_effort") == "minimal":
+                filtered_params["reasoning_effort"] = "low"
+
         self.logger.debug(
             f"Calling litellm.acompletion with model={completion_model} and params: {filtered_params}",
             category="llm",