@@ -56,7 +56,7 @@ class AgentConfig:
 4. List next steps to move towards the goal and propose next immediate action.
 Then produce the single function call that performs the proposed action. If the task is complete, produce the final step."""
     summarize_system_prompt: str = """
-    You are a helpful assistant that summarizes conversation history. Following messages is the history to summarize:"""
+    You are a helpful assistant that summarizes agent interaction history. Following messages is the history to summarize:"""
     summarize_prompt: str = """
 Summarize the presented agent interaction history concisely.
 Focus on:
@@ -76,6 +76,7 @@ def __init__(
         config: AgentConfig,
     ):
         self.action_set = action_set
+        self.tools = self.action_set.tools()
         self.history: list[dict | Message] = [{"role": "system", "content": config.system_prompt}]
         self.llm = llm
         self.token_counter = token_counter
@@ -131,14 +132,13 @@ def get_action(self, obs: dict) -> tuple[ToolCall, dict]:
             logger.warning("Max actions reached, stopping agent.")
             return ToolCall(name="final_step"), {}
 
-        self.history += self.obs_to_messages(self.obs_preprocessor(obs))
+        self.history += self.obs_to_messages(obs)
         self.maybe_compact_history()
-        tools = [tool.model_dump() for tool in self.action_set.actions]
         messages = self.history + [{"role": "user", "content": self.config.guidance}]
 
         try:
             logger.info(colored(f"Prompt:\n{pprint.pformat(messages, width=120)}", "blue"))
-            response = self.llm(tools=tools, messages=messages)
+            response = self.llm(tools=self.tools, messages=messages)
             message = response.choices[0].message  # type: ignore
         except Exception as e:
             logger.exception(f"Error getting LLM response: {e}. Prompt: {messages}")
@@ -155,6 +155,7 @@ def max_actions_reached(self) -> bool:
         return len(prev_actions) >= self.config.max_actions
 
     def thoughts_from_message(self, message: Message) -> str:
+        """Extract the agent's thoughts from the LLM message."""
         thoughts = []
         if reasoning := message.get("reasoning_content"):
             thoughts.append(reasoning)
@@ -168,6 +169,7 @@ def thoughts_from_message(self, message: Message) -> str:
         return "\n\n".join(thoughts)
 
     def action_from_message(self, message: Message) -> ToolCall:
+        """Parse the ToolCall from the LLM message."""
         if message.tool_calls:
             if len(message.tool_calls) > 1:
                 logger.warning("Multiple tool calls found in LLM response, using the first one.")
0 commit comments