
React

labridge.agent.react.react

labridge.agent.react.react.InstructReActAgent

Bases: AgentRunner

This Agent uses the Reasoning and Acting (ReAct) prompt framework. Additionally, this class enables the user to intervene in both the reasoning phase and the acting phase:

  • If enable_instruct is set to True, the user can instruct the agent's thought in the reasoning phase.
  • If enable_comment is set to True, the user can comment on the agent's action in the acting phase; the user's comment is treated as an observation that guides the agent's next thought.
PARAMETER DESCRIPTION
tools

The available tools of the agent.

TYPE: Sequence[BaseTool]

llm

The LLM to use.

TYPE: LLM

memory

The short-term memory.

TYPE: BaseMemory

max_iterations

The maximum number of reasoning-acting steps.

TYPE: int DEFAULT: 10

react_chat_formatter

The ReAct prompt template.

TYPE: Optional[ReActChatFormatter] DEFAULT: None

output_parser

Used to parse tool calls from the agent's acting output.

TYPE: Optional[ReActOutputParser] DEFAULT: None

callback_manager

TYPE: Optional[CallbackManager] DEFAULT: None

verbose

Whether to show the inner Reasoning-Acting process.

TYPE: bool DEFAULT: False

tool_retriever

Used to retrieve the proper tool from the given tools.

TYPE: Optional[ObjectRetriever[BaseTool]] DEFAULT: None

handle_reasoning_failure_fn

TYPE: Optional[Callable[[CallbackManager, Exception], ToolOutput]] DEFAULT: None

enable_instruct

Whether to enable the user's instructions in the reasoning phase.

TYPE: bool DEFAULT: False

enable_comment

Whether to enable the user's comments in the acting phase.

TYPE: bool DEFAULT: False
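
A minimal construction sketch is shown below. It assumes a configured Labridge environment (the constructor reads registered users and chat groups via AccountManager) and uses a toy FunctionTool in place of the real lab tools; the add function is purely illustrative.

from llama_index.core import Settings
from llama_index.core.tools import FunctionTool

from labridge.agent.react.react import InstructReActAgent


def add(a: int, b: int) -> int:
	"""Add two integers."""
	return a + b


# A toy tool; a real deployment would pass the Labridge-specific tools instead.
tools = [FunctionTool.from_defaults(fn=add)]

agent = InstructReActAgent.from_tools(
	tools=tools,
	llm=Settings.llm,        # falls back to the globally configured LLM
	verbose=True,
	enable_instruct=True,    # the user may instruct the agent's thought in the reasoning phase
	enable_comment=True,     # the user's comments are fed back as observations in the acting phase
)

Note that the chat methods expect a dumped PackedUserMessage string rather than plain text (see _chat in the source below).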

Source code in labridge\agent\react\react.py
class InstructReActAgent(AgentRunner):
	r"""
	This Agent uses the Reasoning and Acting (ReAct) prompt framework.
	Additionally, this class enables the user to intervene in both the reasoning phase and the acting phase:

	- If `enable_instruct` is set to True, the user can instruct the agent's thought in the reasoning phase.
	- If `enable_comment` is set to True, the user can comment on the agent's action in the acting phase; the
	user's comment is treated as an observation that guides the agent's next thought.

	Args:
		tools (Sequence[BaseTool]): The available tools of the agent.
		llm (LLM): The used LLM.
		memory (BaseMemory): The short-term memory.
		max_iterations (int): The maximum reasoning-acting steps.
		react_chat_formatter (Optional[ReActChatFormatter]): The ReAct prompt template.
		output_parser (Optional[ReActOutputParser]): Used to parse tool call from the agent's Acting output.
		callback_manager (Optional[CallbackManager]):
		verbose (bool): Whether to show the inner Reasoning-Acting process.
		tool_retriever (Optional[ObjectRetriever[BaseTool]]): Used to retrieve proper tool among the given tools.
		handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]]):
		enable_instruct (bool): Whether to enable user's instructing in the reasoning phase.
		enable_comment (bool): Whether to enable user's commenting in the acting phase.
	"""
	def __init__(
		self,
		tools: Sequence[BaseTool],
		llm: LLM,
		memory: BaseMemory,
		max_iterations: int = 10,
		react_chat_formatter: Optional[ReActChatFormatter] = None,
		output_parser: Optional[ReActOutputParser] = None,
		callback_manager: Optional[CallbackManager] = None,
		verbose: bool = False,
		tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
		handle_reasoning_failure_fn: Optional[Callable[[CallbackManager, Exception],
		ToolOutput]] = None,
		enable_instruct: bool = False,
		enable_comment: bool = False,
	):
		self.user_id_list = AccountManager().get_users()
		self.chat_group_list = AccountManager().get_chat_groups()
		step_engine = InstructReActAgentWorker.from_tools(
			tools=tools,
			tool_retriever=tool_retriever,
			user_id_list=self.user_id_list,
			chat_group_id_list=self.chat_group_list,
			llm=llm,
			max_iterations=max_iterations,
			react_chat_formatter=react_chat_formatter,
			output_parser=output_parser,
			callback_manager=callback_manager,
			verbose=verbose,
			handle_reasoning_failure_fn=handle_reasoning_failure_fn,
			enable_instruct=enable_instruct,
		)
		self._enable_comment = enable_comment
		super().__init__(
			step_engine,
			memory=memory,
			llm=llm,
			callback_manager=callback_manager,
		)

	def update_user_id_list(self):
		r""" Update the registered user ids """
		self.user_id_list = AccountManager().get_users()
		self.agent_worker.user_id_list = self.user_id_list

	def set_enable_instruct(self, enable: bool):
		r""" Set enable_instruct. """
		self.agent_worker.set_enable_instruct(enable)

	def set_enable_comment(self, enable: bool):
		r""" Set enable_comment. """
		self._enable_comment = enable

	@property
	def enable_instruct(self):
		r""" Enable user's instruction in Reasoning Phase. """
		return self.agent_worker.enable_instruct

	@property
	def enable_comment(self):
		r""" Enable user's instruction in Acting Phase. """
		return self._enable_comment

	def final_process_tool_logs(self, task: Task) -> Tuple[str, List[str]]:
		r"""
		Process the tool logs of the agent's acting.

		1. Record the log_to_system: log_to_system will be recorded to the long-term memory.
		2. Extract the log_to_user: log_to_user will be attached to the agent's answer.
		3. Extract the references: references are the file paths of the relevant documents. This information will be
		sent to the frontend.
		"""
		tool_log_list = task.extra_state["tool_log"]
		tool_logs_str = get_all_system_logs(tool_logs=tool_log_list)

		# task.extra_state["new_memory"].put(
		# 	ChatMessage(
		# 		content=tool_logs_str,
		# 		role=MessageRole.TOOL,
		# 	)
		# )
		to_user_logs = get_extra_str_to_user(tool_logs=tool_log_list)
		ref_file_paths = get_ref_file_paths(tool_logs=tool_log_list)
		return to_user_logs, ref_file_paths

	@dispatcher.span
	def _chat(self, message: str, chat_history: Optional[List[ChatMessage]] = None,
		tool_choice: Union[str, dict] = "auto",
		mode: ChatResponseMode = ChatResponseMode.WAIT, ) -> AGENT_CHAT_RESPONSE_TYPE:
		"""
		Chat with step executor.
		User is able to instruct or comment.
		"""
		if chat_history is not None:
			self.memory.set(chat_history)

		packed_msgs = PackedUserMessage.loads(dumped_str=message)
		user_id, chat_group_id = packed_msgs.user_id, packed_msgs.chat_group_id
		user_msg, system_msg = packed_msgs.user_msg, packed_msgs.system_msg

		task = self.create_task(
			input=user_msg,
			extra_state={
				"system_msg": system_msg,
				"user_id": user_id,
				"enable_instruct": ChatBuffer.config_buffer[user_id].enable_instruct,
				"enable_comment": ChatBuffer.config_buffer[user_id].enable_comment,
			}
		)
		if chat_group_id is not None:
			task.extra_state["chat_group_id"] = chat_group_id

		result_output = None
		dispatcher.event(AgentChatWithStepStartEvent(user_msg=user_msg))

		# explicitly get the initial step
		step = self.state.get_step_queue(task.task_id).popleft()

		while True:
			# pass step queue in as argument, assume step executor is stateless
			cur_step_output = self._run_step(
				task.task_id,
				step=step,
				mode=mode,
				tool_choice=tool_choice,
			)

			if cur_step_output.is_last:
				result_output = cur_step_output
				break

			step_queue = self.state.get_step_queue(task.task_id)
			step = step_queue.popleft()

			# Send the observation to the user.
			if task.extra_state["enable_comment"]:
				# TODO: output cur_step_output.output.response to the user and get the user's instruction.
				print_text(text=cur_step_output.output.response, color="llama_turquoise", end="\n")
				# TODO: get the next step and use the instruction as step.input.
				packed_msgs = ChatBuffer.test_get_user_text(
					user_id=user_id,
					enable_instruct=False,
					enable_comment=False,
				)

				user_comment = packed_msgs.user_msg
				system_msg = packed_msgs.system_msg
				update_intervene_status(
					task=task,
					enable_instruct=ChatBuffer.config_buffer[user_id].enable_instruct,
					enable_comment=ChatBuffer.config_buffer[user_id].enable_comment,
					reply_in_speech=ChatBuffer.config_buffer[user_id].reply_in_speech,
				)
				# Add as the step's input
				step.input = user_comment
				step.step_state["system_msg"] = system_msg
				print_text(f">>> User's comment: \n {user_comment}", color="blue", end="\n")

			# ensure tool_choice does not cause endless loops
			tool_choice = "auto"

		to_user_logs, ref_file_paths = self.final_process_tool_logs(task=task)
		result = self.finalize_response(task.task_id, result_output, )
		# add the tool log if necessary.
		result.response += f"\n\n{to_user_logs}"
		dispatcher.event(AgentChatWithStepEndEvent(response=result))

		if result.metadata is None:
			result.metadata = {"references": ref_file_paths}
		else:
			result.metadata.update({"references": ref_file_paths})
		return result

	@dispatcher.span
	async def _achat(self, message: str, chat_history: Optional[List[ChatMessage]] = None,
		tool_choice: Union[str, dict] = "auto",
		mode: ChatResponseMode = ChatResponseMode.WAIT, ) -> AGENT_CHAT_RESPONSE_TYPE:
		"""
		Async version.
		Chat with step executor.
		User is able to instruct or comment.
		"""
		if chat_history is not None:
			self.memory.set(chat_history)

		packed_msgs = PackedUserMessage.loads(dumped_str=message)
		user_id, chat_group_id = packed_msgs.user_id, packed_msgs.chat_group_id
		user_msg, system_msg = packed_msgs.user_msg, packed_msgs.system_msg

		task = self.create_task(
			input=user_msg,
			extra_state={
				"system_msg": system_msg,
				"user_id": user_id,
				"enable_instruct": ChatBuffer.config_buffer[user_id].enable_instruct,
				"enable_comment": ChatBuffer.config_buffer[user_id].enable_comment,
				"reply_in_speech": ChatBuffer.config_buffer[user_id].reply_in_speech,
			}
		)
		if chat_group_id is not None:
			task.extra_state["chat_group_id"] = chat_group_id

		result_output = None
		dispatcher.event(AgentChatWithStepStartEvent(user_msg=user_msg))

		# explicitly get initial step
		step = self.state.get_step_queue(task.task_id).popleft()
		while True:
			# pass step queue in as argument, assume step executor is stateless
			cur_step_output = await self._arun_step(
				task.task_id,
				step=step,
				mode=mode,
				tool_choice=tool_choice,
			)

			if cur_step_output.is_last:
				result_output = cur_step_output
				break

			step_queue = self.state.get_step_queue(task.task_id)
			step = step_queue.popleft()

			# Send the observation to the user.
			if task.extra_state["enable_comment"]:
				# TODO: output cur_step_output.output.response to the user and get the user's instruction.
				ChatBuffer.put_agent_reply(
					user_id=user_id,
					reply_str=cur_step_output.output.response,
					inner_chat=True,
				)
				# TODO: use the instruction as step.input and record system_msg in step.step_state.
				packed_msgs = await ChatBuffer.get_user_msg(user_id=user_id)
				user_comment, system_msg = packed_msgs.user_msg, packed_msgs.system_msg
				# update
				update_intervene_status(
					task=task,
					enable_instruct=ChatBuffer.config_buffer[user_id].enable_instruct,
					enable_comment=ChatBuffer.config_buffer[user_id].enable_comment,
					reply_in_speech=ChatBuffer.config_buffer[user_id].reply_in_speech,
				)
				# add to the step's input
				step.input = user_comment
				step.step_state["system_msg"] = system_msg
				print_text(
					f"System: {system_msg}"
					f">>> User's comment: \n {user_comment}",
					color="blue",
					end="\n",
				)

			# ensure tool_choice does not cause endless loops
			tool_choice = "auto"

		to_user_logs, ref_file_paths = self.final_process_tool_logs(task=task)
		result = self.finalize_response(task.task_id, result_output, )
		result.response += f"\n\n{to_user_logs}"
		dispatcher.event(AgentChatWithStepEndEvent(response=result))
		if result.metadata is None:
			result.metadata = {"references": ref_file_paths}
		else:
			result.metadata.update({"references": ref_file_paths})
		return result

	@classmethod
	def from_tools(
		cls,
		tools: Optional[List[BaseTool]] = None,
		tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
		llm: Optional[LLM] = None,
		chat_history: Optional[List[ChatMessage]] = None,
		memory: Optional[BaseMemory] = None,
		memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
		max_iterations: int = 10,
		react_chat_formatter: Optional[ReActChatFormatter] = None,
		output_parser: Optional[ReActOutputParser] = None,
		callback_manager: Optional[CallbackManager] = None,
		verbose: bool = False,
		handle_reasoning_failure_fn: Optional[Callable[[CallbackManager, Exception], ToolOutput]] = None,
		enable_instruct: bool = False,
		enable_comment: bool = False,
		**kwargs: Any,
	) -> "InstructReActAgent":
		"""
		Convenience constructor from a set of BaseTools (optional).

		NOTE: kwargs should have been exhausted by this point. In other words,
		the various upstream components such as BaseSynthesizer (response synthesizer)
		or BaseRetriever should already have picked up their respective kwargs
		during construction.

		If `handle_reasoning_failure_fn` is provided, it is called when the LLM fails to follow the response
		templates specified in the System Prompt. The `ToolOutput` returned by this function is provided to the
		Agent, so that the Agent has a second chance to fix its mistakes.
		To handle the exception yourself, you can provide a function that raises the `Exception`.

		Note: If you modify any response template in the System Prompt, you should override the method
		`_extract_reasoning_step` in `ReActAgentWorker`.

		Returns:
			InstructReActAgent
		"""
		llm = llm or Settings.llm
		if callback_manager is not None:
			llm.callback_manager = callback_manager
		memory = memory or memory_cls.from_defaults(chat_history=chat_history or [], llm=llm)
		return cls(
			tools=tools or [],
			tool_retriever=tool_retriever,
			llm=llm,
			memory=memory,
			max_iterations=max_iterations,
			react_chat_formatter=react_chat_formatter,
			output_parser=output_parser,
			callback_manager=callback_manager,
			verbose=verbose,
			handle_reasoning_failure_fn=handle_reasoning_failure_fn,
			enable_instruct=enable_instruct,
			enable_comment=enable_comment,
		)

labridge.agent.react.react.InstructReActAgent.enable_comment property

Enable user's comment in Acting Phase.

labridge.agent.react.react.InstructReActAgent.enable_instruct property

Enable user's instruction in Reasoning Phase.

labridge.agent.react.react.InstructReActAgent.final_process_tool_logs(task)

Process the tool logs of the agent's acting.

  1. Record the log_to_system: log_to_system will be recorded to the long-term memory.
  2. Extract the log_to_user: log_to_user will be attached to the agent's answer.
  3. Extract the references: references are the file paths of the relevant documents. This information will be sent to the frontend.
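
A sketch of how the chat loop consumes this method, mirroring the tail of _chat above; agent, task, and result_output are assumed to come from an in-progress chat task.

# Split the tool logs into a user-facing string and reference file paths.
to_user_logs, ref_file_paths = agent.final_process_tool_logs(task=task)

# Attach the user-facing logs to the agent's answer ...
result = agent.finalize_response(task.task_id, result_output)
result.response += f"\n\n{to_user_logs}"

# ... and hand the reference file paths to the frontend via the response metadata.
result.metadata = result.metadata or {}
result.metadata.update({"references": ref_file_paths})
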
Source code in labridge\agent\react\react.py
def final_process_tool_logs(self, task: Task) -> Tuple[str, List[str]]:
	r"""
	Process the tool logs of the agent's acting.

	1. Record the log_to_system: log_to_system will be recorded to the long-term memory.
	2. Extract the log_to_user: log_to_user will be attached to the agent's answer.
	3. Extract the references: references are the file paths of the relevant documents. This information will be
	sent to the frontend.
	"""
	tool_log_list = task.extra_state["tool_log"]
	tool_logs_str = get_all_system_logs(tool_logs=tool_log_list)

	# task.extra_state["new_memory"].put(
	# 	ChatMessage(
	# 		content=tool_logs_str,
	# 		role=MessageRole.TOOL,
	# 	)
	# )
	to_user_logs = get_extra_str_to_user(tool_logs=tool_log_list)
	ref_file_paths = get_ref_file_paths(tool_logs=tool_log_list)
	return to_user_logs, ref_file_paths

labridge.agent.react.react.InstructReActAgent.from_tools(tools=None, tool_retriever=None, llm=None, chat_history=None, memory=None, memory_cls=ChatMemoryBuffer, max_iterations=10, react_chat_formatter=None, output_parser=None, callback_manager=None, verbose=False, handle_reasoning_failure_fn=None, enable_instruct=False, enable_comment=False, **kwargs) classmethod

Convenience constructor from a set of BaseTools (optional).

NOTE: kwargs should have been exhausted by this point. In other words, the various upstream components such as BaseSynthesizer (response synthesizer) or BaseRetriever should already have picked up their respective kwargs during construction.

If handle_reasoning_failure_fn is provided, it is called when the LLM fails to follow the response templates specified in the System Prompt. The ToolOutput returned by this function is provided to the Agent, so that the Agent has a second chance to fix its mistakes. To handle the exception yourself, you can provide a function that raises the Exception.

Note: If you modify any response template in the System Prompt, you should override the method _extract_reasoning_step in ReActAgentWorker.

RETURNS DESCRIPTION
InstructReActAgent

InstructReActAgent
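
A sketch of supplying a reasoning-failure handler; tools is assumed to be a list of BaseTool instances defined elsewhere, and the handler below simply turns the exception into a ToolOutput so the agent gets a second chance (raise inside it instead to handle the error yourself).

from llama_index.core.callbacks import CallbackManager
from llama_index.core.tools import ToolOutput


def on_reasoning_failure(callback_manager: CallbackManager, exc: Exception) -> ToolOutput:
	# Feed the formatting error back to the agent as an observation.
	return ToolOutput(
		content=f"Your previous output did not follow the expected format: {exc}",
		tool_name="reasoning_failure_handler",
		raw_input={},
		raw_output=str(exc),
	)


agent = InstructReActAgent.from_tools(
	tools=tools,
	handle_reasoning_failure_fn=on_reasoning_failure,
)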

Source code in labridge\agent\react\react.py
@classmethod
def from_tools(
	cls,
	tools: Optional[List[BaseTool]] = None,
	tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
	llm: Optional[LLM] = None,
	chat_history: Optional[List[ChatMessage]] = None,
	memory: Optional[BaseMemory] = None,
	memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
	max_iterations: int = 10,
	react_chat_formatter: Optional[ReActChatFormatter] = None,
	output_parser: Optional[ReActOutputParser] = None,
	callback_manager: Optional[CallbackManager] = None,
	verbose: bool = False,
	handle_reasoning_failure_fn: Optional[Callable[[CallbackManager, Exception], ToolOutput]] = None,
	enable_instruct: bool = False,
	enable_comment: bool = False,
	**kwargs: Any,
) -> "InstructReActAgent":
	"""
	Convenience constructor from a set of BaseTools (optional).

	NOTE: kwargs should have been exhausted by this point. In other words,
	the various upstream components such as BaseSynthesizer (response synthesizer)
	or BaseRetriever should already have picked up their respective kwargs
	during construction.

	If `handle_reasoning_failure_fn` is provided, it is called when the LLM fails to follow the response
	templates specified in the System Prompt. The `ToolOutput` returned by this function is provided to the
	Agent, so that the Agent has a second chance to fix its mistakes.
	To handle the exception yourself, you can provide a function that raises the `Exception`.

	Note: If you modify any response template in the System Prompt, you should override the method
	`_extract_reasoning_step` in `ReActAgentWorker`.

	Returns:
		InstructReActAgent
	"""
	llm = llm or Settings.llm
	if callback_manager is not None:
		llm.callback_manager = callback_manager
	memory = memory or memory_cls.from_defaults(chat_history=chat_history or [], llm=llm)
	return cls(
		tools=tools or [],
		tool_retriever=tool_retriever,
		llm=llm,
		memory=memory,
		max_iterations=max_iterations,
		react_chat_formatter=react_chat_formatter,
		output_parser=output_parser,
		callback_manager=callback_manager,
		verbose=verbose,
		handle_reasoning_failure_fn=handle_reasoning_failure_fn,
		enable_instruct=enable_instruct,
		enable_comment=enable_comment,
	)

labridge.agent.react.react.InstructReActAgent.set_enable_comment(enable)

Set enable_comment.

Source code in labridge\agent\react\react.py
def set_enable_comment(self, enable: bool):
	r""" Set enable_comment. """
	self._enable_comment = enable

labridge.agent.react.react.InstructReActAgent.set_enable_instruct(enable)

Set enable_instruct.

Source code in labridge\agent\react\react.py
def set_enable_instruct(self, enable: bool):
	r""" Set enable_instruct. """
	self.agent_worker.set_enable_instruct(enable)
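
Both switches can be toggled on an existing agent at runtime, e.g. (a sketch, with agent being the InstructReActAgent constructed earlier):

agent.set_enable_instruct(True)    # forwarded to the underlying agent_worker
agent.set_enable_comment(False)    # stored on the agent itself

assert agent.enable_instruct
assert not agent.enable_comment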

labridge.agent.react.react.InstructReActAgent.update_user_id_list()

Update the registered user ids

Source code in labridge\agent\react\react.py
def update_user_id_list(self):
	r""" Update the registered user ids """
	self.user_id_list = AccountManager().get_users()
	self.agent_worker.user_id_list = self.user_id_list
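
If the set of registered users changes while the agent is running, the agent can be refreshed (a sketch; user registration itself happens elsewhere in Labridge's account management):

agent.update_user_id_list()
print(agent.user_id_list)    # now reflects AccountManager().get_users()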