This section demonstrates how to add human approval to any tool using the HumanApprovalCallbackHandler. Suppose we need to use the ShellTool; adding this tool to an automated flow carries obvious risks, so we will see how to force manual human confirmation of every input passed to the tool. Note that we generally advise against using ShellTool at all: it can be misused in many ways and is not needed for most use cases. We use it here purely for demonstration.
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.tools import ShellTool
tool = ShellTool()
print(tool.run('echo Hello World!'))
Output:
Hello World!
Adding the default HumanApprovalCallbackHandler to the tool means that a human must manually approve every input to the tool before the command is actually executed.
tool = ShellTool(callbacks=[HumanApprovalCallbackHandler()])
print(tool.run("ls /usr"))
Log output and input:
Do you approve of the following input? Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
ls /usr
yes
X11
X11R6
bin
lib
libexec
local
sbin
share
standalone
Input:
print(tool.run("ls /private"))
Log output and input:
Do you approve of the following input? Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
ls /private
no
---------------------------------------------------------------------------
HumanRejectedException Traceback (most recent call last)
Cell In[17], line 1
----> 1 print(tool.run("ls /private"))
File ~/langchain/langchain/tools/base.py:257, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, **kwargs)
255 # TODO: maybe also pass through run_manager is _run supports kwargs
256 new_arg_supported = signature(self._run).parameters.get("run_manager")
--> 257 run_manager = callback_manager.on_tool_start(
258 {"name": self.name, "description": self.description},
259 tool_input if isinstance(tool_input, str) else str(tool_input),
260 color=start_color,
261 **kwargs,
262 )
263 try:
264 tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
File ~/langchain/langchain/callbacks/manager.py:672, in CallbackManager.on_tool_start(self, serialized, input_str, run_id, parent_run_id, **kwargs)
669 if run_id is None:
670 run_id = uuid4()
--> 672 _handle_event(
673 self.handlers,
674 "on_tool_start",
675 "ignore_agent",
676 serialized,
677 input_str,
678 run_id=run_id,
679 parent_run_id=self.parent_run_id,
680 **kwargs,
681 )
683 return CallbackManagerForToolRun(
684 run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
685 )
File ~/langchain/langchain/callbacks/manager.py:157, in _handle_event(handlers, event_name, ignore_condition_name, *args, **kwargs)
155 except Exception as e:
156 if handler.raise_error:
--> 157 raise e
158 logging.warning(f"Error in {event_name} callback: {e}")
File ~/langchain/langchain/callbacks/manager.py:139, in _handle_event(handlers, event_name, ignore_condition_name, *args, **kwargs)
135 try:
136 if ignore_condition_name is None or not getattr(
137 handler, ignore_condition_name
138 ):
--> 139 getattr(handler, event_name)(*args, **kwargs)
140 except NotImplementedError as e:
141 if event_name == "on_chat_model_start":
File ~/langchain/langchain/callbacks/human.py:48, in HumanApprovalCallbackHandler.on_tool_start(self, serialized, input_str, run_id, parent_run_id, **kwargs)
38 def on_tool_start(
39 self,
40 serialized: Dict[str, Any],
(...)
45 **kwargs: Any,
46 ) -> Any:
47 if self._should_check(serialized) and not self._approve(input_str):
---> 48 raise HumanRejectedException(
49 f"Inputs {input_str} to tool {serialized} were rejected."
50 )
HumanRejectedException: Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected.
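Because a rejection surfaces as a HumanRejectedException (defined in langchain.callbacks.human, as the traceback shows), application code can catch it and recover instead of letting the error propagate. A minimal sketch, reusing the tool from above:

from langchain.callbacks.human import HumanRejectedException

try:
    print(tool.run("ls /private"))
except HumanRejectedException:
    # The human reviewer declined this command; skip it instead of crashing.
    print("Command rejected by the reviewer; nothing was executed.")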
Suppose we have an agent that is given several tools, and we only want human approval to be requested for certain tools and certain inputs. We can configure the callback handler to do exactly that.
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
def _should_check(serialized_obj: dict) -> bool:
# Only require approval on ShellTool.
return serialized_obj.get("name") == "terminal"
def _approve(_input: str) -> bool:
if _input == "echo 'Hello World'":
return True
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
msg += "\n\n" + _input + "\n"
resp = input(msg)
return resp.lower() in ("yes", "y")
callbacks = [HumanApprovalCallbackHandler(should_check=_should_check, approve=_approve)]
llm = OpenAI(temperature=0)
tools = load_tools(["wikipedia", "llm-math", "terminal"], llm=llm)
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
agent.run("It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany.", callbacks=callbacks)
Output (no approval prompt appears because _should_check only flags the terminal tool):
'Konrad Adenauer became Chancellor of Germany in 1949, 74 years ago.'
Input:
agent.run("print 'Hello World' in the terminal", callbacks=callbacks)
Output (the agent's command matches the echo 'Hello World' whitelist in _approve, so it runs without a prompt):
'Hello World'
Input:
agent.run("list all directories in /private", callbacks=callbacks)
Log output and input:
ls /private
no
---------------------------------------------------------------------------
HumanRejectedException Traceback (most recent call last)
Cell In[39], line 1
----> 1 agent.run("list all directories in /private", callbacks=callbacks)
File ~/langchain/langchain/chains/base.py:236, in Chain.run(self, callbacks, *args, **kwargs)
234 if len(args) != 1:
235 raise ValueError("`run` supports only one positional argument.")
--> 236 return self(args[0], callbacks=callbacks)[self.output_keys[0]]
238 if kwargs and not args:
239 return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
File ~/langchain/langchain/chains/base.py:140, in Chain.__call__(self, inputs, return_only_outputs, callbacks)
138 except (KeyboardInterrupt, Exception) as e:
139 run_manager.on_chain_error(e)
--> 140 raise e
141 run_manager.on_chain_end(outputs)
142 return self.prep_outputs(inputs, outputs, return_only_outputs)
File ~/langchain/langchain/chains/base.py:134, in Chain.__call__(self, inputs, return_only_outputs, callbacks)
128 run_manager = callback_manager.on_chain_start(
129 {"name": self.__class__.__name__},
130 inputs,
131 )
132 try:
133 outputs = (
--> 134 self._call(inputs, run_manager=run_manager)
135 if new_arg_supported
136 else self._call(inputs)
137 )
138 except (KeyboardInterrupt, Exception) as e:
139 run_manager.on_chain_error(e)
File ~/langchain/langchain/agents/agent.py:953, in AgentExecutor._call(self, inputs, run_manager)
951 # We now enter the agent loop (until it returns something).
952 while self._should_continue(iterations, time_elapsed):
--> 953 next_step_output = self._take_next_step(
954 name_to_tool_map,
955 color_mapping,
956 inputs,
957 intermediate_steps,
958 run_manager=run_manager,
959 )
960 if isinstance(next_step_output, AgentFinish):
961 return self._return(
962 next_step_output, intermediate_steps, run_manager=run_manager
963 )
File ~/langchain/langchain/agents/agent.py:820, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
818 tool_run_kwargs["llm_prefix"] = ""
819 # We then call the tool on the tool input to get an observation
--> 820 observation = tool.run(
821 agent_action.tool_input,
822 verbose=self.verbose,
823 color=color,
824 callbacks=run_manager.get_child() if run_manager else None,
825 **tool_run_kwargs,
826 )
827 else:
828 tool_run_kwargs = self.agent.tool_run_logging_kwargs()
File ~/langchain/langchain/tools/base.py:257, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, **kwargs)
255 # TODO: maybe also pass through run_manager is _run supports kwargs
256 new_arg_supported = signature(self._run).parameters.get("run_manager")
--> 257 run_manager = callback_manager.on_tool_start(
258 {"name": self.name, "description": self.description},
259 tool_input if isinstance(tool_input, str) else str(tool_input),
260 color=start_color,
261 **kwargs,
262 )
263 try:
264 tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
File ~/langchain/langchain/callbacks/manager.py:672, in CallbackManager.on_tool_start(self, serialized, input_str, run_id, parent_run_id, **kwargs)
669 if run_id is None:
670 run_id = uuid4()
--> 672 _handle_event(
673 self.handlers,
674 "on_tool_start",
675 "ignore_agent",
676 serialized,
677 input_str,
678 run_id=run_id,
679 parent_run_id=self.parent_run_id,
680 **kwargs,
681 )
683 return CallbackManagerForToolRun(
684 run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
685 )
File ~/langchain/langchain/callbacks/manager.py:157, in _handle_event(handlers, event_name, ignore_condition_name, *args, **kwargs)
155 except Exception as e:
156 if handler.raise_error:
--> 157 raise e
158 logging.warning(f"Error in {event_name} callback: {e}")
File ~/langchain/langchain/callbacks/manager.py:139, in _handle_event(handlers, event_name, ignore_condition_name, *args, **kwargs)
135 try:
136 if ignore_condition_name is None or not getattr(
137 handler, ignore_condition_name
138 ):
--> 139 getattr(handler, event_name)(*args, **kwargs)
140 except NotImplementedError as e:
141 if event_name == "on_chat_model_start":
File ~/langchain/langchain/callbacks/human.py:48, in HumanApprovalCallbackHandler.on_tool_start(self, serialized, input_str, run_id, parent_run_id, **kwargs)
38 def on_tool_start(
39 self,
40 serialized: Dict[str, Any],
(...)
45 **kwargs: Any,
46 ) -> Any:
47 if self._should_check(serialized) and not self._approve(input_str):
---> 48 raise HumanRejectedException(
49 f"Inputs {input_str} to tool {serialized} were rejected."
50 )
HumanRejectedException: Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected.
Next, let's look at how to use LangChain tools as OpenAI functions.
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
model = ChatOpenAI(model="gpt-3.5-turbo-0613")
from langchain.tools import MoveFileTool, format_tool_to_openai_function
tools = [MoveFileTool()]
functions = [format_tool_to_openai_function(t) for t in tools]
message = model.predict_messages([HumanMessage(content='move file foo to bar')], functions=functions)
message
Output:
AIMessage(content='', additional_kwargs={'function_call': {'name': 'move_file', 'arguments': '{\n "source_path": "foo",\n "destination_path": "bar"\n}'}}, example=False)
Input:
message.additional_kwargs['function_call']
Output:
{'name': 'move_file',
'arguments': '{\n "source_path": "foo",\n "destination_path": "bar"\n}'}
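The model only proposes the function call; the arguments come back as a JSON string and still have to be parsed and routed to the matching tool if we actually want the file moved. A minimal sketch (not part of the original example; it assumes a file named foo exists in the working directory):

import json

function_call = message.additional_kwargs["function_call"]
args = json.loads(function_call["arguments"])

# Route the call to the tool whose name matches the requested function.
tool_map = {t.name: t for t in tools}
result = tool_map[function_call["name"]].run(args)
print(result)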