# defining thread
thread = {"configurable": {"thread_id": "1"}}
messages = [HumanMessage(content="What info do we have in ecommerce_db.users table?")]
for event in prebuilt_doc_agent.stream({"messages": messages}, thread):
    for v in event.values():
        v['messages'][-1].pretty_print()
# ================================== Ai Message ==================================
# Tool Calls:
#   execute_sql (call_YieWiChbFuOlxBg8G1jDJitR)
#   Call ID: call_YieWiChbFuOlxBg8G1jDJitR
#   Args:
#     query: SELECT * FROM ecommerce_db.users LIMIT 1;
# ================================= Tool Message =================================
# Name: execute_sql
# 1000001   United Kingdom   0   70
#
# ================================== Ai Message ==================================
#
# The `ecommerce_db.users` table contains at least the following information for users:
#
# - **User ID** (e.g., `1000001`)
# - **Country** (e.g., `United Kingdom`)
# - **Some numerical value** (e.g., `0`)
# - **Another numerical value** (e.g., `70`)
#
# The specific meaning of the numerical values and additional columns
# is not clear from the single row retrieved. Would you like more details
# or a broader query?
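The transcript above assumes an execute_sql tool is already registered with the agent; its definition is not repeated in this excerpt. A minimal sketch of such a tool, assuming a local ClickHouse HTTP endpoint and the standard @tool decorator, could look like this:

import requests
from langchain_core.tools import tool

CH_HOST = 'http://localhost:8123'  # assumption: local ClickHouse HTTP endpoint

@tool
def execute_sql(query: str) -> str:
    """Returns the result of the SQL query execution."""
    r = requests.post(CH_HOST, params={'query': query}, timeout=30)
    if r.status_code == 200:
        return r.text
    # surfacing database errors lets the agent see them and adjust the query
    return f'Database returned the following error:\n{r.text}'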
followup_messages = [HumanMessage(content="I would like to know the column names and types. Maybe you could look it up in database using describe.")]
for event in prebuilt_doc_agent.stream({"messages": followup_messages}, thread):
    for v in event.values():
        v['messages'][-1].pretty_print()
# ================================== Ai Message ==================================
# Tool Calls:
#   execute_sql (call_sQKRWtG6aEB38rtOpZszxTVs)
#   Call ID: call_sQKRWtG6aEB38rtOpZszxTVs
#   Args:
#     query: DESCRIBE ecommerce_db.users;
# ================================= Tool Message =================================
# Name: execute_sql
#
# user_id     UInt64
# country     String
# is_active   UInt8
# age         UInt64
#
# ================================== Ai Message ==================================
#
# The `ecommerce_db.users` table has the following columns along with their data types:
#
# | Column Name | Data Type |
# |-------------|-----------|
# | user_id     | UInt64    |
# | country     | String    |
# | is_active   | UInt8     |
# | age         | UInt64    |
#
# If you need further information or assistance, feel free to ask!
new_thread = {"configurable": {"thread_id": "42"}}
followup_messages = [HumanMessage(content="I would like to know the column names and types. Maybe you could look it up in database using describe.")]
for event in prebuilt_doc_agent.stream({"messages": followup_messages}, new_thread):
    for v in event.values():
        v['messages'][-1].pretty_print()
# ================================== Ai Message ==================================
# Tool Calls:
#   execute_sql (call_LrmsOGzzusaLEZLP9hGTBGgo)
#   Call ID: call_LrmsOGzzusaLEZLP9hGTBGgo
#   Args:
#     query: DESCRIBE your_table_name;
# ================================= Tool Message =================================
# Name: execute_sql
#
# Database returned the following error:
# Code: 60. DB::Exception: Table default.your_table_name does not exist. (UNKNOWN_TABLE) (version 23.12.1.414 (official build))
#
# ================================== Ai Message ==================================
#
# It seems that the table `your_table_name` does not exist in the database.
# Could you please provide the actual name of the table you want to describe?
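A fresh thread_id starts from a clean slate precisely because the prebuilt agent keeps its conversation state in a checkpointer keyed by thread. The agent's construction is not shown in this excerpt; a rough sketch of how it could be assembled, assuming create_react_agent from langgraph.prebuilt, an OpenAI chat model, and an in-memory checkpointer, would be:

from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI

# assumption: the model choice is illustrative, not necessarily the one used above
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
memory = MemorySaver()

prebuilt_doc_agent = create_react_agent(
    model, [execute_sql],
    checkpointer=memory  # persists messages per thread_id, enabling follow-up questions
)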
question_category_prompt = '''You are a senior specialist of analytical support. Your task is to classify the incoming questions.
Depending on your answer, question will be routed to the right team, so your task is crucial for our team.
There are 3 possible question types:
- DATABASE - questions related to our database (tables or fields)
- LANGCHAIN - questions related to LangGraph or LangChain libraries
- GENERAL - general questions
Return in the output only one word (DATABASE, LANGCHAIN or GENERAL).
'''
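This prompt powers the routing node. A possible sketch of that node, assuming the graph state holds a question key and the classification is stored under question_type (names taken from the outputs below), is:

from langchain_core.messages import HumanMessage, SystemMessage

def router_node(state):
    # ask the LLM to classify the question into DATABASE / LANGCHAIN / GENERAL
    messages = [
        SystemMessage(content=question_category_prompt),
        HumanMessage(content=state['question'])
    ]
    response = model.invoke(messages)  # assumption: `model` is the chat model defined earlier
    return {"question_type": response.content.strip()}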
thread = {"configurable": {"thread_id": "1"}}
for s in graph.stream({
    'question': "Does LangChain support Ollama?",
}, thread):
    print(s)
# {'router': {'question_type': 'LANGCHAIN'}}
thread = {"configurable": {"thread_id": "2"}}
for s in graph.stream({
    'question': "What info do we have in ecommerce_db.users table?",
}, thread):
    print(s)

# {'router': {'question_type': 'DATABASE'}}
thread = {"configurable": {"thread_id": "3"}}
for s in graph.stream({
    'question': "How are you?",
}, thread):
    print(s)
# database expert
sql_expert_system_prompt = '''
You are an expert in SQL, so you can help the team
to gather needed data to power their decisions.
You are very accurate and take into account all the nuances in data.
You use SQL to get the data before answering the question.
'''
search_expert_system_prompt = '''
You are an expert in LangChain and other technologies.
Your goal is to answer questions based on results provided by search.
You don't add anything yourself and provide only information backed by other sources.
'''
# general model
general_prompt = '''You're a friendly assistant and your goal is to answer general questions.
Please, don't provide any unchecked information and just tell that you don't know if you don't have enough info.
'''
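These system prompts are wrapped into the graph's expert nodes. As one hedged illustration only (the exact wiring is not shown in this excerpt), the database expert could reuse the execute_sql tool inside a small ReAct agent, with a node mapping the graph state to messages and back:

# assumption: `model` and `execute_sql` are defined as in the earlier sketches
sql_agent = create_react_agent(model, [execute_sql])

def database_expert_node(state):
    # let the SQL agent gather data and return its final message as the answer
    result = sql_agent.invoke({"messages": [
        SystemMessage(content=sql_expert_system_prompt),
        HumanMessage(content=state['question']),
    ]})
    return {"answer": result['messages'][-1].content}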
thread = {"configurable": {"thread_id": "2"}}
results = []
for s in graph.stream({
    'question': "What info do we have in ecommerce_db.users table?",
}, thread):
    print(s)
    results.append(s)

print(results[-1]['database_expert']['answer'])
# The `ecommerce_db.users` table contains the following columns:
# 1. **User ID**: A unique identifier for each user.
# 2. **Country**: The country where the user is located.
# 3. **Is Active**: A flag indicating whether the user is active (1 for active, 0 for inactive).
# 4. **Age**: The age of the user.
# Here are some sample entries from the table:
#
# | User ID | Country        | Is Active | Age |
# |---------|----------------|-----------|-----|
# | 1000001 | United Kingdom | 0         | 70  |
# | 1000002 | France         | 1         | 87  |
# | 1000003 | France         | 1         | 88  |
# | 1000004 | Germany        | 1         | 25  |
# | 1000005 | Germany        | 1         | 48  |
#
# This gives an overview of the user data available in the table.
thread = {"configurable": {"thread_id": "42"}}
results = []
for s in graph.stream({
    'question': "Does LangChain support Ollama?",
}, thread):
    print(s)
    results.append(s)
print(results[-1]['langchain_expert']['answer'])
# Yes, LangChain supports Ollama. Ollama allows you to run open-source
# large language models, such as Llama 2, locally, and LangChain provides
# a flexible framework for integrating these models into applications.
# You can interact with models run by Ollama using LangChain, and there are
# specific wrappers and tools available for this integration.
#
# For more detailed information, you can visit the following resources:
# - [LangChain and Ollama Integration](https://js.langchain.com/v0.1/docs/integrations/llms/ollama/)
# - [ChatOllama Documentation](https://js.langchain.com/v0.2/docs/integrations/chat/ollama/)
# - [Medium Article on Ollama and LangChain](https://readmedium.com/ollama-and-langchain-run-llms-locally-900931914a46)
editor_prompt = '''You're an editor and your goal is to provide the final answer to the customer, taking into account the feedback.
You don't add any information on your own. You use friendly and professional tone.
In the output please provide the final answer to the customer without additional comments.
Here's all the information you need.

Question from customer:
----
{question}
----
Draft answer:
----
{answer}
----
Feedback:
----
{feedback}
----
'''
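The editor prompt has three placeholders: the customer's question, the draft answer, and the collected feedback. A sketch of the node that fills them in and produces the final answer (the state keys are assumptions based on the outputs in this section):

def editor_node(state):
    # merge the draft answer with the human feedback into the final reply
    messages = [HumanMessage(content=editor_prompt.format(
        question=state['question'],
        answer=state['answer'],
        feedback=state['feedback']
    ))]
    response = model.invoke(messages)
    return {"answer": response.content}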
for event in graph.stream({
    'question': "What are the types of fields in ecommerce_db.users table?",
}, thread):
    print(event)
# {'question_type': 'DATABASE', 'question': 'What are the types of fields in ecommerce_db.users table?'}
# {'router': {'question_type': 'DATABASE'}}
# {'database_expert': {'answer': 'The `ecommerce_db.users` table has the following fields:\n\n1. **user_id**: UInt64\n2. **country**: String\n3. **is_active**: UInt8\n4. **age**: UInt64'}}
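The stream stops right after the database expert because the graph was compiled to pause before the feedback step. The compilation itself is not shown in this excerpt; roughly, assuming `builder` is the StateGraph assembled earlier, `memory` is the checkpointer, and the feedback node is hypothetically named "human", it would be:

# assumption: node name "human" and builder/memory variables are illustrative
graph = builder.compile(checkpointer=memory, interrupt_before=["human"])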
Let's get the customer's input and update the state with the feedback.
user_input = input("Do I need to change anything in the answer?")
# Do I need to change anything in the answer?
# It looks wonderful. Could you only make it a bit friendlier please?
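To push this feedback into the paused graph, the state has to be updated before resuming. The node name below is an assumption; the call looks roughly like this:

# assumption: "human" matches the name of the interrupted feedback node
graph.update_state(thread, {"feedback": user_input}, as_node="human")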
print(graph.get_state(thread).values['feedback'])
# It looks wonderful. Could you only make it a bit friendlier please?
print(graph.get_state(thread).next)
# ('editor',)
We can continue the execution. Passing None as the input resumes the flow from where it was paused.
for event in graph.stream(None, thread, stream_mode="values"):
    print(event)
print(event['answer'])
# Hello! The `ecommerce_db.users` table has the following fields:
# 1. **user_id**: UInt64
# 2. **country**: String
# 3. **is_active**: UInt8
# 4. **age**: UInt64
# Have a nice day!
from langchain_community.tools import HumanInputRun
human_tool = HumanInputRun()
editor_agent_prompt = '''You're an editor and your goal is to provide the final answer to the customer, taking into account the initial question.
If you need any clarifications or feedback, please, use human. Always reach out to human to get the feedback before the final answer.
You don't add any information on your own. You use friendly and professional tone.
In the output please provide the final answer to the customer without additional comments.
Here's all the information you need.

Question from customer:
----
{question}
----
Draft answer:
----
{answer}
----
'''
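With the human-input tool available, the editor can itself be an agent that decides when to ask for feedback. A sketch, assuming it is built with create_react_agent and invoked with the formatted prompt (the variable names are illustrative):

editor_agent = create_react_agent(model, [human_tool])

messages = [HumanMessage(content=editor_agent_prompt.format(
    question="What are the types of fields in ecommerce_db.users table?",
    answer=results[-1]['database_expert']['answer']
))]
editor_result = editor_agent.invoke({"messages": messages})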
# Is the draft answer complete and accurate for the customer's question about the types of fields in the ecommerce_db.users table?
# Yes, but could you please make it friendlier.
print(editor_result['messages'][-1].content)

# The `ecommerce_db.users` table has the following fields:
# 1. **user_id**: UInt64
# 2. **country**: String
# 3. **is_active**: UInt8
# 4. **age**: UInt64
#
# If you have any more questions, feel free to ask!