Spaces:
import os

os.environ['COHERE_API_KEY'] = os.getenv('cohere_ai')

# Create the Cohere chat model
from langchain_cohere.chat_models import ChatCohere

chat = ChatCohere(model="command-r-plus", temperature=0.3)

# Internet search tool backed by Tavily
from langchain_community.tools.tavily_search import TavilySearchResults

os.environ['TAVILY_API_KEY'] = os.getenv('tavily_ai')

internet_search = TavilySearchResults()
internet_search.name = "internet_search"
internet_search.description = "Returns a list of relevant document snippets for a textual query retrieved from the internet."

from langchain_core.pydantic_v1 import BaseModel, Field

class TavilySearchInput(BaseModel):
    query: str = Field(description="Query to search the internet with")

internet_search.args_schema = TavilySearchInput

# Python REPL tool so the agent can run analysis code
from langchain.agents import Tool
from langchain_experimental.utilities import PythonREPL

python_repl = PythonREPL()
repl_tool = Tool(
    name="python_repl",
    description="Executes python code and returns the result. The code runs in a static sandbox without interactive mode, so print output or save output to a file.",
    func=python_repl.run,
)
repl_tool.name = "python_interpreter"

class ToolInput(BaseModel):
    code: str = Field(description="Python code to execute.")

repl_tool.args_schema = ToolInput

# Build the Cohere ReAct agent and its executor
from langchain.agents import AgentExecutor
from langchain_cohere.react_multi_hop.agent import create_cohere_react_agent
from langchain_core.prompts import ChatPromptTemplate

# Create the prompt
prompt = ChatPromptTemplate.from_template("{input}")

# Create the ReAct agent
agent = create_cohere_react_agent(
    llm=chat,
    tools=[internet_search, repl_tool],
    prompt=prompt,
)

agent_executor = AgentExecutor(agent=agent, tools=[internet_search, repl_tool], verbose=True)
from typing import List, Mapping, Any

from langchain_cohere.common import CohereCitation

def process_data(problem):
    # Streaming generator: each yield pushes the accumulated status text to the UI.
    output = 'Gemini agent rewriting your query \n\n'
    yield output
    # Query rewriting (via a separate Gemini call) is currently disabled:
    # rewrite = get_completion(f"Rewrite the user question: {problem} ")
    # output += f"Here is your rewritten query: {rewrite} \n\n"
    # yield output
    output += "Cohere agent gathering the data from public sources and doing analysis \n\n"
    yield output
    coh_output = agent_executor.invoke({"input": f"{problem}"})
    print("Output is", coh_output['output'])
    output += "Final Output: \n\n" + coh_output['output'] + "\n\n"
    yield output
    citations = coh_output['citations']
    print(citations)
    # 'citations' is a list of CohereCitation objects; collect the source URLs they cite
    urls = []
    for item in citations:
        if isinstance(item, CohereCitation) and item.documents:
            for doc in item.documents:
                if 'url' in doc:
                    urls.append(doc['url'])
    final_urls = list(set(urls))
    output += "Citations: \n\n" + '\n'.join(final_urls)
    yield output
    return output
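
For completeness, here is a minimal sketch of how a streaming generator like process_data is typically wired to a Gradio interface on Spaces; the component choices, labels, and title below are assumptions for illustration, not taken from the original app.

import gradio as gr

# Hypothetical UI wiring (assumed, not from the original Space):
# Gradio streams each value yielded by process_data into the output component.
demo = gr.Interface(
    fn=process_data,
    inputs=gr.Textbox(label="Your question"),
    outputs=gr.Markdown(label="Agent output"),
    title="Cohere ReAct agent with internet search and a Python interpreter",
)

demo.launch()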