"""Minimal LangChain ReAct agent demo backed by a local GPT-2 pipeline.

Answers a sample question using two tools: ``serpapi`` (web search) and
``llm-math`` (LLM-backed arithmetic). API keys are read from the
environment (optionally populated from a ``.env`` file via
``python-dotenv``) — never hardcoded in source.
"""
import os

import torch
from dotenv import load_dotenv
from transformers import pipeline

from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline


def main() -> None:
    """Build the GPT-2-backed agent and print its answer to a demo query.

    Raises:
        RuntimeError: if a required API key is missing from the environment.
    """
    # SECURITY: API keys were previously hardcoded here, clobbering the
    # values load_dotenv() had just loaded. Committed keys must be treated
    # as compromised and rotated; all keys now come from the environment.
    load_dotenv()
    missing = [key for key in ("SERPAPI_API_KEY",) if not os.getenv(key)]
    if missing:
        raise RuntimeError(
            f"Missing required environment variable(s): {', '.join(missing)}. "
            "Set them in your environment or a .env file."
        )

    # Fall back to CPU when CUDA is unavailable instead of crashing on
    # machines without a GPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    llm_pipeline = pipeline(
        "text-generation",
        model="gpt2",
        device=device,
        max_length=300,  # total token budget (prompt + completion) for GPT-2
    )
    llm = HuggingFacePipeline(pipeline=llm_pipeline)

    # serpapi performs web search; llm-math wraps the LLM for arithmetic.
    tools = load_tools(["serpapi", "llm-math"], llm=llm)
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    print(agent.run(input="What is the capital of France?"))


if __name__ == "__main__":
    main()
