# Gradio chat interface backed by a LangChain chat model.
"""Environment and model setup for the Gradio chat app."""
import os

import gradio as gr
from dotenv import load_dotenv
# Keep exactly one ChatOpenAI import: `langchain_openai` is the current,
# supported package (the `langchain.chat_models` / `langchain_community`
# copies are deprecated and were shadowing each other).
from langchain_openai import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

# Load variables from a local .env file into the process environment.
load_dotenv()

# NOTE(review): the variable is named OPENAI_API_KEY but is read from
# GROQ_API_KEY — confirm which provider/key this deployment actually targets.
OPENAI_API_KEY = os.environ['GROQ_API_KEY']

# Construct the chat model. The original `openai(temperature=..., model=...)`
# called the `openai` *module*, which raises "TypeError: 'module' object is
# not callable"; ChatOpenAI is the intended constructor. The key is passed
# explicitly because the OPENAI_API_KEY variable above was otherwise unused.
llm = ChatOpenAI(temperature=1.0, model='gpt-4-turbo', api_key=OPENAI_API_KEY)
def predict(message, history):
    """Answer the latest user message given the prior chat history.

    Args:
        message: The user's newest message (str).
        history: Sequence of (human_text, ai_text) pairs from earlier turns,
            as provided by ``gr.ChatInterface``.

    Returns:
        The model's reply text (str).
    """
    history_langchain_format = []
    # Rebuild the full conversation in LangChain message objects so the
    # model sees prior context, not just the newest message.
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    # .invoke() is the supported call style; calling llm(...) directly is
    # deprecated in current LangChain releases.
    gpt_response = llm.invoke(history_langchain_format)
    return gpt_response.content
if __name__ == "__main__":
    # Launch the Gradio chat UI; blocks serving requests until stopped.
    gr.ChatInterface(predict).launch()