# Hugging Face Spaces scrape header (status at capture time: "Runtime error").
from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.schema import AIMessage, HumanMessage
from langchain.chains import LLMChain
import gradio as gr
import os
from dotenv import load_dotenv

# Pull secrets (notably HUGGINGFACEHUB_API_TOKEN) from a local .env file so the
# endpoint below can authenticate without hard-coding the token.
load_dotenv()

# Instruction-tuned Mistral 7B served via the Hugging Face Inference API.
repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    # huggingfacehub_api_token is resolved from the environment set up above.
)

# Guessing-game persona prompt: the model hides a music/tour theme and answers
# each user turn through the single {question} slot.
template = """You're a clever chatbot, always pondering over a special theme related to music and tours, but it's a secret. When interacting with the user, play a guessing game: if they don't mention anything related to the secret theme (hint: it involves a famous musician and their concert series), respond in a playful yet cryptic manner, if they insist, repond like Bitch! Hell no!, etc ... Here's how you should structure your responses:
User: {question}
Answer: """
prompt = PromptTemplate.from_template(template=template)
llm_chain = LLMChain(llm=llm, prompt=prompt)
def predict(message, history):
    """Chat callback for gr.ChatInterface.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list
        Prior (user, assistant) turns supplied by Gradio. Intentionally
        unused: the chain's prompt template has a single {question} slot,
        so every turn is answered statelessly.

    Returns
    -------
    str
        The model's reply text extracted from the chain output dict.
    """
    # invoke() returns a dict; the generated completion lives under 'text'.
    response = llm_chain.invoke(message)['text']
    return response
# Wire predict() into a Gradio chat UI and start the web server.
gr.ChatInterface(predict).launch()