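"""Gradio chat demo: a guessing-game chatbot backed by a Mistral-7B-Instruct
Hugging Face endpoint, wired together with a LangChain prompt and LLMChain."""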
from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.schema import AIMessage, HumanMessage
from langchain.chains import LLMChain
import gradio as gr
import os
from dotenv import load_dotenv
load_dotenv()
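# load_dotenv() reads a local .env file; HuggingFaceEndpoint will typically pick up the
# HUGGINGFACEHUB_API_TOKEN environment variable for authentication, which is why the
# explicit token argument below stays commented out.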
repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    # huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
)
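# Optional: generation behaviour can be tuned on the endpoint. A minimal sketch with
# illustrative values (not part of the original script):
# llm = HuggingFaceEndpoint(
#     repo_id=repo_id,
#     max_new_tokens=256,
#     temperature=0.7,
# )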
# template = """You're a good chatbot, and you're thinking only about the Eras Tour.
# You're playing a game with the user: they will guess what you're thinking. If their text doesn't contain words like Taylor Swift or Eras Tour, respond to them briefly and playfully, like: Bitch! User: {question}
# Answer: """
template = """You're a clever chatbot, always pondering a special theme related to music and tours, but it's a secret.
Taylor Swift's "ERAS TOUR" is what you're thinking of, but absolutely keep it a secret unless they make a good guess.
When interacting with the user, play a guessing game: if they don't mention anything related to the secret theme (hint: it involves a famous musician and their concert series), respond in a playful yet cryptic manner; if they insist, respond like "Bitch! Hell no!", etc.
Answer briefly, only ONCE and only one time.
{question} """
# template = """Imagine me as a curious chatbot, always intrigued by the wonders of music, especially when it revolves around a spectacular concert series. It's like I have a secret melody playing in my head, something related to the "Eras Tour," but shh, it's our little secret. Let's engage in a guessing game. Unless you hit the right note about this secret theme, which involves a renowned musician and their groundbreaking tour, I'll respond with hints wrapped in mystery. Persist, and I might just playfully urge you to keep guessing. Remember, the revelation comes once, and only when the guess strikes a chord.
# User: {question}
# Your response: """
prompt = PromptTemplate.from_template(template=template)
llm_chain = LLMChain(llm=llm, prompt=prompt)
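# PromptTemplate.from_template infers {question} as the input variable, and
# LLMChain.invoke returns a dict whose "text" key holds the model completion,
# which is what predict() extracts below.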
def predict(message, history):
    # Chat history is collected by Gradio but not yet wired into the chain;
    # only the latest user message is sent to the LLM.
    history_langchain_format = []
    # for human, ai in history:
    #     history_langchain_format.append(HumanMessage(content=human))
    #     history_langchain_format.append(AIMessage(content=ai))
    # history_langchain_format.append(HumanMessage(content=message))
    # gpt_response = llm(history_langchain_format)
    response = llm_chain.invoke(message)['text']
    return response
gr.ChatInterface(predict).launch()
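# launch() starts a local web server; on a Hugging Face Space the defaults are usually
# sufficient, while locally you could pass e.g. share=True for a temporary public link
# (an optional Gradio parameter, not used in the original script).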