# KvrParaskevi — "Update chatbot.py" (commit 2adda29, verified, 3.04 kB)
# NOTE: the lines above were a Hugging Face file-viewer header ("raw / history /
# blame") captured with the file; commented out so the module parses as Python.
import os
import spaces
from langchain.memory import ConversationBufferMemory,ConversationSummaryBufferMemory
from langchain.chains import ConversationChain
import langchain.globals
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
#import streamlit as st
# Hugging Face repo id of the chat model; 'Default Value' is only a placeholder
# when MODEL_REPO_ID is unset (from_pretrained would then fail at load time).
my_model_id = os.getenv('MODEL_REPO_ID', 'Default Value')
# Hub API token for gated/private repos; None when the env var is unset.
token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
# Prompt template for the hotel-booking assistant. It interpolates two
# variables: {history} (conversation memory) and {input} (the user's latest
# message), matching the PromptTemplate input_variables used in demo_chain.
template = """You are an AI having conversation with a human. Below is an instruction that describes a task.
Write a response that appropriately completes the request.
Reply with the most helpful and logic answer. During the conversation you need to ask the user
the following questions to complete the hotel booking task.
1) Where would you like to stay and when?
2) How many people are staying in the room?
3) Do you prefer any ammenities like breakfast included or gym?
4) What is your name, your email address and phone number?
Make sure you receive a logical answer from the user from every question to complete the hotel
booking process.
Relevant Information:
{history}
Current Conversation:
Human: {input}
AI:"""
#@st.cache_resource
@spaces.GPU
def load_model():
    """Load the tokenizer and 8-bit-quantized causal LM named by MODEL_REPO_ID.

    Returns:
        tuple: (tokenizer, model), ready to wrap in a text-generation pipeline.
    """
    # 8-bit quantization keeps the model small enough for a single GPU.
    quantization_config = BitsAndBytesConfig(
        load_in_8bit=True,
        # bnb_4bit_compute_dtype=torch.bfloat16
    )
    # Fix: HUGGINGFACEHUB_API_TOKEN was read at module level but never used.
    # Pass it through so gated/private repos can be downloaded; token=None
    # (env var unset) preserves the original anonymous behavior.
    tokenizer = AutoTokenizer.from_pretrained(my_model_id, token=token)
    model = AutoModelForCausalLM.from_pretrained(
        my_model_id,
        device_map="auto",
        quantization_config=quantization_config,
        token=token,
    )
    return tokenizer, model
#@st.cache_resource
@spaces.GPU
def load_pipeline():
    """Wrap the quantized chat model in a LangChain HuggingFacePipeline."""
    tokenizer, model = load_model()
    # Generation settings for short, low-temperature, beam-searched replies.
    generation_kwargs = {
        # "max_new_tokens": 50,
        "top_k": 30,
        "top_p": 0.7,
        "early_stopping": True,
        "num_beams": 2,
        "temperature": 0.05,
        "repetition_penalty": 1.05,
    }
    text_generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        **generation_kwargs,
    )
    return HuggingFacePipeline(pipeline=text_generator)
# def generate_from_pipeline(text, pipe):
#     return pipe(text)
# Module-level singleton: the model pipeline is built once at import time and
# shared by demo_miny_memory() and demo_chain() below.
llm = load_pipeline()
# Cached memory instance. The original built a brand-new
# ConversationSummaryBufferMemory on every call, which silently discarded the
# accumulated conversation summary each turn; a module-level singleton lets the
# summary persist across chain invocations.
_memory = None

def demo_miny_memory():
    """Return the shared summary-buffer memory (created lazily, keyed on "history")."""
    global _memory
    if _memory is None:
        #prompt = ChatPromptTemplate.from_template(template)
        _memory = ConversationSummaryBufferMemory(llm=llm, memory_key="history")
    return _memory
@spaces.GPU
def demo_chain(input_text, history):
    """Run one turn of the hotel-booking conversation and return the chain output."""
    #PROMPT = ChatPromptTemplate.from_template(template)
    booking_prompt = PromptTemplate(
        template=template,
        input_variables=["history", "input"],
    )
    conversation = ConversationChain(
        llm=llm,
        prompt=booking_prompt,
        #verbose=langchain.globals.get_verbose(),
        verbose=True,
        memory=demo_miny_memory(),
    )
    payload = {
        "input" : input_text,
        "history" : history
    }
    chat_reply = conversation.invoke(payload, return_only_outputs=True)
    return chat_reply #['response'].split('AI:')[-1]