import os

import streamlit as st
import torch
import transformers
from huggingface_hub import login
from langchain import HuggingFacePipeline, LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from streamlit_extras.let_it_rain import rain
from transformers import AutoTokenizer


def get_response(question):
    """Send the user's message to the OpenAI chat model and record both sides of the exchange."""
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    assistant_answer = chat(st.session_state.sessionMessages)
    st.session_state.sessionMessages.append(AIMessage(content=assistant_answer.content))
    return assistant_answer.content


def get_sentiment(user_input, llm_chain):
    """Classify the user's message with the Llama 2 chain and normalise the label."""
    result = llm_chain.run(user_input).strip()
    # The model may answer e.g. "Positive." — keep only the first word and drop trailing
    # punctuation so the comparison against "positive"/"negative"/"neutral" below matches.
    first_word = result.split()[0] if result else ""
    return first_word.strip(".").lower()


def init_llama_model():
    """Build a sentiment-classification chain on top of a Llama-2-7b-chat text-generation pipeline."""
    model = "meta-llama/Llama-2-7b-chat-hf"
    tokenizer = AutoTokenizer.from_pretrained(model)
    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )
    llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={"temperature": 0.3})

    template = """Classify the text into neutral, negative, or positive.
Reply with only one word: Positive, Negative, or Neutral.

Examples:
Text: You will simply love the big variety of snacks (sweet and savoury) and you can't go wrong if you choose the place for a quick meal or coffee.
Sentiment: Positive.

Text: I got food poisoning
Sentiment: Negative.

Text: {text}
Sentiment:
"""
    prompt = PromptTemplate(template=template, input_variables=["text"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    return llm_chain


# The Llama 2 checkpoint is gated on the Hugging Face Hub. If your token is not already
# configured locally, uncomment the line below (assuming the token is exposed as HF_TOKEN).
# login(token=os.environ["HF_TOKEN"])

chat = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
llm_chain = init_llama_model()

st.set_page_config(page_title="HomeX Assistant", page_icon=":robot:")
st.header("Hey, I'm your HomeX Assistant")

# Message history for the OpenAI chat model (includes the system prompt)
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [SystemMessage(content="You are a helpful assistant.")]

# Message history used to render the chat UI
if "messages" not in st.session_state:
    st.session_state.messages = []

if user_input := st.chat_input("Welcome home, say something"):
    assistant_input = get_response(user_input)

    # Add the user message to the display history
    st.session_state.messages.append({"role": "user", "content": user_input})
    # Add the assistant reply to the display history
    st.session_state.messages.append({"role": "assistant", "content": assistant_input})

    # Run sentiment analysis on the user's message and pick a matching emoji animation
    sentiment = get_sentiment(user_input, llm_chain)
    if sentiment == "negative":
        rain(
            emoji="😭",
            font_size=30,                  # size of the falling emoji
            falling_speed=3,               # speed of the rain animation
            animation_length="infinite",   # how long the animation runs
        )
    elif sentiment == "neutral":
        rain(
            emoji="😐",
            font_size=30,
            falling_speed=3,
            animation_length="infinite",
        )
    elif sentiment == "positive":
        rain(
            emoji="🤩",
            font_size=30,
            falling_speed=3,
            animation_length="infinite",
        )

# Display the chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
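# --- Note on reruns and usage (a sketch, not part of the original app) ---------------
# Streamlit re-executes this whole script on every user interaction, so as written the
# 7B Llama 2 pipeline built in init_llama_model() is reloaded on each message. A minimal
# way to avoid that, assuming the installed Streamlit provides st.cache_resource (it does
# wherever st.chat_input is available), is to cache the loader and assign from it instead:
#
#     @st.cache_resource
#     def load_sentiment_chain():
#         return init_llama_model()
#
#     llm_chain = load_sentiment_chain()
#
# To run the app (assuming the file is saved as app.py), set OPENAI_API_KEY for the
# ChatOpenAI assistant and a Hugging Face token with access to the gated Llama 2 repo,
# then launch it with:  streamlit run app.py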