# chatbot-hf / home.py: Ebiz Streamlit chatbot front end
import requests
import streamlit as st
import torch  # used by the commented-out local pipeline in setup_chain

import utils
from streaming import StreamHandler

# from LLM import OpenAI
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOllama
from langchain.llms import HuggingFacePipeline  # optional local backend, see setup_chain
from langchain.memory import ConversationBufferMemory
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, pipeline
st.set_page_config(page_title="Chatbot", page_icon="πŸ’¬")
st.header('Ebiz Chatbot')
st.write('Chat with the LLM; sampling parameters and file uploads are in the sidebar.')
# st.write('[![view source code ](https://img.shields.io/badge/view_source_code-gray?logo=github)](https://github.com/shashankdeshpande/langchain-chatbot/blob/master/pages/1_%F0%9F%92%AC_basic_chatbot.py)')
# Sidebar controls for the sampling parameters forwarded to the upload backend
with st.sidebar:
    ktopVal = st.slider('Top-k', 0.0, 1.0, step=0.1)
    ptopVal = st.slider('Top-p', 0.0, 1.0, step=0.1)
    tempVal = st.slider('Temperature', 0.0, 1.0, step=0.1)
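    # Descriptive note: temperature rescales token logits before sampling,
    # top-p keeps the smallest set of tokens whose cumulative probability
    # exceeds p, and top-k keeps only the k most likely tokens. Top-k is
    # conventionally an integer; the 0-1 float slider is kept from the
    # original UI and is assumed to be interpreted by the backend.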
    uploaded_file = st.file_uploader("Choose a file")
    submit_button = st.button("Submit")

if submit_button:
    if uploaded_file is None:
        st.warning("Please choose a file before submitting.")
    else:
        # Display the file details
        st.write("File Details:")
        st.write(f"Filename: {uploaded_file.name}")
        st.write(f"File Type: {uploaded_file.type}")
        st.write(f"File Size: {uploaded_file.size} bytes")

        # Send the file and sampling parameters to the Flask server
        url = "http://127.0.0.1:5000/upload"
        files = {'file': (uploaded_file.name, uploaded_file, uploaded_file.type)}
        payload = {'ktopVal': ktopVal, 'ptopVal': ptopVal, 'tempVal': tempVal}
        response = requests.post(url, files=files, data=payload)

        # Display the response from the server
        st.write("Server Response:")
        st.write(response.text)
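# The /upload endpoint lives in a separate Flask app, not in this file. A
# minimal hypothetical sketch of what it presumably accepts, inferred from the
# request above (route, field names, and behavior are assumptions):
#
#   from flask import Flask, request
#
#   app = Flask(__name__)
#
#   @app.route("/upload", methods=["POST"])
#   def upload():
#       f = request.files["file"]
#       params = {k: float(request.form[k]) for k in ("ktopVal", "ptopVal", "tempVal")}
#       # ... index the file / configure generation with params ...
#       return f"received {f.filename}"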
class Basic:
    def __init__(self):
        utils.configure_openai_api_key()
        self.openai_model = "gpt-3.5-turbo"  # retained from the OpenAI variant; unused with the Ollama backend below
    def setup_chain(self):
        # Local Hugging Face alternative, kept for reference:
        # config = AutoConfig.from_pretrained('stabilityai/stablelm-3b-4e1t', trust_remote_code=True)
        model_name = 'stabilityai/stablelm-3b-4e1t'
        torch_dtype = "float16"  # one of "float16", "bfloat16", "float"
        load_in_8bit = False
        device_map = "auto"
        # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        # model = AutoModelForCausalLM.from_pretrained(
        #     model_name,
        #     torch_dtype=getattr(torch, torch_dtype),
        #     load_in_8bit=load_in_8bit,
        #     device_map=device_map,
        #     trust_remote_code=True,
        #     offload_folder="./offload",
        # )
        # pipe = pipeline(
        #     "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256
        # )
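        # To use the local pipeline instead of Ollama, uncomment the block
        # above and wrap it for LangChain (this is what the HuggingFacePipeline
        # import is for); a minimal sketch, assuming that code is enabled:
        # llm = HuggingFacePipeline(pipeline=pipe)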
        llm = ChatOllama(model="llama2")  # requires a running Ollama server with the llama2 model pulled
        memory = ConversationBufferMemory()
        chain = ConversationChain(llm=llm, memory=memory, verbose=True)
        return chain
    @utils.enable_chat_history
    def main(self):
        # Build the chain once and keep it in session state; recreating it on
        # every Streamlit rerun would reset ConversationBufferMemory.
        if "basic_chain" not in st.session_state:
            st.session_state.basic_chain = self.setup_chain()
        chain = st.session_state.basic_chain
        user_query = st.chat_input(placeholder="Ask me anything!")
        if user_query:
            utils.display_msg(user_query, 'user')
            with st.chat_message("assistant"):
                st_cb = StreamHandler(st.empty())
                response = chain(user_query, callbacks=[st_cb])
                st.session_state.messages.append({"role": "assistant", "content": response['response']})
if __name__ == "__main__":
    obj = Basic()
    obj.main()
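# Launched locally with something like:
#   streamlit run home.py
# assuming the Flask upload endpoint is listening on 127.0.0.1:5000 and an
# Ollama daemon is serving the llama2 model.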