import streamlit as st
from huggingface_hub import InferenceClient
import os
import fitz # PyMuPDF
st.title("ChatGPT-like Chatbot")

base_url = "https://api-inference.huggingface.co/models/"
# Requires a Hugging Face token in the HUGGINGFACE_API_KEY environment variable.
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
model_links = {
    "Mistral-7B": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-22B": base_url + "mistral-community/Mixtral-8x22B-v0.1",
    "Zephyr-7B": base_url + "HuggingFaceH4/zephyr-7b-gemma-v0.1",
}
model_info = {
    "Mistral-7B": {
        'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.

It was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.**""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    # Minimal info for the Mixtral-8x22B model linked above, so every key in
    # model_links has a matching entry here.
    "Mistral-22B": {
        'description': """Mixtral-8x22B is a **Large Language Model (LLM)** from the Mistral community that's able to have question-and-answer interactions.""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    "Zephyr-7B": {
        'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.

From Hugging Face: Zephyr is a series of language models trained to act as helpful assistants. [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) is the third model in the series, and is a fine-tuned version of google/gemma-7b that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).""",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'}
}
def format_prompt(message, custom_instructions=None):
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
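
# A quick sketch of what format_prompt produces; the inputs below are purely
# illustrative, not values used by the app:
#   format_prompt("What is in this PDF?", "Act like a Human in conversation")
#   -> '[INST] Act like a Human in conversation [/INST][INST] What is in this PDF? [/INST]'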
def reset_conversation():
    st.session_state.conversation = []
    st.session_state.messages = []
def read_pdf(uploaded_file):
    # st.file_uploader returns an in-memory UploadedFile rather than a filesystem
    # path, so open it through PyMuPDF's stream interface.
    doc = fitz.open(stream=uploaded_file.read(), filetype="pdf")
    text = ""
    for page in doc:
        text += page.get_text()
    return text
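
# The same helper also works outside Streamlit with any binary file object
# (a sketch, assuming a local example.pdf exists):
#   with open("example.pdf", "rb") as f:
#       text = read_pdf(f)  # file objects expose .read() just like UploadedFile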
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
st.session_state.prev_option = selected_model
reset_conversation()
repo_id = model_links[selected_model]
st.subheader(f'AI - {selected_model}')
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
uploaded_pdf = st.file_uploader("Upload a PDF file", type="pdf")
pdf_text = read_pdf(uploaded_pdf) if uploaded_pdf else None
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question"):
    custom_instruction = "Act like a Human in conversation"

    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    formatted_prompt = format_prompt(prompt, custom_instruction)
    if uploaded_pdf:
        # If a PDF is uploaded, prepend its text so the model answers from it
        formatted_prompt = f"{pdf_text}\n\n{formatted_prompt}"

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            client = InferenceClient(model=repo_id, token=API_KEY)
            # text_generation returns the generated string directly; a response
            # object is only returned when details=True is passed.
            response_content = client.text_generation(
                formatted_prompt,
                max_new_tokens=500,
                temperature=temp_values or None,  # the endpoint rejects temperature=0.0
            )
            st.markdown(response_content)
    st.session_state.messages.append({"role": "assistant", "content": response_content})
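
# To try this page locally (a sketch; assumes the pages/Phase1.py layout above):
#   export HUGGINGFACE_API_KEY=hf_...   # a valid Hugging Face access token
#   streamlit run pages/Phase1.py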