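# Streamlit chat app that talks to models hosted on Hugging Face's serverless
# Inference API: a sidebar selects the model and sampling temperature, and the
# chat history is kept in st.session_state across Streamlit's script reruns.
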
import streamlit as st
from huggingface_hub import InferenceClient
import os

st.title("ChatGPT-like Chatbot")

base_url="https://api-inference.huggingface.co/models/"

API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
if API_KEY is None:
    # Fail fast with a readable message instead of a TypeError on concatenation
    st.error("HUGGINGFACE_API_KEY is not set; add it to the environment before launching.")
    st.stop()
headers = {"Authorization": "Bearer " + API_KEY}

model_links = {
    "Mistral-7B":base_url+"mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-22B":base_url+"mistral-community/Mixtral-8x22B-v0.1",
    # "Gemma-2B":base_url+"google/gemma-2b-it",
    # "Zephyr-7B-β":base_url+"HuggingFaceH4/zephyr-7b-beta",
    # "Llama-2":"meta-llama/Llama-2-7b-chat-hf"
}
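
# Each value above is a full serverless Inference API URL; InferenceClient
# accepts either a bare repo id or a URL like these as its `model` argument.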

# Pull info about the model to display
model_info = {
    "Mistral-7B":
        {'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    "Mistral-22B":
        {'description':"""The Mixtral-8x22B model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-22b/) team as a mixture-of-experts model built from **22-billion-parameter** experts. \n""",
        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'}

    # "Gemma-7B":        
    #     {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
    #         \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
    #     'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    # "Gemma-2B":        
    # {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
    #     \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
    # 'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    # "Zephyr-7B":        
    # {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
    #     \nFrom Huggingface: \n\
    #     Zephyr is a series of language models that are trained to act as helpful assistants. \
    #     [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)\
    #     is the third model in the series, and is a fine-tuned version of google/gemma-7b \
    #     that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
    # 'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
    # "Zephyr-7B-β":        
    # {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
    #     \nFrom Huggingface: \n\
    #     Zephyr is a series of language models that are trained to act as helpful assistants. \
    #     [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)\
    #     is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
    #     that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
    # 'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},

}

def format_prompt(message, custom_instructions=None):
    '''
    Wraps the optional custom instructions and the user message in
    Mistral's [INST] ... [/INST] instruction format.
    '''
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
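
# For example:
#   format_prompt("What is an LLM?", "Act like a Human in conversation")
# returns:
#   '[INST] Act like a Human in conversation [/INST][INST] What is an LLM? [/INST]'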

def reset_conversation():
    '''
    Resets the conversation history stored in session state.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
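
# Note: as an on_click callback, reset_conversation runs before the script
# reruns, so the next render already starts from an empty transcript.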

models = list(model_links.keys())

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Create model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")

if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

# Reset everything when the user switches models between reruns
if st.session_state.prev_option != selected_model:
    # st.write(f"Changed to {selected_model}")
    st.session_state.prev_option = selected_model
    reset_conversation()

# Pull in the endpoint URL for the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')
# st.title(f'ChatBot Using {selected_model}')

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
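
# st.session_state persists across Streamlit reruns, so the transcript
# accumulates here until Reset Chat (or a model switch) clears it.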

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):

    custom_instruction = "Act like a Human in conversation"

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    formatted_text = format_prompt(prompt, custom_instruction)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        client = InferenceClient(
            model=repo_id,
            headers=headers)

        output = client.text_generation(
            formatted_text,
            temperature=max(temp_values, 0.01),  # the backend rejects temperature == 0
            max_new_tokens=3000,
            stream=True
        )
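        # With stream=True, text_generation yields tokens as they arrive;
        # st.write_stream renders them incrementally and returns the full
        # concatenated response once the stream ends.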

        response = st.write_stream(output)
    st.session_state.messages.append({"role": "assistant", "content": response})