File size: 3,825 Bytes
02fc79d
 
7e0679d
933893f
cfb47bd
02fc79d
d19cbee
 
933893f
 
 
02fc79d
d19cbee
933893f
 
 
 
d19cbee
 
d96f124
 
ea90c92
d96f124
743c19d
df93186
743c19d
d96f124
d19cbee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ea90c92
 
98cb8e3
ea90c92
 
 
 
 
 
844418d
ea90c92
 
 
98cb8e3
 
d19cbee
844418d
98cb8e3
1d0c984
 
 
 
 
 
9185ca3
64e9c35
98cb8e3
 
d19cbee
98cb8e3
 
 
 
 
 
 
 
 
 
 
81f5fb5
 
 
 
 
98cb8e3
199ae1f
b24a356
98cb8e3
 
 
844418d
98cb8e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import json
import re
import time
from urllib.request import urlopen

import gradio as gr
from gpt4all import GPT4All


# Populate all models available from GPT4All's published metadata index.
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
# Context manager closes the HTTP connection promptly instead of
# leaking it until garbage collection.
with urlopen(url) as response:
    data_json = json.loads(response.read())


def model_choices():
    """Return the list of downloadable model filenames from the metadata."""
    return [entry['filename'] for entry in data_json]

    
# Map each model filename to its (HTML-formatted) description text.
model_description = {entry['filename']: entry['description'] for entry in data_json}


def llm_intro(selected_model):
    """Return a readable plain-text description for *selected_model*.

    Falls back to a generic message when the filename is unknown.
    """
    text = model_description.get(selected_model, "No description available for this model selection.")
    # Strip/convert the simple HTML markup used in the metadata descriptions
    # (same substitutions, applied in the same order, as a chained replace).
    for old, new in (("<ul>", ""), ("</ul>", ""), ("</li>", ""), ("<br>", "\n"), ("</br>", ""), ("<li>", "\n➤ ")):
        text = text.replace(old, new)
    return text


def remove_endtags(html_string, tags):
    """Remove closing HTML tags (e.g. ``</ul>``) from *html_string*.

    Parameters
    ----------
    html_string : str
        Text possibly containing closing HTML tags.
    tags : iterable of str
        Tag names (without angle brackets) whose closing forms are stripped.

    Returns
    -------
    str
        The input with every ``</tag>`` occurrence removed.

    Note: the original used ``re`` without importing it, raising NameError
    on first call; the import is now at module level. ``re.escape`` guards
    against tag names containing regex metacharacters.
    """
    for tag in tags:
        html_string = re.sub(fr"</{re.escape(tag)}>", "", html_string)
    return html_string


def replace_starttags(html_string, replacements):
    """Substitute each HTML tag key in *replacements* with its mapped value."""
    result = html_string
    for original, substitute in replacements.items():
        result = result.replace(original, substitute)
    return result


def format_html_string(html_string):
    """Convert the metadata's simple HTML markup into readable plain text."""
    # Drop the closing tags first, then translate the remaining markup.
    stripped = remove_endtags(html_string, ["ul", "li", "br"])
    return replace_starttags(stripped, {
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "**",
        "</strong>": "**",
    })

    
# Cache of already-loaded models so switching back to one is instant.
model_cache = {}


def load_model(model_name):
    """Return a GPT4All instance for *model_name*, constructing it at most once.

    The first request for a given model loads it (which may download weights);
    subsequent requests return the cached instance.
    """
    cached = model_cache.get(model_name)
    if cached is None:
        cached = GPT4All(model_name)
        model_cache[model_name] = cached
    return cached

# Construct the chatbot response, threading prior turns into the prompt.
def generate_response(model_name, message, chat_history):
    """Generate a model reply and return the updated conversation history.

    Parameters
    ----------
    model_name : str
        Filename of the GPT4All model to use.
    message : str
        The user's new message.
    chat_history : list[tuple[str, str]] | None
        Prior (user, bot) turns from gr.State; None on the first call.

    Returns
    -------
    tuple
        (history for the Chatbot display, same history for gr.State).
    """
    model = load_model(model_name)
    # gr.State() starts out as None, so normalize to an empty history.
    # (The original reset the history to [] on EVERY call, which made the
    # context branch below dead code and lost all prior turns.)
    chat_history = chat_history or []
    if chat_history:
        # Flatten prior (user, bot) tuples into plain-text context; joining
        # the tuple list directly would raise TypeError.
        past_chat = " ".join(f"{user} {bot}" for user, bot in chat_history)
        input_text = past_chat + " " + message
    else:
        input_text = message
    response = model.generate(input_text, max_tokens=100)
    # Record only the new user message, not the stitched-together context.
    chat_history.append((message, response))
    # Return the history to BOTH outputs: the original returned the raw
    # response string into gr.State, so state never actually persisted.
    return chat_history, chat_history

# Create Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# GPT4All Chatbot")
    with gr.Row():
        # Narrow left column: model selection + description.
        with gr.Column(scale=1):
            model_dropdown = gr.Dropdown(
                choices=model_choices(),  
                multiselect=False,
                type="value",
                value="orca-mini-3b-gguf2-q4_0.gguf",
                label="LLMs to choose from"
            )
            # Pre-populate with the description of the default model above.
            explanation = gr.Textbox(label="Model Description", lines=3, interactive=False, value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))
    
            # Link the dropdown with the textbox to update the description based on the selected model
            model_dropdown.change(fn=llm_intro, inputs=model_dropdown, outputs=explanation)
            
        # Wide right column: the chat transcript, seeded with a greeting.
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(label="Chatroom", value=[(None, "How may I help you today?")])
            
    message = gr.Textbox(label="Message")
    # Holds conversation history between submits; starts as None.
    state = gr.State()

    # Enter in the textbox sends (model, message, history) to the generator;
    # its two return values update the transcript and the stored state.
    message.submit(generate_response, inputs=[model_dropdown, message, state], outputs=[chatbot, state])

# Launch the Gradio app
demo.launch()