Chertushkin committed
Commit 9abf1d0
1 Parent(s): edc8720
app.py CHANGED
@@ -1,7 +1,136 @@
+"""
+Credit to Derek Thomas, derek@huggingface.co
+"""
+
+import subprocess
+
+subprocess.run(["pip", "install", "--upgrade", "transformers[torch,sentencepiece]==4.34.1"])
+
+import logging
+from pathlib import Path
+from time import perf_counter
+
 import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
+from jinja2 import Environment, FileSystemLoader
+
+from backend.query_llm import embed_docs, generate_hf, generate_openai
+from backend.semantic_search import table, retriever
+
+VECTOR_COLUMN_NAME = "embedding"
+TEXT_COLUMN_NAME = "text"
+
+proj_dir = Path(__file__).parent
+# Setting up the logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Set up the template environment with the templates directory
+env = Environment(loader=FileSystemLoader(proj_dir / "templates"))
+
+# Load the templates directly from the environment
+template = env.get_template("template.j2")
+template_html = env.get_template("template_html.j2")
+
+# Examples
+examples = [
+    "What is the capital of China?",
+    "Why is the sky blue?",
+    "Who won the men's world cup in 2014?",
+]
+
+
+def add_text(history, text):
+    history = [] if history is None else history
+    history = history + [(text, None)]
+    return history, gr.Textbox(value="", interactive=False)
+
+
+def bot(history, api_kind):
+    top_k_rank = 4
+    query = history[-1][0]
+
+    if not query:
+        gr.Warning("Please submit a non-empty string as a prompt")
+        raise ValueError("Empty string was submitted")
+
+    logger.warning("Retrieving documents...")
+    # Retrieve documents relevant to query
+    document_start = perf_counter()
+
+    query_vec = retriever.encode(query)
+    # print(query_vec)
+    # print(table)
+    # print('------')
+    documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_k_rank).to_list()
+    documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
+    document_time = perf_counter() - document_start
+    logger.warning(f"Finished Retrieving documents in {round(document_time, 2)} seconds...")
+
+    # Create Prompt
+    prompt = template.render(documents=documents, query=query)
+    prompt_html = template_html.render(documents=documents, query=query)
+
+    if api_kind == "HuggingFace":
+        generate_fn = generate_hf
+    elif api_kind == "OpenAI":
+        generate_fn = generate_openai
+    elif api_kind is None:
+        gr.Warning("API name was not provided")
+        raise ValueError("API name was not provided")
+    else:
+        gr.Warning(f"API {api_kind} is not supported")
+        raise ValueError(f"API {api_kind} is not supported")
+
+    history[-1][1] = ""
+
+    for character in generate_fn(prompt, history[:-1]):
+        history[-1][1] = character
+        yield history, prompt_html
+
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(
+        [],
+        elem_id="chatbot",
+        avatar_images=(
+            "https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg",
+            "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg",
+        ),
+        bubble_full_width=False,
+        show_copy_button=True,
+        show_share_button=True,
+    )
+
+    with gr.Row():
+        txt = gr.Textbox(
+            scale=3,
+            show_label=False,
+            placeholder="Enter text and press enter",
+            container=False,
+        )
+        txt_btn = gr.Button(value="Submit text", scale=1)
+
+    api_kind = gr.Radio(choices=["HuggingFace", "OpenAI"], value="HuggingFace")
+
+    prompt_html = gr.HTML()
+    # Turn off interactivity while generating if you click
+    txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+        bot, [chatbot, api_kind], [chatbot, prompt_html]
+    )
+
+    # Turn it back on
+    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+
+    # Turn off interactivity while generating if you hit enter
+    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+        bot, [chatbot, api_kind], [chatbot, prompt_html]
+    )
+
+    # Turn it back on
+    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+
+    # Examples
+    gr.Examples(examples, txt)
+
+demo.queue()
+demo.launch(debug=True, share=True)
backend/query_llm.py ADDED
@@ -0,0 +1,177 @@
+from typing import List
+import openai
+import gradio as gr
+
+from os import getenv
+from typing import Any, Dict, Generator, List
+
+from huggingface_hub import InferenceClient
+from transformers import AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+
+temperature = 0.9
+top_p = 0.6
+repetition_penalty = 1.2
+
+OPENAI_KEY = getenv("OPENAI_API_KEY")
+HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
+
+hf_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1", token=HF_TOKEN)
+
+
+def embed_docs(prompt: str, documents: List[str]):
+    context_template = """
+    I am giving you context from several documents. Your goal is to process the documents and use them in your answer. Here are the documents:
+    """
+    for i, doc in enumerate(documents):
+        context_template += "\n" + f"Document {i}:\n" + doc
+    context_template += "\n" + "Here is the question:\n" + prompt
+    return context_template
+
+
+def format_prompt(message: str, api_kind: str):
+    """
+    Formats the given message using a chat template.
+
+    Args:
+        message (str): The user message to be formatted.
+
+    Returns:
+        str: Formatted message after applying the chat template.
+    """
+
+    # Create a list of message dictionaries with role and content
+    messages: List[Dict[str, Any]] = [{"role": "user", "content": message}]
+
+    if api_kind == "openai":
+        return messages
+    elif api_kind == "hf":
+        return tokenizer.apply_chat_template(messages, tokenize=False)
+    elif api_kind:
+        raise ValueError("API is not supported")
+
+
+def generate_hf(
+    prompt: str,
+    history: str,
+    temperature: float = 0.9,
+    max_new_tokens: int = 256,
+    top_p: float = 0.95,
+    repetition_penalty: float = 1.0,
+) -> Generator[str, None, str]:
+    """
+    Generate a sequence of tokens based on a given prompt and history using Mistral client.
+
+    Args:
+        prompt (str): The initial prompt for the text generation.
+        history (str): Context or history for the text generation.
+        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
+        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
+        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
+        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.
+
+    Returns:
+        Generator[str, None, str]: A generator yielding chunks of generated text.
+            Returns a final string if an error occurs.
+    """
+    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
+    top_p = float(top_p)
+
+    generate_kwargs = {
+        "temperature": temperature,
+        "max_new_tokens": max_new_tokens,
+        "top_p": top_p,
+        "repetition_penalty": repetition_penalty,
+        "do_sample": True,
+        "seed": 42,
+    }
+
+    formatted_prompt = format_prompt(prompt, "hf")
+    print("FORMATTED PROMPT STARTED")
+    print("----------------")
+    print(formatted_prompt)
+    print("FORMATTED PROMPT ENDED")
+    print("----------------")
+    try:
+        stream = hf_client.text_generation(
+            formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
+        )
+        output = ""
+        for response in stream:
+            output += response.token.text
+            yield output
+
+    except Exception as e:
+        if "Too Many Requests" in str(e):
+            print("ERROR: Too many requests on Mistral client")
+            gr.Warning("Unfortunately Mistral is unable to process")
+            return "Unfortunately, I am not able to process your request now."
+        elif "Authorization header is invalid" in str(e):
+            print("Authentication error:", str(e))
+            gr.Warning("Authentication error: HF token was either not provided or incorrect")
+            return "Authentication error"
+        else:
+            print("Unhandled Exception:", str(e))
+            gr.Warning("Unfortunately Mistral is unable to process")
+            return "I do not know what happened, but I couldn't understand you."
+
+
+def generate_openai(
+    prompt: str,
+    history: str,
+    temperature: float = 0.9,
+    max_new_tokens: int = 256,
+    top_p: float = 0.95,
+    repetition_penalty: float = 1.0,
+) -> Generator[str, None, str]:
+    """
+    Generate a sequence of tokens based on a given prompt and history using the OpenAI client.
+
+    Args:
+        prompt (str): The initial prompt for the text generation.
+        history (str): Context or history for the text generation.
+        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
+        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
+        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
+        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.
+
+    Returns:
+        Generator[str, None, str]: A generator yielding chunks of generated text.
+            Returns a final string if an error occurs.
+    """
+
+    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
+    top_p = float(top_p)
+
+    generate_kwargs = {
+        "temperature": temperature,
+        "max_tokens": max_new_tokens,
+        "top_p": top_p,
+        "frequency_penalty": max(-2.0, min(repetition_penalty, 2.0)),
+    }
+
+    formatted_prompt = format_prompt(prompt, "openai")
+
+    try:
+        stream = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo-0301", messages=formatted_prompt, **generate_kwargs, stream=True
+        )
+        output = ""
+        for chunk in stream:
+            output += chunk.choices[0].delta.get("content", "")
+            yield output
+
+    except Exception as e:
+        if "Too Many Requests" in str(e):
+            print("ERROR: Too many requests on OpenAI client")
+            gr.Warning("Unfortunately OpenAI is unable to process")
+            return "Unfortunately, I am not able to process your request now."
+        elif "You didn't provide an API key" in str(e):
+            print("Authentication error:", str(e))
+            gr.Warning("Authentication error: OpenAI key was either not provided or incorrect")
+            return "Authentication error"
+        else:
+            print("Unhandled Exception:", str(e))
+            gr.Warning("Unfortunately OpenAI is unable to process")
+            return "I do not know what happened, but I couldn't understand you."
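Both generators above stream cumulative output (each yield is the full text so far) and read their tokens from the environment at import time. Below is a minimal sketch, not part of the commit, of driving the Hugging Face path directly outside Gradio; the token values are placeholders and the prompt is only an example.

import os

# Placeholders: backend/query_llm.py reads these names via getenv() at import time.
os.environ.setdefault("HUGGING_FACE_HUB_TOKEN", "hf_xxx")
os.environ.setdefault("OPENAI_API_KEY", "sk-xxx")

from backend.query_llm import generate_hf

# Each yielded value is the accumulated generation, so only the last one is kept.
answer = ""
for partial in generate_hf("Why is the sky blue?", history=""):
    answer = partial
print(answer)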
backend/semantic_search.py ADDED
@@ -0,0 +1,19 @@
+import logging
+import lancedb
+import os
+from pathlib import Path
+from sentence_transformers import SentenceTransformer
+
+# EMB_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
+EMB_MODEL_NAME = "jinaai/jina-embeddings-v2-base-en"
+DB_TABLE_NAME = "chunks"
+
+# Setting up the logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+retriever = SentenceTransformer(EMB_MODEL_NAME)
+
+# db
+db_uri = os.path.join(Path(__file__).parents[1], ".lancedb")
+db = lancedb.connect(db_uri)
+table = db.open_table(DB_TABLE_NAME)
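backend/semantic_search.py only opens an existing .lancedb database; the step that builds the "chunks" table is not part of this commit. Below is a minimal sketch of how such a table could be created, assuming its columns match the TEXT_COLUMN_NAME ("text") and VECTOR_COLUMN_NAME ("embedding") that app.py queries; the sample chunks are placeholders, and the lighter all-MiniLM-L6-v2 model (commented out above) is used to keep the sketch small.

import lancedb
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
chunks = [
    "Paris is the capital of France.",  # placeholder document chunks
    "Rayleigh scattering makes the sky look blue.",
]

db = lancedb.connect(".lancedb")
db.create_table(
    "chunks",  # DB_TABLE_NAME expected by semantic_search.py
    data=[{"text": c, "embedding": model.encode(c).tolist()} for c in chunks],
)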
requirements.txt ADDED
@@ -0,0 +1,9 @@
+# transformers[torch,sentencepiece]==4.34.1
+wikiextractor==3.0.6
+sentence-transformers>2.2.0
+ipywidgets==8.1.1
+tqdm==4.66.1
+aiohttp==3.8.6
+huggingface-hub==0.17.3
+lancedb==0.3.1
+openai==0.28
templates/template.j2 ADDED
@@ -0,0 +1,8 @@
+Instructions: Use the following unique documents in the Context section to answer the Query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+Context:
+{% for doc in documents %}
+---
+{{ doc }}
+{% endfor %}
+---
+Query: {{ query }}
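Rendered through the Jinja2 environment set up in app.py, the template above becomes the plain-text prompt sent to the model. A small render sketch follows; the document text and query are made-up examples.

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))
template = env.get_template("template.j2")

# Produces the Instructions / Context / Query prompt with one placeholder document.
print(template.render(documents=["Rayleigh scattering makes the sky look blue."],
                      query="Why is the sky blue?"))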
templates/template_html.j2 ADDED
@@ -0,0 +1,102 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Information Page</title>
+    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap">
+    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap">
+    <style>
+        * {
+            font-family: "Source Sans Pro";
+        }
+
+        .instructions > * {
+            color: #111 !important;
+        }
+
+        details.doc-box * {
+            color: #111 !important;
+        }
+
+        .dark {
+            background: #111;
+            color: white;
+        }
+
+        .doc-box {
+            padding: 10px;
+            margin-top: 10px;
+            background-color: #baecc2;
+            border-radius: 6px;
+            color: #111 !important;
+            max-width: 700px;
+            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+        }
+
+        .doc-full {
+            margin: 10px 14px;
+            line-height: 1.6rem;
+        }
+
+        .instructions {
+            color: #111 !important;
+            background: #b7bdfd;
+            display: block;
+            border-radius: 6px;
+            padding: 6px 10px;
+            line-height: 1.6rem;
+            max-width: 700px;
+            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+        }
+
+        .query {
+            color: #111 !important;
+            background: #ffbcbc;
+            display: block;
+            border-radius: 6px;
+            padding: 6px 10px;
+            line-height: 1.6rem;
+            max-width: 700px;
+            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
+        }
+    </style>
+</head>
+<body>
+<div class="prose svelte-1ybaih5" id="component-6">
+    <h2>Prompt</h2>
+    Below is the prompt that is given to the model. <hr>
+    <h2>Instructions</h2>
+    <span class="instructions">Use the following pieces of context to answer the question at the end.<br>If you don't know the answer, just say that you don't know, <span style="font-weight: bold;">don't try to make up an answer.</span></span><br>
+    <h2>Context</h2>
+    {% for doc in documents %}
+    <details class="doc-box">
+        <summary>
+            <b>Doc {{ loop.index }}:</b> <span class="doc-short">{{ doc[:100] }}...</span>
+        </summary>
+        <div class="doc-full">{{ doc }}</div>
+    </details>
+    {% endfor %}
+
+    <h2>Query</h2>
+    <span class="query">{{ query }}</span>
+</div>
+
+<script>
+    document.addEventListener("DOMContentLoaded", function() {
+        const detailsElements = document.querySelectorAll('.doc-box');
+
+        detailsElements.forEach(detail => {
+            detail.addEventListener('toggle', function() {
+                const docShort = this.querySelector('.doc-short');
+                if (this.open) {
+                    docShort.style.display = 'none';
+                } else {
+                    docShort.style.display = 'inline';
+                }
+            });
+        });
+    });
+</script>
+</body>
+</html>