rameshmoorthy committed e1d216e (1 parent: f96a19b): Upload 4 files

Files changed:
- backend/query_llm.py        +160 -0
- backend/semantic_search.py  +27 -0
- templates/template.j2       +8 -0
- templates/template_html.j2  +102 -0
backend/query_llm.py
ADDED
@@ -0,0 +1,160 @@
import openai
import gradio as gr

from os import getenv
from typing import Any, Dict, Generator, List

from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

# tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
temperature = 0.5
top_p = 0.7
repetition_penalty = 1.2

OPENAI_KEY = getenv("OPENAI_API_KEY")
HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")

# hf_client = InferenceClient(
#     "mistralai/Mistral-7B-Instruct-v0.1",
#     token=HF_TOKEN
# )

hf_client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    token=HF_TOKEN
)


def format_prompt(message: str, api_kind: str):
    """
    Formats the given message using a chat template.

    Args:
        message (str): The user message to be formatted.
        api_kind (str): Target API, either "openai" or "hf".

    Returns:
        The formatted prompt: a list of message dicts for "openai",
        or a string produced by the tokenizer's chat template for "hf".
    """

    # Create a list of message dictionaries with role and content
    messages: List[Dict[str, Any]] = [{'role': 'user', 'content': message}]

    if api_kind == "openai":
        return messages
    elif api_kind == "hf":
        return tokenizer.apply_chat_template(messages, tokenize=False)
    elif api_kind:
        raise ValueError("API is not supported")


def generate_hf(prompt: str, history: str, temperature: float = 0.5, max_new_tokens: int = 4000,
                top_p: float = 0.95, repetition_penalty: float = 1.0) -> Generator[str, None, str]:
    """
    Generate a sequence of tokens based on a given prompt and history using the Hugging Face
    Inference client (Mixtral).

    Args:
        prompt (str): The initial prompt for the text generation.
        history (str): Context or history for the text generation.
        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.5.
        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 4000.
        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.

    Returns:
        Generator[str, None, str]: A generator yielding chunks of generated text.
            Returns a final string if an error occurs.
    """

    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
    top_p = float(top_p)

    generate_kwargs = {
        'temperature': temperature,
        'max_new_tokens': max_new_tokens,
        'top_p': top_p,
        'repetition_penalty': repetition_penalty,
        'do_sample': True,
        'seed': 42,
    }

    formatted_prompt = format_prompt(prompt, "hf")

    try:
        stream = hf_client.text_generation(formatted_prompt, **generate_kwargs,
                                           stream=True, details=True, return_full_text=False)
        output = ""
        for response in stream:
            output += response.token.text
            yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            print("ERROR: Too many requests on Mistral client")
            gr.Warning("Unfortunately Mistral is unable to process")
            return "Unfortunately, I am not able to process your request now."
        elif "Authorization header is invalid" in str(e):
            print("Authentication error:", str(e))
            gr.Warning("Authentication error: HF token was either not provided or incorrect")
            return "Authentication error"
        else:
            print("Unhandled Exception:", str(e))
            gr.Warning("Unfortunately Mistral is unable to process")
            return "I do not know what happened, but I couldn't understand you."


def generate_openai(prompt: str, history: str, temperature: float = 0.9, max_new_tokens: int = 256,
                    top_p: float = 0.95, repetition_penalty: float = 1.0) -> Generator[str, None, str]:
    """
    Generate a sequence of tokens based on a given prompt and history using the OpenAI client.

    Args:
        prompt (str): The initial prompt for the text generation.
        history (str): Context or history for the text generation.
        temperature (float, optional): The softmax temperature for sampling. Defaults to 0.9.
        max_new_tokens (int, optional): Maximum number of tokens to be generated. Defaults to 256.
        top_p (float, optional): Nucleus sampling probability. Defaults to 0.95.
        repetition_penalty (float, optional): Penalty for repeated tokens. Defaults to 1.0.

    Returns:
        Generator[str, None, str]: A generator yielding chunks of generated text.
            Returns a final string if an error occurs.
    """

    temperature = max(float(temperature), 1e-2)  # Ensure temperature isn't too low
    top_p = float(top_p)

    generate_kwargs = {
        'temperature': temperature,
        'max_tokens': max_new_tokens,
        'top_p': top_p,
        'frequency_penalty': max(-2., min(repetition_penalty, 2.)),
    }

    formatted_prompt = format_prompt(prompt, "openai")

    try:
        stream = openai.ChatCompletion.create(model="gpt-3.5-turbo-0301",
                                              messages=formatted_prompt,
                                              **generate_kwargs,
                                              stream=True)
        output = ""
        for chunk in stream:
            output += chunk.choices[0].delta.get("content", "")
            yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            print("ERROR: Too many requests on OpenAI client")
            gr.Warning("Unfortunately OpenAI is unable to process")
            return "Unfortunately, I am not able to process your request now."
        elif "You didn't provide an API key" in str(e):
            print("Authentication error:", str(e))
            gr.Warning("Authentication error: OpenAI key was either not provided or incorrect")
            return "Authentication error"
        else:
            print("Unhandled Exception:", str(e))
            gr.Warning("Unfortunately OpenAI is unable to process")
            return "I do not know what happened, but I couldn't understand you."
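Both generate_hf and generate_openai yield the full text accumulated so far on every step, so a caller only needs to keep the latest value. A minimal consumption sketch, not part of this commit (the question text is illustrative and HUGGING_FACE_HUB_TOKEN is assumed to be set):

# Hypothetical smoke test for the streaming helpers above.
from backend.query_llm import generate_hf

answer = ""
for partial in generate_hf("Which registers must an Expenditure Observer verify?", history=""):
    answer = partial  # each yield is the full text generated so far, not a delta
print(answer)
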
backend/semantic_search.py
ADDED
@@ -0,0 +1,27 @@
import logging
import os
from pathlib import Path

import lancedb
from sentence_transformers import SentenceTransformer
# from FlagEmbedding import LLMEmbedder, FlagReranker  # Documentation: https://github.com/FlagOpen/FlagEmbedding/tree/master

# EMB_MODEL_NAME = "thenlper/gte-base"
EMB_MODEL_NAME = 'BAAI/llm-embedder'
task = "qa"  # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
# EMB_MODEL_NAME = LLMEmbedder('BAAI/llm-embedder', use_fp16=False)  # Load model (automatically uses GPUs)

# reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True)  # use_fp16 speeds up computation with a slight performance degradation

# DB_TABLE_NAME = "Huggingface_docs"
DB_TABLE_NAME = "Expenditure_"

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Embedding model used as the retriever
retriever = SentenceTransformer(EMB_MODEL_NAME)

# Connect to the LanceDB database in the repository root and open the table
db_uri = os.path.join(Path(__file__).parents[1], ".lancedb1")
db = lancedb.connect(db_uri)
table = db.open_table(DB_TABLE_NAME)
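semantic_search.py only loads the embedding model and opens the LanceDB table; a retrieval helper on top of it might look like the sketch below. It is not part of this commit: the "text" column name, the result limit, and the llm-embedder style query prefix are assumptions.

# Hypothetical retrieval helper built on `retriever` and `table` above.
QUERY_PREFIX = "Represent this query for retrieving relevant documents: "  # assumed llm-embedder "qa" prefix

def retrieve(query: str, k: int = 5) -> list:
    """Return the k passages closest to the query (assumes each row stores its passage in a "text" column)."""
    query_vec = retriever.encode(QUERY_PREFIX + query)
    hits = table.search(query_vec).limit(k).to_list()  # vector similarity search in LanceDB
    return [hit["text"] for hit in hits]
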
templates/template.j2
ADDED
@@ -0,0 +1,8 @@
Instructions: You are an assistant to the Expenditure Observer. You have to answer the Expenditure Observer's questions. Use the following unique documents in the Context section to answer the Query at the end. If you don't know the answer, just say that you don't know; don't try to make up an answer.
Context:
{% for doc in documents %}
---
{{ doc }}
{% endfor %}
---
Query: {{ query }}
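The plain-text template above is presumably what gets sent to the model. A rendering sketch, not part of this commit (the document list and question are placeholders):

# Hypothetical glue code: fill template.j2 and stream an answer from the HF backend.
from jinja2 import Environment, FileSystemLoader

from backend.query_llm import generate_hf

env = Environment(loader=FileSystemLoader("templates"))
prompt_template = env.get_template("template.j2")

docs = ["Expenditure registers are to be reconciled daily."]   # placeholder retrieved documents
question = "How often are expenditure registers reconciled?"   # placeholder query

prompt = prompt_template.render(documents=docs, query=question)
answer = ""
for partial in generate_hf(prompt, history=""):
    answer = partial  # forward each partial to the UI in a real app
print(answer)
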
templates/template_html.j2
ADDED
@@ -0,0 +1,102 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Information Page</title>
    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap">
    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap">
    <style>
        * {
            font-family: "Source Sans Pro";
        }

        .instructions > * {
            color: #111 !important;
        }

        details.doc-box * {
            color: #111 !important;
        }

        .dark {
            background: #111;
            color: white;
        }

        .doc-box {
            padding: 10px;
            margin-top: 10px;
            background-color: #baecc2;
            border-radius: 6px;
            color: #111 !important;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }

        .doc-full {
            margin: 10px 14px;
            line-height: 1.6rem;
        }

        .instructions {
            color: #111 !important;
            background: #b7bdfd;
            display: block;
            border-radius: 6px;
            padding: 6px 10px;
            line-height: 1.6rem;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }

        .query {
            color: #111 !important;
            background: #ffbcbc;
            display: block;
            border-radius: 6px;
            padding: 6px 10px;
            line-height: 1.6rem;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }
    </style>
</head>
<body>
    <div class="prose svelte-1ybaih5" id="component-6">
        <h2>Prompt</h2>
        Below is the prompt that is given to the model. <hr>
        <h2>Instructions</h2>
        <span class="instructions">You are an assistant to the Expenditure Observer and you have to answer the Expenditure Observer's questions. Use the following pieces of context to answer the question at the end.<br>If you don't know the answer, just say that you don't know, <span style="font-weight: bold;">don't try to make up an answer.</span></span><br>
        <h2>Context</h2>
        {% for doc in documents %}
        <details class="doc-box">
            <summary>
                <b>Doc {{ loop.index }}:</b> <span class="doc-short">{{ doc[:100] }}...</span>
            </summary>
            <div class="doc-full">{{ doc }}</div>
        </details>
        {% endfor %}

        <h2>Query</h2>
        <span class="query">{{ query }}</span>
    </div>

    <script>
        document.addEventListener("DOMContentLoaded", function() {
            const detailsElements = document.querySelectorAll('.doc-box');

            detailsElements.forEach(detail => {
                detail.addEventListener('toggle', function() {
                    const docShort = this.querySelector('.doc-short');
                    if (this.open) {
                        docShort.style.display = 'none';
                    } else {
                        docShort.style.display = 'inline';
                    }
                });
            });
        });
    </script>
</body>
</html>
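template_html.j2 renders the same documents and query as a styled card with collapsible context boxes, presumably for display next to the model's answer (for example through a Gradio HTML output). A rendering sketch under that assumption, not part of this commit:

# Hypothetical: render the HTML prompt view for the UI.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))
html_template = env.get_template("template_html.j2")

docs = ["Expenditure registers are to be reconciled daily."]   # placeholder retrieved documents
prompt_html = html_template.render(documents=docs, query="How often are registers reconciled?")
# In a Gradio app this string could back an HTML component, e.g. gr.HTML(value=prompt_html)  (assumed usage)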