Update app.py
from datasets import load_dataset
from datasets import Dataset
from sentence_transformers import SentenceTransformer
import faiss
import time
import json
#import torch
import pandas as pd
from llama_cpp import Llama
#from langchain_community.llms import LlamaCpp
from threading import Thread
from huggingface_hub import Repository, upload_file
import os
HF_TOKEN = os.getenv('HF_Token')
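# Note: os.getenv returns None if the 'HF_Token' secret is not set; the token is only
# needed if the chat logs are later pushed to the Hub (see the commented push_to_hub call below).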
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'
historylog = [{
    "Prompt": '',
    "Output": ''
}]
data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a Dataset object; each row behaves like a dictionary keyed by column name.
length = len(data)
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimension of the embedding vectors
index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# Attaches a FAISS index built over the dataset's existing "embeddings" column
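# Hypothetical sanity check (not in the original app), assuming each row stores its
# vector under the "embeddings" column indexed above:
#   assert len(data[0]["embeddings"]) == embedding_dim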
#question = "How can I reverse Diabetes?"
SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of documents and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer. Don't repeat the SYS_PROMPT."""
# Provides context of how to answer the question
#llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF , TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF"
model = Llama(
    model_path="./llama-2-7b-chat.Q4_K_M.gguf",
#    chat_format="llama-2",
    n_gpu_layers=0,
    temperature=0.75,
    n_ctx=4096,
    max_tokens=500,
    top_p=0.95 #,
#    eos_tokens=terminators
#    callback_manager=callback_manager,
#    verbose=True,  # Verbose is required to pass to the callback manager
)
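# Note: the create_chat_completion call further below supplies its own max_tokens and stop
# values at generation time, so those settings govern the actual responses.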
#initiate model and tokenizer
def search(query: str, k: int = 2):
    """Embed a new query and return the most probable results."""
    embedded_query = embedding_model.encode(query)  # create an embedding of the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
        k=k  # keep only the top k results
    )
    return scores, retrieved_examples
# Returns scores (List[float]), the retrieval scores from FAISS (IndexFlatL2 by default), and examples (dict)
# called by talk function that passes prompt
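# Illustrative usage (hypothetical question, assuming the dataset's "embeddings" column
# was built with the same all-MiniLM-L6-v2 model):
#   scores, docs = search("What are normal blood sugar levels?", k=2)
#   print(scores)       # L2 distances; smaller means a closer match
#   print(docs.keys())  # column names of the retrieved rows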
def format_prompt(prompt, retrieved_documents, k, history, memory_limit=3):
    """Combine the retrieved documents and recent history into a prompt for the model."""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['0'][idx]}\n"
    print("historyinfo")
    print(history)
    if len(history) == 0:
        return PROMPT
    if len(history) > memory_limit:
        history = history[-memory_limit:]
    print("checkwohist")
#    PROMPT = PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
#    print("checkwthhist")
#    print(PROMPT)
    # Handle conversation history
    for user_message, bot_message in history[0:]:
        PROMPT += f"<s>[INST] {user_message} [/INST] {bot_message} </s>"
        print("checkwthhist2")
    print(PROMPT)
    return PROMPT
# Called by the talk function to add retrieved documents to the prompt. Keeps appending the text of retrieved documents to the prompt string.
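# Illustrative shape of the string format_prompt builds (values are placeholders):
#   Question:<latest user question>
#   Context:<text of retrieved document 1>
#   <text of retrieved document 2>
#   <s>[INST] <earlier user turn> [/INST] <earlier bot reply> </s>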
def talk(prompt, history):
    k = 2  # number of retrieved documents
    scores, retrieved_documents = search(prompt, k)  # get retrieval scores and examples (dict) based on the prompt passed
    print(retrieved_documents.keys())
#    print("check4")
    formatted_prompt = format_prompt(prompt, retrieved_documents, k, history, memory_limit=3)  # create a new prompt using the retrieved documents
    print("check5")
    pd.options.display.max_colwidth = 4000
#    print(retrieved_documents['0'])
#    print(formatted_prompt)
#    formatted_prompt_with_history = add_history(formatted_prompt, history)
#    formatted_prompt_with_history = formatted_prompt_with_history[:600]  # to avoid memory issues
#    print(formatted_prompt_with_history)
    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
    print(messages)
    # Bind the system context and the new prompt for the LLM
    # The chat template structure should match the text generation model's format
    # "</s>" indicates the end of a sequence
    stream = model.create_chat_completion(messages=messages, max_tokens=1000, stop=["</s>"], stream=False)
#    print(f"{stream}")
    print("check 7")
    print(stream['choices'][0]['message']['content'])
    return stream['choices'][0]['message']['content']
# text = ""
# for output in stream:
# text += output['choices'][0]['message']['content']
# print(f"{output}")
# print("check3H")
# print(text)
# yield text
# calling the model to generate response based on message/ input
# do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
# temperature controls randomness; higher temperature means more randomness
# only the tokens comprising the top_p probability mass are considered for responses
# The output is a dictionary containing everything returned by create_chat_completion(); the answer text is under choices[0]['message']['content'].
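# A minimal sketch of how talk could stream tokens instead of returning one block
# (assumption: llama-cpp-python's OpenAI-style chunk format with a "delta" field when stream=True):
#   stream = model.create_chat_completion(messages=messages, max_tokens=1000,
#                                         stop=["</s>"], stream=True)
#   text = ""
#   for chunk in stream:
#       text += chunk["choices"][0]["delta"].get("content", "")
#       yield text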
TITLE = "AI Copilot for Diabetes Patients"
DESCRIPTION = "I provide answers to concerns related to Diabetes"
import gradio as gr
# Design chatbot
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="Soft",
    examples=[["what is Diabetes?"]],
    title=TITLE,
    description=DESCRIPTION,
)
# Launches the chatbot; it calls the talk function, which in turn calls the other functions
print("check14")
#print(historylog)
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
demo.launch()
@@ -1,179 +0,0 @@
-from datasets import load_dataset
-from datasets import Dataset
-from sentence_transformers import SentenceTransformer
-import faiss
-import time
-from datetime import datetime
-import json
-#import torch
-import uuid
-import pandas as pd
-from llama_cpp import Llama
-#from langchain_community.llms import LlamaCpp
-from threading import Thread
-from huggingface_hub import Repository, upload_file
-import os
-
-
-HF_TOKEN = os.getenv('HF_Token')
-logfile = 'DiabetesChatLog.txt'
-
-data = load_dataset("Namitg02/Test", split='train', streaming=False)
-#Returns a list of dictionaries, each representing a row in the dataset.
-length = len(data)
-
-embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
-embedding_dim = embedding_model.get_sentence_embedding_dimension()
-# Returns dimensions of embedidng
-
-
-index = faiss.IndexFlatL2(embedding_dim)
-data.add_faiss_index("embeddings", custom_index=index)
-# adds an index column for the embeddings
-
-#question = "How can I reverse Diabetes?"
-
-SYS_PROMPT = """You are an assistant for answering questions like a medical person.
-You are given the extracted parts of document, a question and history of questions and answers . Provide a brief conversational answer.
-If you do not know the answer, just say "I do not know." Do not make up an answer. Don't repeat the SYS_PROMPT or say that you are referring to document or an article."""
-# Provides context of how to answer the question
-
-
-model = Llama(
-    model_path="./meditron-7b-chat.Q4_K_M.gguf",
-#    chat_format="llama-2",
-    n_gpu_layers = 0,
-    temperature=0.75,
-    n_ctx = 4096,
-    top_p=0.95 #,
-#    eos_tokens=terminators
-#    callback_manager=callback_manager,
-#    verbose=True, # Verbose is required to pass to the callback manager
-)
-#initiate model and tokenizer
-
-def search(query: str, k: int = 2 ):
-    """a function that embeds a new query and returns the most probable results"""
-    embedded_query = embedding_model.encode(query) # create embedding of a new query
-    scores, retrieved_examples = data.get_nearest_examples( # retrieve results
-        "embeddings", embedded_query, # compare our new embedded query with the dataset embeddings
-        k=k # get only top k results
-    )
-    return scores, retrieved_examples
-# returns scores (List[float]): the retrieval scores from either FAISS (IndexFlatL2 by default) and examples (dict) format
-# called by talk function that passes prompt
-
-def format_prompt(prompt,retrieved_documents,k,history,memory_limit=3):
-    """using the retrieved documents we will prompt the model to generate our responses"""
-    PROMPT = f"Question:{prompt}\nContext:"
-    for idx in range(k) :
-        PROMPT+= f"{retrieved_documents['0'][idx]}\n"
-    print("historyinfo")
-    print(f"{history}")
-    if len(history) == 0:
-        return PROMPT
-
-    if len(history) > memory_limit:
-        history = history[-memory_limit:]
-
-    print("checkwohist")
-#    PROMPT = PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
-    # Handle conversation history
-    for user_message, bot_message in history[0:]:
-        PROMPT += f"<s>[INST] {user_message} [/INST] {bot_message} </s>"
-        print("checkwthhist2")
-
-    return PROMPT
-
-
-# Called by talk function to add retrieved documents to the prompt. Keeps adding text of retrieved documents to string that are retreived
-
-def talk(prompt, history):
-    k = 2 # number of retrieved documents
-    scores , retrieved_documents = search(prompt, k) # get retrival scores and examples in dictionary format based on the prompt passed
-    print(retrieved_documents.keys())
-#    print("check4")
-    formatted_prompt = format_prompt(prompt,retrieved_documents,k,history,memory_limit=3) # create a new prompt using the retrieved documents
-    print("check5")
-    pd.options.display.max_colwidth = 4000
-    messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
-    print(messages)
-    # binding the system context and new prompt for LLM
-    # the chat template structure should be based on text generation model format
-
-    # indicates the end of a sequence
-    stream = model.create_chat_completion(messages = messages,max_tokens =400, stop=["</s>"], stream=False)
-#    print(f"{stream}")
-    print("check 7")
-    print(stream['choices'][0]['message']['content'])
-    response = stream['choices'][0]['message']['content']
-
-#    for user_message, bot_message in history[0:]:
-#        historylog += f"<s>[INST] {user_message} [/INST] {bot_message} </s>"
-    historylog = ''
-    historylog += f"{prompt} \n {response} "
-    print("history log")
-    print(str(historylog))
-    print("history log string printed")
-
-    try:
-        # write data to file
-        unique_filename = f"file_{uuid.uuid4()}.txt"
-        with open(unique_filename, "a") as data:
-            data.write(historylog)
-
-        print("History log printed:")
-        with open(unique_filename, "r") as data:
-            print(data.read())
-
-
-    except IOError as e:
-        print(f"An error occurred: {e}")
-
-#    from huggingface_hub import HfApi
-#    api = HfApi()
-#    api.upload_file(
-#        path_or_fileobj="./file.txt",
-#        path_in_repo="file.txt",
-#        repo_id="Namitg02/Test",
-#        repo_type="space"
-#    )
-
-    print("upload section passed")
-
-    for i in range(len(response)):
-        time.sleep(0.05)
-        yield response[: i+1]
-
-# calling the model to generate response based on message/ input
-# do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
-# temperature controls randomness. more renadomness with higher temperature
-# only the tokens comprising the top_p probability mass are considered for responses
-# This output is a data structure containing all the information returned by generate(), but that can also be used as tuple or dictionary.
-
-TITLE = "AI Copilot for Diabetes Patients"
-
-DESCRIPTION = "I provide answers to concerns related to Diabetes"
-
-import gradio as gr
-# Design chatbot
-demo = gr.ChatInterface(
-    fn=talk,
-    chatbot=gr.Chatbot(
-        show_label=True,
-        show_share_button=True,
-        show_copy_button=True,
-        likeable=True,
-        layout="bubble",
-        bubble_full_width=False,
-    ),
-    theme="Soft",
-    examples=[["what is Diabetes?"]],
-    title=TITLE,
-    description=DESCRIPTION,
-)
-# launch chatbot and calls the talk function which in turn calls other functions
-print("check14")
-
-demo.launch()
-#demo.launch(auth=("namit", "wolfmagic"))