not-lain committed
Commit 31d7c4a • 1 Parent(s): 62765de

🌘w🌖

Files changed (1)
  1. app.py +131 -84
app.py CHANGED
@@ -3,7 +3,7 @@ from datasets import load_dataset

 import os
 import spaces
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import torch
 from threading import Thread
 from sentence_transformers import SentenceTransformer
@@ -11,107 +11,156 @@ from datasets import load_dataset
 import time

 token = os.environ["HF_TOKEN"]
 model = AutoModelForCausalLM.from_pretrained(
-     "google/gemma-7b-it",
-     # torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     torch_dtype=torch.float16,
-     token=token,
 )
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it", token=token)
- device = torch.device("cuda")
- model = model.to(device)
- RAG = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
- TOP_K = 1
- HEADER = "\n# RESOURCES:\n"
- # prepare data
- # since data is too big we will only select the first 3K lines

- data = load_dataset("not-lain/wikipedia-small-3000-embedded", split="train")

- # index dataset
- data.add_faiss_index("embedding")


- def search(query: str, k: int = TOP_K):
-     embedded_query = RAG.encode(query)
-     scores, retrieved_examples = data.get_nearest_examples(
-         "embedding", embedded_query, k=k
     )
-     return retrieved_examples

-
- def prepare_prompt(query, retrieved_examples):
-     prompt = (
-         f"Query: {query}\nContinue to answer the query in short sentences by using the Search Results:\n"
-     )
-     urls = []
-     titles = retrieved_examples["title"][::-1]
-     texts = retrieved_examples["text"][::-1]
-     urls = retrieved_examples["url"][::-1]
-     titles = titles[::-1]
-     for i in range(TOP_K):
-         prompt += f"* {texts[i]}\n"
-     return prompt, zip(titles, urls)


 @spaces.GPU(duration=150)
- def talk(message, history):
-     print("history, ", history)
-     print("message ", message)
-     print("searching dataset ...")
-     retrieved_examples = search(message)
-     print("preparing prompt ...")
-     message, metadata = prepare_prompt(message, retrieved_examples)
-     resources = HEADER
-     print("preparing metadata ...")
-     for title, url in metadata:
-         resources += f"[{title}]({url}), "
-     print("preparing chat template ...")
-     chat = []
-     for item in history:
-         chat.append({"role": "user", "content": item[0]})
-         cleaned_past = item[1].split(HEADER)[0]
-         chat.append({"role": "assistant", "content": cleaned_past})
-     chat.append({"role": "user", "content": message})
-     messages = tokenizer.apply_chat_template(
-         chat, tokenize=False, add_generation_prompt=True
     )
-     print("chat template prepared, ", messages)
-     print("tokenizing input ...")
-     # Tokenize the messages string
-     model_inputs = tokenizer([messages], return_tensors="pt").to(device)
     streamer = TextIteratorStreamer(
-         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
-     )
     generate_kwargs = dict(
-         model_inputs,
         streamer=streamer,
         max_new_tokens=1024,
         do_sample=True,
         top_p=0.95,
-         top_k=1000,
         temperature=0.75,
-         num_beams=1,
     )
-     print("initializing thread ...")
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
-     time.sleep(1)
-     # Initialize an empty string to store the generated text
-     partial_text = ""
-     i = 0
-     while t.is_alive():
-         try:
-             for new_text in streamer:
-                 if new_text is not None:
-                     partial_text += new_text
-                     yield partial_text
-         except Exception as e:
-             print(f"retry number {i}\n LOGS:\n")
-             i += 1
-             print(e, e.args)
-     partial_text += resources
-     yield partial_text


 TITLE = "# RAG"
@@ -122,11 +171,9 @@ A rag pipeline with a chatbot feature
 Resources used to build this project :

 * embedding model : https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
- * dataset : https://huggingface.co/datasets/not-lain/wikipedia-small-3000-embedded (used mxbai-colbert-large-v1 to create the embedding column )
 * faiss docs : https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
- * chatbot : https://huggingface.co/google/gemma-7b-it
-
- If you want to support my work consider clicking on the heart react button ❤️🤗
 """
 
 import os
 import spaces
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
 import torch
 from threading import Thread
 from sentence_transformers import SentenceTransformer

 import time

 token = os.environ["HF_TOKEN"]
+ ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
+
+ dataset = load_dataset("not-lain/wikipedia", revision="embedded")
+
+ data = dataset["train"]
+ data = data.add_faiss_index("embeddings")  # column name that holds the embeddings of the dataset
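The FAISS index built by add_faiss_index above lives in memory and is rebuilt on every startup. As a minimal sketch (not part of this commit; the file name is illustrative), the datasets library can also persist and reload it:

# sketch only: persist the FAISS index so later runs can skip re-indexing
data.save_faiss_index("embeddings", "wikipedia.faiss")
# ...and in a later run, after loading the same split:
data.load_faiss_index("embeddings", "wikipedia.faiss")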
+
+
+ model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+ # use 4-bit quantization to lower GPU memory usage
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
 model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     quantization_config=bnb_config,
+     token=token,
 )
+ terminators = [
+     tokenizer.eos_token_id,
+     tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+ ]
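Loading the 8B model in 4-bit NF4 through bitsandbytes is what keeps it within a single Space GPU. A small sketch (not part of this commit) to confirm the savings and the placement chosen by device_map="auto":

# sketch only: inspect the quantized model's footprint and device placement
print(f"memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
print(model.hf_device_map)  # layer-to-device mapping produced by device_map="auto"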
 
 
 
 
+ SYS_PROMPT = """You are an assistant for answering questions.
+ You are given the extracted parts of a long document and a question. Provide a conversational answer.
+ If you don't know the answer, just say "I do not know." Don't make up an answer."""


+ def search(query: str, k: int = 3):
+     """a function that embeds a new query and returns the most probable results"""
+     embedded_query = ST.encode(query)  # embed new query
+     scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
+         "embeddings", embedded_query,  # compare our new embedded query with the dataset embeddings
+         k=k,  # get only top k results
     )
+     return scores, retrieved_examples

+ def format_prompt(prompt, retrieved_documents, k):
+     """using the retrieved documents we will prompt the model to generate our responses"""
+     PROMPT = f"Question:{prompt}\nContext:"
+     for idx in range(k):
+         PROMPT += f"{retrieved_documents['text'][idx]}\n"
+     return PROMPT
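A quick usage sketch of the two helpers above (not part of this commit; the question is illustrative, and the dataset is assumed to expose the 'text' column that format_prompt reads):

# sketch only: retrieve the top 3 passages for a question and build the prompt
question = "What is a large language model?"
scores, docs = search(question, k=3)
prompt = format_prompt(question, docs, 3)
print(scores)        # one FAISS score per retrieved passage
print(prompt[:500])  # the question followed by the retrieved context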
 
 
 
 
 
 
 
 @spaces.GPU(duration=150)
+ def talk(message, history):
+     k = 1  # number of retrieved documents
+     scores, retrieved_documents = search(message, k)
+     formatted_prompt = format_prompt(message, retrieved_documents, k)
+     formatted_prompt = formatted_prompt[:2000]  # to avoid GPU OOM
+     messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
+     # tell the model to generate
+     input_ids = tokenizer.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         return_tensors="pt",
+     ).to(model.device)
+     outputs = model.generate(
+         input_ids,
+         max_new_tokens=1024,
+         eos_token_id=terminators,
+         do_sample=True,
+         temperature=0.6,
+         top_p=0.9,
     )
     streamer = TextIteratorStreamer(
+         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+     )
     generate_kwargs = dict(
+         input_ids=input_ids,
         streamer=streamer,
         max_new_tokens=1024,
         do_sample=True,
         top_p=0.95,
         temperature=0.75,
+         eos_token_id=terminators,
     )
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         print(outputs)
+         yield "".join(outputs)
+
+ # def talk(message, history):
+ #     print("history, ", history)
+ #     print("message ", message)
+ #     print("searching dataset ...")
+ #     retrieved_examples = search(message)
+ #     print("preparing prompt ...")
+ #     message, metadata = prepare_prompt(message, retrieved_examples)
+ #     resources = HEADER
+ #     print("preparing metadata ...")
+ #     for title, url in metadata:
+ #         resources += f"[{title}]({url}), "
+ #     print("preparing chat template ...")
+ #     chat = []
+ #     for item in history:
+ #         chat.append({"role": "user", "content": item[0]})
+ #         cleaned_past = item[1].split(HEADER)[0]
+ #         chat.append({"role": "assistant", "content": cleaned_past})
+ #     chat.append({"role": "user", "content": message})
+ #     messages = tokenizer.apply_chat_template(
+ #         chat, tokenize=False, add_generation_prompt=True
+ #     )
+ #     print("chat template prepared, ", messages)
+ #     print("tokenizing input ...")
+ #     # Tokenize the messages string
+ #     model_inputs = tokenizer([messages], return_tensors="pt").to(device)
+ #     streamer = TextIteratorStreamer(
+ #         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+ #     )
+ #     generate_kwargs = dict(
+ #         model_inputs,
+ #         streamer=streamer,
+ #         max_new_tokens=1024,
+ #         do_sample=True,
+ #         top_p=0.95,
+ #         top_k=1000,
+ #         temperature=0.75,
+ #         num_beams=1,
+ #     )
+ #     print("initializing thread ...")
+ #     t = Thread(target=model.generate, kwargs=generate_kwargs)
+ #     t.start()
+ #     time.sleep(1)
+ #     # Initialize an empty string to store the generated text
+ #     partial_text = ""
+ #     i = 0
+ #     while t.is_alive():
+ #         try:
+ #             for new_text in streamer:
+ #                 if new_text is not None:
+ #                     partial_text += new_text
+ #                     yield partial_text
+ #         except Exception as e:
+ #             print(f"retry number {i}\n LOGS:\n")
+ #             i += 1
+ #             print(e, e.args)
+ #     partial_text += resources
+ #     yield partial_text


 TITLE = "# RAG"

 Resources used to build this project :

 * embedding model : https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
+ * dataset : https://huggingface.co/datasets/not-lain/wikipedia
 * faiss docs : https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
+ * chatbot : https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
 """
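The Gradio wiring sits outside this hunk. A minimal sketch of how talk, TITLE, and the description string are typically exposed in a Space (assumed, not shown in this diff; DESCRIPTION is a hypothetical name for the triple-quoted string above):

# sketch only: hook the streaming talk() function into a chat UI (assumed wiring, not in this commit)
import gradio as gr

demo = gr.ChatInterface(
    fn=talk,
    title=TITLE,
    description=DESCRIPTION,
)
demo.launch()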