IELTS8 committed on
Commit 1696455
1 Parent(s): e819ea3

Upload 3 files

app_modules/overwrites.py ADDED
@@ -0,0 +1,57 @@
+ from __future__ import annotations
+ import logging
+ 
+ from llama_index import Prompt
+ from typing import List, Tuple
+ import mdtex2html
+ 
+ from app_modules.presets import *
+ from app_modules.utils import *
+ 
+ 
+ def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
+     logging.debug("Compacting text chunks...🚀🚀🚀")
+     combined_str = [c.strip() for c in text_chunks if c.strip()]
+     combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
+     combined_str = "\n\n".join(combined_str)
+     # resplit based on self.max_chunk_overlap
+     text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
+     return text_splitter.split_text(combined_str)
+ 
+ 
+ def postprocess(
+     self, y: List[Tuple[str | None, str | None]]
+ ) -> List[Tuple[str | None, str | None]]:
+     """
+     Parameters:
+         y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
+     Returns:
+         List of tuples representing the message and response. Each message and response will be a string of HTML.
+     """
+     if y is None or y == []:
+         return []
+     temp = []
+     for x in y:
+         user, bot = x
+         if not detect_converted_mark(user):
+             user = convert_asis(user)
+         if not detect_converted_mark(bot):
+             bot = convert_mdtext(bot)
+         temp.append((user, bot))
+     return temp
+ 
+ 
+ with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
+     customJS = f.read()
+     kelpyCodos = f2.read()
+ 
+ 
+ def reload_javascript():
+     print("Reloading javascript...")
+     js = f"<script>{customJS}</script><script>{kelpyCodos}</script>"
+ 
+     def template_response(*args, **kwargs):
+         res = GradioTemplateResponseOriginal(*args, **kwargs)
+         res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
+         res.init_headers()
+         return res
+ 
+     gr.routes.templates.TemplateResponse = template_response
+ 
+ 
+ GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
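
Usage sketch (assumed wiring, not part of this commit): these overrides only take effect once the app entry point patches Gradio with them, presumably along these lines:

import gradio as gr
from app_modules.overwrites import postprocess, reload_javascript

# Assumption: the demo swaps Chatbot's Markdown renderer for the custom one
# and injects the two JS files into every rendered page before building the UI.
gr.Chatbot.postprocess = postprocess
reload_javascript()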
app_modules/presets.py ADDED
@@ -0,0 +1,83 @@
+ # -*- coding:utf-8 -*-
+ import gradio as gr
+ 
+ 
+ title = """<h1 align="left" style="min-width:200px; margin-top:0;"> <img src="https://raw.githubusercontent.com/twitter/twemoji/master/assets/svg/1f432.svg" width="32px" style="display: inline"> Chat with Baize </h1>"""
+ description_top = """\
+ <div align="left">
+ <p> Currently Running: <a href="https://huggingface.co/project-baize/baize-v2-7b">baize-v2-7b</a></p>
+ <p>
+ Disclaimer: The LLaMA model is a third-party version available on the Hugging Face model hub. This demo should be used for research purposes only. Commercial use is strictly prohibited. The model output is not censored, and the authors do not endorse the opinions in the generated content. Use at your own risk.
+ </p>
+ </div>
+ """
+ description = """\
+ <div align="center" style="margin:16px 0">
+ The demo is built on <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChatGPT</a>.
+ </div>
+ """
+ CONCURRENT_COUNT = 100
+ 
+ ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
+ 
+ small_and_beautiful_theme = gr.themes.Soft(
+     primary_hue=gr.themes.Color(
+         c50="#02C160",
+         c100="rgba(2, 193, 96, 0.2)",
+         c200="#02C160",
+         c300="rgba(2, 193, 96, 0.32)",
+         c400="rgba(2, 193, 96, 0.32)",
+         c500="rgba(2, 193, 96, 1.0)",
+         c600="rgba(2, 193, 96, 1.0)",
+         c700="rgba(2, 193, 96, 0.32)",
+         c800="rgba(2, 193, 96, 0.32)",
+         c900="#02C160",
+         c950="#02C160",
+     ),
+     secondary_hue=gr.themes.Color(
+         c50="#576b95",
+         c100="#576b95",
+         c200="#576b95",
+         c300="#576b95",
+         c400="#576b95",
+         c500="#576b95",
+         c600="#576b95",
+         c700="#576b95",
+         c800="#576b95",
+         c900="#576b95",
+         c950="#576b95",
+     ),
+     neutral_hue=gr.themes.Color(
+         name="gray",
+         c50="#f9fafb",
+         c100="#f3f4f6",
+         c200="#e5e7eb",
+         c300="#d1d5db",
+         c400="#B2B2B2",
+         c500="#808080",
+         c600="#636363",
+         c700="#515151",
+         c800="#393939",
+         c900="#272727",
+         c950="#171717",
+     ),
+     radius_size=gr.themes.sizes.radius_sm,
+ ).set(
+     button_primary_background_fill="#06AE56",
+     button_primary_background_fill_dark="#06AE56",
+     button_primary_background_fill_hover="#07C863",
+     button_primary_border_color="#06AE56",
+     button_primary_border_color_dark="#06AE56",
+     button_primary_text_color="#FFFFFF",
+     button_primary_text_color_dark="#FFFFFF",
+     button_secondary_background_fill="#F2F2F2",
+     button_secondary_background_fill_dark="#2B2B2B",
+     button_secondary_text_color="#393939",
+     button_secondary_text_color_dark="#FFFFFF",
+     # background_fill_primary="#F7F7F7",
+     # background_fill_primary_dark="#1F1F1F",
+     block_title_text_color="*primary_500",
+     block_title_background_fill="*primary_100",
+     input_background_fill="#F6F6F6",
+ )
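
Usage sketch (assumed, not part of this commit): the theme and HTML fragments are presumably consumed when the demo's Blocks layout is built, roughly:

import gradio as gr
from app_modules.presets import (
    title, description_top, description, small_and_beautiful_theme
)

# Assumption: a minimal Blocks layout wired to the presets defined above.
with gr.Blocks(theme=small_and_beautiful_theme) as demo:
    gr.HTML(title)
    gr.Markdown(description_top)
    chatbot = gr.Chatbot()
    gr.Markdown(description)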
app_modules/utils.py ADDED
@@ -0,0 +1,376 @@
+ # -*- coding:utf-8 -*-
+ from __future__ import annotations
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Tuple, Type
+ import logging
+ import json
+ import os
+ import datetime
+ import hashlib
+ import csv
+ import requests
+ import re
+ import html
+ import markdown2
+ import torch
+ import sys
+ import gc
+ 
+ import gradio as gr
+ from pypinyin import lazy_pinyin
+ import tiktoken
+ import mdtex2html
+ from markdown import markdown
+ from pygments import highlight
+ from pygments.lexers import guess_lexer, get_lexer_by_name, ClassNotFound
+ from pygments.formatters import HtmlFormatter
+ import transformers
+ from peft import PeftModel
+ from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
+ 
+ from app_modules.presets import *
+ 
+ logging.basicConfig(
+     level=logging.INFO,
+     format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
+ )
+ 
+ 
+ def markdown_to_html_with_syntax_highlight(md_str):
+     def replacer(match):
+         lang = match.group(1) or "text"
+         code = match.group(2)
+         lang = lang.strip()
+         if lang == "text":
+             lexer = guess_lexer(code)
+             lang = lexer.name
+         try:
+             lexer = get_lexer_by_name(lang, stripall=True)
+         except ValueError:
+             lexer = get_lexer_by_name("python", stripall=True)
+         formatter = HtmlFormatter()
+         highlighted_code = highlight(code, lexer, formatter)
+ 
+         return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'
+ 
+     code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
+     md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)
+ 
+     html_str = markdown(md_str)
+     return html_str
+ 
+ 
+ def normalize_markdown(md_text: str) -> str:
+     lines = md_text.split("\n")
+     normalized_lines = []
+     inside_list = False
+ 
+     for i, line in enumerate(lines):
+         if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
+             if not inside_list and i > 0 and lines[i - 1].strip() != "":
+                 normalized_lines.append("")
+             inside_list = True
+             normalized_lines.append(line)
+         elif inside_list and line.strip() == "":
+             if i < len(lines) - 1 and not re.match(
+                 r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
+             ):
+                 normalized_lines.append(line)
+             continue
+         else:
+             inside_list = False
+             normalized_lines.append(line)
+ 
+     return "\n".join(normalized_lines)
+ 
+ 
+ def convert_mdtext(md_text):
+     code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
+     inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
+     code_blocks = code_block_pattern.findall(md_text)
+     non_code_parts = code_block_pattern.split(md_text)[::2]
+ 
+     result = []
+     for non_code, code in zip(non_code_parts, code_blocks + [""]):
+         if non_code.strip():
+             non_code = normalize_markdown(non_code)
+             if inline_code_pattern.search(non_code):
+                 result.append(markdown(non_code, extensions=["tables"]))
+             else:
+                 result.append(mdtex2html.convert(non_code, extensions=["tables"]))
+         if code.strip():
+             code = f"\n```{code}\n\n```"
+             code = markdown_to_html_with_syntax_highlight(code)
+             result.append(code)
+     result = "".join(result)
+     result += ALREADY_CONVERTED_MARK
+     return result
+ 
+ 
+ def convert_asis(userinput):
+     return f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>' + ALREADY_CONVERTED_MARK
+ 
+ 
+ def detect_converted_mark(userinput):
+     # Guard against None entries, which the postprocess type hints allow.
+     return bool(userinput) and userinput.endswith(ALREADY_CONVERTED_MARK)
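
Note the contract the converters above share: each appends ALREADY_CONVERTED_MARK so that postprocess in overwrites.py can tell rendered history from raw text. A minimal illustration (output abbreviated):

from app_modules.utils import convert_asis, detect_converted_mark

s = convert_asis("x < y")  # '<p style="white-space:pre-wrap;">x &lt; y</p><!-- ALREADY CONVERTED BY PARSER. -->'
detect_converted_mark(s)   # True, so a second pass leaves it untouched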
+ 
+ 
+ def detect_language(code):
+     if code.startswith("\n"):
+         first_line = ""
+     else:
+         first_line = code.strip().split("\n", 1)[0]
+     language = first_line.lower() if first_line else ""
+     code_without_language = code[len(first_line):].lstrip() if first_line else code
+     return language, code_without_language
+ 
+ 
+ def convert_to_markdown(text):
+     text = text.replace("$", "&#36;")
+ 
+     def replace_leading_tabs_and_spaces(line):
+         new_line = []
+         for char in line:
+             if char == "\t":
+                 new_line.append("&#9;")
+             elif char == " ":
+                 new_line.append("&nbsp;")
+             else:
+                 break
+         return "".join(new_line) + line[len(new_line):]
+ 
+     markdown_text = ""
+     lines = text.split("\n")
+     in_code_block = False
+ 
+     for line in lines:
+         if in_code_block is False and line.startswith("```"):
+             in_code_block = True
+             markdown_text += f"{line}\n"
+         elif in_code_block is True and line.startswith("```"):
+             in_code_block = False
+             markdown_text += f"{line}\n"
+         elif in_code_block:
+             markdown_text += f"{line}\n"
+         else:
+             line = replace_leading_tabs_and_spaces(line)
+             line = re.sub(r"^(#)", r"\\\1", line)
+             markdown_text += f"{line}  \n"
+ 
+     return markdown_text
+ 
+ 
+ def add_language_tag(text):
+     def detect_language(code_block):
+         try:
+             lexer = guess_lexer(code_block)
+             return lexer.name.lower()
+         except ClassNotFound:
+             return ""
+ 
+     code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE)
+ 
+     def replacement(match):
+         code_block = match.group(2)
+         if match.group(2).startswith("\n"):
+             language = detect_language(code_block)
+             if language:
+                 return f"```{language}{code_block}```"
+             else:
+                 return f"```\n{code_block}```"
+         else:
+             return match.group(1) + code_block + "```"
+ 
+     text2 = code_block_pattern.sub(replacement, text)
+     return text2
+ 
+ 
+ def delete_last_conversation(chatbot, history):
+     if len(chatbot) > 0:
+         chatbot.pop()
+ 
+     if len(history) > 0:
+         history.pop()
+ 
+     return (
+         chatbot,
+         history,
+         "Delete Done",
+     )
+ 
+ 
+ def reset_state():
+     return [], [], "Reset Done"
+ 
+ 
+ def reset_textbox():
+     return gr.update(value=""), ""
+ 
+ 
+ def cancel_outputing():
+     return "Stop Done"
+ 
+ 
+ def transfer_input(inputs):
+     # Return everything at once to reduce latency.
+     return (
+         inputs,
+         gr.update(value=""),
+         gr.Button.update(visible=True),
+     )
+ 
+ 
+ class State:
+     interrupted = False
+ 
+     def interrupt(self):
+         self.interrupted = True
+ 
+     def recover(self):
+         self.interrupted = False
+ 
+ 
+ shared_state = State()
+ 
+ 
+ # Token-by-token sampling with temperature and top-p filtering. Despite the
+ # upstream name, this is not pure greedy decoding: the next token is drawn
+ # with torch.multinomial after the top-p mask is applied.
+ def greedy_search(input_ids: torch.Tensor,
+                   model: torch.nn.Module,
+                   tokenizer: transformers.PreTrainedTokenizer,
+                   stop_words: list,
+                   max_length: int,
+                   temperature: float = 1.0,
+                   top_p: float = 1.0,
+                   top_k: int = 25) -> Iterator[str]:
+     generated_tokens = []
+     past_key_values = None
+     for _ in range(max_length):
+         with torch.no_grad():
+             if past_key_values is None:
+                 outputs = model(input_ids)
+             else:
+                 outputs = model(input_ids[:, -1:], past_key_values=past_key_values)
+             logits = outputs.logits[:, -1, :]
+             past_key_values = outputs.past_key_values
+ 
+         # apply temperature
+         logits /= temperature
+ 
+         probs = torch.softmax(logits, dim=-1)
+         # apply top_p
+         probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
+         probs_sum = torch.cumsum(probs_sort, dim=-1)
+         mask = probs_sum - probs_sort > top_p
+         probs_sort[mask] = 0.0
+ 
+         # apply top_k
+         # if top_k is not None:
+         #     probs_sort1, _ = torch.topk(probs_sort, top_k)
+         #     min_top_probs_sort = torch.min(probs_sort1, dim=-1, keepdim=True).values
+         #     probs_sort = torch.where(probs_sort < min_top_probs_sort, torch.full_like(probs_sort, float(0.0)), probs_sort)
+ 
+         probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
+         next_token = torch.multinomial(probs_sort, num_samples=1)
+         next_token = torch.gather(probs_idx, -1, next_token)
+ 
+         input_ids = torch.cat((input_ids, next_token), dim=-1)
+ 
+         generated_tokens.append(next_token[0].item())
+         text = tokenizer.decode(generated_tokens)
+ 
+         yield text
+         if any(x in text for x in stop_words):
+             del past_key_values, logits, probs, probs_sort, probs_idx, probs_sum
+             gc.collect()
+             return
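
A sketch of how the generator is presumably consumed for streamed replies (the stop words come from the prompt format in generate_prompt_with_history below; the rest of the wiring is an assumption, and the None check on the prompt is omitted here):

# Assumption: model/tokenizer/device come from load_tokenizer_and_model below.
prompt, inputs = generate_prompt_with_history("Hello!", [], tokenizer, max_length=2048)
stop_words = ["[|Human|]", "[|AI|]"]
for partial_text in greedy_search(inputs["input_ids"].to(device), model, tokenizer,
                                  stop_words=stop_words, max_length=512,
                                  temperature=0.95, top_p=0.9):
    if is_stop_word_or_prefix(partial_text, stop_words):
        break
    print(partial_text)  # each yield is the full decode so far, not a delta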
+ 
+ 
+ def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
+     prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
+     history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0], x[1]) for x in history]
+     history.append("\n[|Human|]{}\n[|AI|]".format(text))
+     history_text = ""
+     flag = False
+     # Walk the history from newest to oldest, keeping as many turns as fit
+     # within the max_length token budget.
+     for x in history[::-1]:
+         if tokenizer(prompt + history_text + x, return_tensors="pt")["input_ids"].size(-1) <= max_length:
+             history_text = x + history_text
+             flag = True
+         else:
+             break
+     if flag:
+         return prompt + history_text, tokenizer(prompt + history_text, return_tensors="pt")
+     else:
+         return None
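
Note the asymmetric return: a (prompt, encoding) pair on success, but None when even the newest turn exceeds max_length, so callers must guard before unpacking, e.g.:

result = generate_prompt_with_history(user_text, history, tokenizer)
if result is None:
    raise ValueError("input exceeds the 2048-token context budget")  # illustrative handling
prompt, inputs = result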
+ 
+ 
+ def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
+     for stop_word in stop_words:
+         if s.endswith(stop_word):
+             return True
+         # Also flag a trailing partial match, so streaming can pause before
+         # a stop word has been fully emitted.
+         for i in range(1, len(stop_word)):
+             if s.endswith(stop_word[:i]):
+                 return True
+     return False
+ 
+ 
+ def load_tokenizer_and_model(base_model, adapter_model=None, load_8bit=False):
+     if torch.cuda.is_available():
+         device = "cuda"
+     else:
+         device = "cpu"
+ 
+     try:
+         if torch.backends.mps.is_available():
+             device = "mps"
+     except Exception:  # older torch builds have no mps backend
+         pass
+     tokenizer = LlamaTokenizer.from_pretrained(base_model)
+     if device == "cuda":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             load_in_8bit=load_8bit,
+             torch_dtype=torch.float16,
+             device_map="auto",
+         )
+         if adapter_model is not None:
+             model = PeftModel.from_pretrained(
+                 model,
+                 adapter_model,
+                 torch_dtype=torch.float16,
+             )
+     elif device == "mps":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             device_map={"": device},
+             torch_dtype=torch.float16,
+         )
+         if adapter_model is not None:
+             model = PeftModel.from_pretrained(
+                 model,
+                 adapter_model,
+                 device_map={"": device},
+                 torch_dtype=torch.float16,
+             )
+     else:
+         model = LlamaForCausalLM.from_pretrained(
+             base_model, device_map={"": device}, low_cpu_mem_usage=True
+         )
+         if adapter_model is not None:
+             model = PeftModel.from_pretrained(
+                 model,
+                 adapter_model,
+                 device_map={"": device},
+             )
+ 
+     if not load_8bit:
+         model.half()  # seems to fix bugs for some users
+ 
+     model.eval()
+     return tokenizer, model, device
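
A sketch of loading the demo's checkpoint with this helper (the model id matches the one advertised in presets.py; whether it needs a separate LoRA adapter is an assumption):

# Assumption: baize-v2-7b ships merged weights, so no adapter is passed.
tokenizer, model, device = load_tokenizer_and_model(
    "project-baize/baize-v2-7b", adapter_model=None, load_8bit=False
)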