Tonic committed on
Commit
2bc99a0
•
1 Parent(s): 8dcd0f1

Update app.py

Files changed (1)
  1. app.py +48 -64
app.py CHANGED
@@ -1,84 +1,68 @@
- from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, MistralForCausalLM
- from peft import PeftModel, PeftConfig
  import torch
  import gradio as gr
- import random
- from textwrap import wrap

- title = "👋🏻토닉의 미스트랄메드 채팅에 오신 것을 환영합니다🚀👋🏻Welcome to Tonic's MistralMed Chat🚀"
- description = "이 공간을 사용하여 현재 모델을 테스트할 수 있습니다. [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) 또는 이 공간을 복제하고 로컬 또는 🤗HuggingFace에서 사용할 수 있습니다. [Discord에서 함께 만들기 위해 Discord에 가입하십시오](https://discord.gg/VqTxc76K3u). You can use this Space to test out the current model [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) or duplicate this Space and use it locally or on 🤗HuggingFace. [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
- examples = [["[Question:] What is the proper treatment for buccal herpes?", "You are a medicine and public health expert, you will receive a question, answer the question, and provide a complete answer"]]

- base_model_id = "mistralai/Mistral-7B-v0.1"
- model_directory = "Tonic/mistralmed"
- device = "cuda" if torch.cuda.is_available() else "cpu"

- def wrap_text(text, width=90):
-     lines = text.split('\n')
-     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
-     wrapped_text = '\n'.join(wrapped_lines)
-     return wrapped_text

- def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):

-     formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]</s>"

-     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
-     model_inputs = encodeds.to(device)

-     output = model.generate(
-         **model_inputs,
-         max_length=max_length,
-         use_cache=True,
-         early_stopping=True,
-         bos_token_id=model.config.bos_token_id,
-         eos_token_id=model.config.eos_token_id,
-         pad_token_id=model.config.eos_token_id,
-         temperature=0.1,
-         do_sample=False
-     )

-     # Decode the response
-     response_text = tokenizer.decode(output[0], skip_special_tokens=True)

-     return response_text

-
- tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True, padding_side="left")
- tokenizer.pad_token = tokenizer.eos_token
- tokenizer.padding_side = 'left'
- peft_config = PeftConfig.from_pretrained("Tonic/mistralmed")
- peft_model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
- peft_model = PeftModel.from_pretrained(peft_model, "Tonic/mistralmed")
- peft_model = peft_model.to(torch.bfloat16)
- peft_model = peft_model.to(device)
-
- class ChatBot:
-     def __init__(self):
-         self.history = []
-
- class ChatBot:
-     def __init__(self):
-         self.history = []
-
-     def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
-         formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
-         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
-         user_input_ids = user_input_ids.to(device)
-         response = peft_model.generate(input_ids=user_input_ids, max_length=256, pad_token_id=tokenizer.eos_token_id)
-         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-
-         return response_text
-
- bot = ChatBot()

  iface = gr.Interface(
-     fn=bot.predict,
      title=title,
      description=description,
-     examples=examples,
-     inputs=["text", "text"],
      outputs="text",
      theme="ParityError/Anime"
  )
 
+ import os
+ import math
+ import transformers
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch
  import gradio as gr
+ import sentencepiece

+ title = "Welcome to Tonic's 🐋🐳Orca-2-13B (in 8bit)!"
+ description = "You can use [🐋🐳microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) via API using Gradio by scrolling down and clicking Use 'Via API' or privately by [cloning this space on huggingface](https://huggingface.co/spaces/Tonic1/TonicsOrca2?duplicate=true). [Join my active builders' server on discord](https://discord.gg/VqTxc76K3u). Big thanks to the HuggingFace Organisation for the Community Grant."

+ # os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ model_name = "microsoft/Orca-2-13b"
+ # offload_folder = './model_weights'

+ # if not os.path.exists(offload_folder):
+ #     os.makedirs(offload_folder)

+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+ model = transformers.AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)

+ class OrcaChatBot:
+     def __init__(self, model, tokenizer, system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
+         self.model = model
+         self.tokenizer = tokenizer
+         self.system_message = system_message

+     def predict(self, user_message, temperature=0.4, max_new_tokens=70, top_p=0.99, repetition_penalty=1.9):
+         prompt = f"<|im_start|>system\n{self.system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
+         inputs = self.tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
+         input_ids = inputs["input_ids"].to(self.model.device)

+         output_ids = self.model.generate(
+             input_ids,
+             max_length=input_ids.shape[1] + max_new_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             repetition_penalty=repetition_penalty,
+             pad_token_id=self.tokenizer.eos_token_id,
+             do_sample=True
+         )

+         response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)

+         return response

+ Orca_bot = OrcaChatBot(model, tokenizer)

+ def gradio_predict(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty):
+     full_message = f"{system_message}\n{user_message}" if system_message else user_message
+     return Orca_bot.predict(full_message, temperature, max_new_tokens, top_p, repetition_penalty)

  iface = gr.Interface(
+     fn=gradio_predict,
      title=title,
      description=description,
+     inputs=[
+         gr.Textbox(label="Your Message", type="text", lines=3),
+         gr.Textbox(label="Introduce a Character Here or Set a Scene (system prompt)", type="text", lines=2),
+         gr.Slider(label="Max new tokens", value=125, minimum=25, maximum=256, step=1),
+         gr.Slider(label="Temperature", value=0.1, minimum=0.05, maximum=1.0, step=0.05),
+         gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99, step=0.05),
+         gr.Slider(label="Repetition penalty", value=1.9, minimum=1.0, maximum=2.0, step=0.05)
+     ],
      outputs="text",
      theme="ParityError/Anime"
  )
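
The updated description points readers to Gradio's "Use via API" route. Below is a minimal sketch of calling the Space from Python with gradio_client; it assumes the Space linked above (Tonic1/TonicsOrca2) is running, that it exposes the Interface's default /predict endpoint, and that the six inputs are passed in the order they are declared. The example message and system prompt are illustrative only.

# Sketch: query the Space through the Gradio API (assumes the gradio_client
# package is installed and the Space name and endpoint below are correct).
from gradio_client import Client

client = Client("Tonic1/TonicsOrca2")  # Space linked in the description above
result = client.predict(
    "What are the symptoms of influenza?",            # Your Message
    "You are a careful, helpful medical assistant.",  # system prompt textbox
    125,   # Max new tokens
    0.4,   # Temperature
    0.90,  # Top-p (nucleus sampling)
    1.9,   # Repetition penalty
    api_name="/predict",
)
print(result)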