jfelipenc committed on
Commit 2279621
Parent: 7fae487

Create app.py

Files changed (1): app.py +81 -0
app.py ADDED
@@ -0,0 +1,81 @@
import torch
import gradio as gr
from transformers import AutoTokenizer, MistralForCausalLM
from peft import PeftModel, PeftConfig
from textwrap import fill

MAX_LENGTH = 1000

device = "cuda" if torch.cuda.is_available() else "cpu"

base_model_id = "mistralai/Mistral-7B-v0.1"
model_directory = "Tonic/mistralmed"

# Tokenizer for the base model; pad with EOS on the left so padding
# does not interfere with generation.
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token

# Load the base Mistral weights, then apply the MistralMed PEFT adapter on top.
peft_config = PeftConfig.from_pretrained(model_directory, token="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")
peft_model = MistralForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
peft_model = PeftModel.from_pretrained(peft_model, model_directory, token="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")
peft_model = peft_model.to(device)


def wrap_text(text, width=90):
    """Wrap each line of `text` to `width` characters, preserving existing line breaks."""
    lines = text.split("\n")
    wrapped_lines = [fill(line, width=width) for line in lines]
    return "\n".join(wrapped_lines)


def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
    """Sampling-based generation; an alternative entry point to ChatBot.predict below."""
    formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

    # The prompt already contains <s>, so skip the tokenizer's own special tokens.
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
    model_inputs = encodeds.to(device)

    output = peft_model.generate(
        **model_inputs,
        max_length=MAX_LENGTH,
        use_cache=True,
        bos_token_id=peft_model.config.bos_token_id,
        eos_token_id=peft_model.config.eos_token_id,
        pad_token_id=peft_model.config.eos_token_id,
        temperature=0.1,
        do_sample=True,
    )

    return tokenizer.decode(output[0], skip_special_tokens=True)


class ChatBot:
    def __init__(self):
        self.history = []

    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
        formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

        # As above, the <s> token is already in the prompt string.
        user_input_ids = tokenizer.encode(
            formatted_input, return_tensors="pt", add_special_tokens=False
        ).to(device)

        response = peft_model.generate(
            input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id
        )
        return tokenizer.decode(response[0], skip_special_tokens=True)


bot = ChatBot()

title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
description = (
    "You can use this Space to test out the current model "
    "[(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) or duplicate this Space "
    "and use it locally or on 🤗HuggingFace. "
    "[Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
)
examples = [[
    "[Question:] What is the proper treatment for buccal herpes?",
    "You are a medicine and public health expert, you will receive a question, "
    "answer the question, and complete the answer",
]]

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "text"],
    outputs="text",
    theme="ParityError/Anime",
)

iface.launch()
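
Once deployed, the predict endpoint behind this interface can also be exercised programmatically. Below is a minimal sketch using gradio_client; the Space ID "your-username/mistralmed-chat" is a hypothetical placeholder for wherever this app.py actually runs, not a confirmed deployment.

from gradio_client import Client

# Hypothetical Space ID -- replace with the Space this app.py is deployed to.
client = Client("your-username/mistralmed-chat")

# gr.Interface exposes the wrapped fn as the "/predict" API endpoint;
# positional arguments match bot.predict(user_input, system_prompt).
answer = client.predict(
    "[Question:] What is the proper treatment for buccal herpes?",  # user_input
    "You are an expert medical analyst:",                           # system_prompt
    api_name="/predict",
)
print(answer)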