Tonic committed on
Commit b01335d
1 Parent(s): 13d456d

Update app.py

Files changed (1)
  1. app.py +20 -27
app.py CHANGED
@@ -1,18 +1,11 @@
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import gradio as gr
-
- # api token for huggingface.co
- api_token = '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'
-
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import gradio as gr

  # Use the base model's ID
  base_model_id = "mistralai/Mistral-7B-v0.1"

- # Create a configuration object specific to the base model (you can replace with your model's actual configuration if available)
- config = BertConfig()
-
  # Load the fine-tuned model "Tonic/mistralmed"
- model = AutoModel.from_pretrained("Tonic/mistralmed", config=config)
+ model = AutoModelForCausalLM.from_pretrained("Tonic/mistralmed")

  tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
  tokenizer.pad_token = tokenizer.eos_token
@@ -27,25 +20,25 @@ class ChatBot:
  flat_history = [item for sublist in self.history for item in sublist]
  flat_history_tensor = torch.tensor(flat_history).unsqueeze(dim=0)
  bot_input_ids = torch.cat([flat_history_tensor, new_user_input_ids], dim=-1) if self.history else new_user_input_ids
- chat_history_ids = model.generate(bot_input_ids, max_length=2000, pad_token_id=tokenizer.eos_token_id)
+ chat_history_ids = model.generate(bot_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
  self.history.append(chat_history_ids[:, bot_input_ids.shape[-1]:].tolist()[0])
  response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
  return response

  bot = ChatBot()
-
- title = "👋🏻Welcome to Tonic's EZ Chat🚀"
- description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on [Discord](https://discord.gg/fpEPNZGsbt) to build together."
- examples = [["What is the boiling point of nitrogen"]]
-
- iface = gr.Interface(
- fn=bot.predict,
- title=title,
- description=description,
- examples=examples,
- inputs="text",
- outputs="text",
- theme="ParityError/Anime"
- )
-
- iface.launch()
+
+ title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
+ description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
+ examples = [["What is the boiling point of nitrogen"]]
+
+ iface = gr.Interface(
+ fn=bot.predict,
+ title=title,
+ description=description,
+ examples=examples,
+ inputs="text",
+ outputs="text",
+ theme="ParityError/Anime"
+ )
+
+ iface.launch()
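
For reference, a minimal sketch of how the loading and generation path changed by this commit might be exercised on its own, outside the Gradio interface. The single-turn prompt handling below is a simplified stand-in for the Space's ChatBot.predict flow (which keeps per-session history); the model and tokenizer IDs and the max_length=512 cap are taken from the diff, everything else is illustrative.

from transformers import AutoTokenizer, AutoModelForCausalLM

base_model_id = "mistralai/Mistral-7B-v0.1"

# Load the fine-tuned checkpoint; AutoModelForCausalLM reads the config
# shipped with the checkpoint, so no hand-built config object is needed.
model = AutoModelForCausalLM.from_pretrained("Tonic/mistralmed")
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# Single-turn probe (illustrative stand-in for the Space's history-aware ChatBot).
prompt = "What is the boiling point of nitrogen"
input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")

# max_length=512 caps prompt + reply tokens, matching the value set in this commit.
output_ids = model.generate(input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
reply = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
print(reply)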