Tonic committed
Commit 6b13747
1 Parent(s): 6282503

Update app.py

Files changed (1):
  1. app.py  +4 -17
app.py CHANGED
@@ -5,18 +5,6 @@ import gradio as gr
 import random
 from textwrap import wrap
 
-# Define the PeftConfig
-peft_config = PeftConfig(
-    max_length=500,
-    use_cache=True,
-    early_stopping=False,
-    bos_token_id=peft_model.config.bos_token_id,
-    eos_token_id=peft_model.config.eos_token_id,
-    pad_token_id=peft_model.config.eos_token_id,
-    temperature=0.4,
-    do_sample=True
-)
-
 # Functions to Wrap the Prompt Correctly
 def wrap_text(text, width=90):
     lines = text.split('\n')
@@ -62,21 +50,20 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Use the base model's ID
-base_model_id = "mistralai/Mistral-7B-v0.1"
+base_model_id = "tiiuae/falcon-7b-instruct"
 model_directory = "Tonic/GaiaMiniMed"
 
 # Instantiate the Tokenizer
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True, padding_side="left")
+tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct", trust_remote_code=True, padding_side="left")
 # tokenizer = AutoTokenizer.from_pretrained("Tonic/mistralmed", trust_remote_code=True, padding_side="left")
 tokenizer.pad_token = tokenizer.eos_token
 tokenizer.padding_side = 'left'
 
 # Load the GaiaMiniMed model with the specified configuration
 # Load the Peft model with a specific configuration
-peft_model = PeftModel.from_pretrained("Tonic/GaiaMiniMed", config=peft_config)
+peft_model = PeftModel.from_pretrained("Tonic/GaiaMiniMed")
 peft_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct")
-# Now you can use peft_model without any NameError
-peft_model = peft_model.to_bettertransformer("tiiuae/falcon-7b-instruct")
+peft_model = PeftModel.from_pretrained(peft_model, "Tonic/GaiaMiniMed")
 
 
 # Specify the configuration class for the model
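
The net effect of this commit is to load GaiaMiniMed as a PEFT adapter on top of Falcon-7B-Instruct instead of referencing Mistral-7B. Note that the first added PeftModel.from_pretrained call passes only the adapter id and is overwritten two lines later; the working pattern is the last added line, which wraps the already-loaded base model. A minimal sketch of that pattern, assuming the standard transformers/peft APIs rather than copying the final app.py verbatim:

# Sketch only: load the Falcon base model, then attach the GaiaMiniMed adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "tiiuae/falcon-7b-instruct"
adapter_id = "Tonic/GaiaMiniMed"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Tokenizer for the base model, left-padded as in the diff
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token

# PeftModel.from_pretrained takes the base model object first, then the adapter id
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
peft_model = PeftModel.from_pretrained(base_model, adapter_id).to(device)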
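
The block deleted in the first hunk passed generation-time settings (max_length, temperature, do_sample, token ids) to PeftConfig, which configures a PEFT adapter and does not accept generation arguments; that is presumably why it was removed. If those settings are still wanted, one place they could live (an assumption about intent, not code from this repository) is a transformers GenerationConfig handed to generate(), continuing from the sketch above:

# Hypothetical home for the deleted sampling settings: a GenerationConfig used at generate() time.
from transformers import GenerationConfig

generation_config = GenerationConfig(
    max_length=500,
    use_cache=True,
    early_stopping=False,
    bos_token_id=tokenizer.bos_token_id,  # token ids taken from the tokenizer here (assumption; the old code read them from the model config)
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
    temperature=0.4,
    do_sample=True,
)

inputs = tokenizer("Example prompt", return_tensors="pt").to(device)
outputs = peft_model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))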