ruslanmv committed on
Commit
9b0ea4a
1 Parent(s): 0fbe780

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
3
  import torch
4
- model_name = "ruslanmv/Medical-Llama3-8B"
5
  device_map = 'auto'
6
  # Check if GPU is available
7
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -21,11 +21,11 @@ if device.type == "cuda":
21
  device_map=device_map
22
  )
23
  else:
24
- model = AutoModelForCausalLM.from_pretrained(
25
- model_name,
26
- trust_remote_code=True,
27
- use_cache=False
28
- )
29
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
30
  tokenizer.pad_token = tokenizer.eos_token
31
 
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
3
  import torch
4
+ model_name = "ruslanmv/Medical-Llama3-8B"
5
  device_map = 'auto'
6
  # Check if GPU is available
7
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
21
  device_map=device_map
22
  )
23
  else:
24
+ model = AutoModelForCausalLM.from_pretrained(model_name)
25
+
26
+ # Load model directly
27
+ from transformers import AutoTokenizer, AutoModelForCausalLM
28
+
29
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
30
  tokenizer.pad_token = tokenizer.eos_token
31