codelion committed
Commit 625d8f8
1 Parent(s): f8e61c5

Update app.py

Files changed (1):
  app.py (+2 -2)
app.py CHANGED
@@ -25,12 +25,12 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "patched-codes/Llama-3-patcher"
+    model_id = "Qwen/CodeQwen1.5-7B-Chat"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.padding_side = 'right'
     # pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    # tokenizer.use_default_system_prompt = False
+    # tokenizer.use_default_system_prompt = FalseQwen/CodeQwen1.5-7B-Chat
 
 @spaces.GPU(duration=60)
 def generate(
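
For context, below is a minimal, self-contained sketch of loading and prompting the new model id outside the Space. It is not part of this commit: it assumes transformers, accelerate, and bitsandbytes are installed and a CUDA GPU is available, and it uses BitsAndBytesConfig for 4-bit loading (recent transformers releases prefer this over the bare load_in_4bit=True flag that app.py passes). The example prompt is illustrative only.

# Sketch only: loads the model id introduced by this commit and runs one chat turn.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "Qwen/CodeQwen1.5-7B-Chat"  # new model id from this commit

# 4-bit quantized load; equivalent in intent to load_in_4bit=True in app.py
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Illustrative prompt, not taken from the Space
messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=256)

# Decode only the newly generated tokens
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))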