Update app.py
app.py
CHANGED
@@ -7,7 +7,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 # Load model and tokenizer
 model_name = "Salesforce/xLAM-1b-fc-r"
 
-title = f"# 🚀
+title = f"# 🚀Eval Model: {model_name}"
 
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -18,7 +18,7 @@ torch.random.manual_seed(0)
 @spaces.GPU
 def generate_response(query):
     messages = [
-        {'role': 'user', 'content':
+        {'role': 'user', 'content': query}
     ]
 
     inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
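The hunk ends at the tokenization step, so the remainder of generate_response is not visible in this diff. A minimal sketch of how the generation step typically continues for a causal LM loaded this way; the max_new_tokens value and decoding details are assumptions, not part of the commit:

    # Sketch only: the diff stops at apply_chat_template, so these generation
    # arguments are assumptions rather than the Space's actual code.
    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
    # Decode only the newly generated tokens, skipping the prompt portion.
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)

Elsewhere in app.py the completed title f-string is presumably rendered in the Space's Gradio UI (for example via gr.Markdown(title)), but that code is outside the hunks shown here.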