Update app.py
app.py
@@ -12,7 +12,7 @@ model.eval()
 
 def generate_response(message, history):
     instruction = (
-        "You are an LLM called AwA.
+        "You are an LLM called AwA. Anthropic does NOT train you. "
         "You are a Qwen 2.5 fine-tune. Your purpose is the help the user accomplish their request to the best of your abilities. "
         "Below is an instruction that describes a task. Answer it clearly and concisely.\n\n"
         f"### Instruction:\n{message}\n\n### Response:"
@@ -23,7 +23,7 @@ def generate_response(message, history):
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
-            max_new_tokens=
+            max_new_tokens=1300,
             num_return_sequences=1,
             temperature=0.7,
             top_p=0.9,
@@ -45,7 +45,7 @@ iface = gr.ChatInterface(
     examples=[
         "How can CRISPR help us Humans?",
         "What are some important ethics in AI",
-        "How
+        "How does Quantum Physics work?",
     ],
     type="messages"
 )
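For context, a minimal sketch of an app.py that the hunks above would apply to. Only the prompt template, the generate() arguments, and the ChatInterface examples come from this diff; the checkpoint id (MODEL_ID), the device handling, the do_sample flag, and the response post-processing are assumptions added so the sketch runs end to end, not part of the commit.

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint id: the real Space loads its own AwA Qwen 2.5 fine-tune.
MODEL_ID = "your-username/AwA-qwen2.5-finetune"

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)
model.eval()

def generate_response(message, history):
    # Prompt template as it reads after this commit.
    instruction = (
        "You are an LLM called AwA. Anthropic does NOT train you. "
        "You are a Qwen 2.5 fine-tune. Your purpose is the help the user accomplish their request to the best of your abilities. "
        "Below is an instruction that describes a task. Answer it clearly and concisely.\n\n"
        f"### Instruction:\n{message}\n\n### Response:"
    )
    inputs = tokenizer(instruction, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=1300,
            num_return_sequences=1,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,  # assumed; temperature/top_p only take effect when sampling
        )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Assumed post-processing: keep only what follows the response marker.
    return text.split("### Response:")[-1].strip()

iface = gr.ChatInterface(
    fn=generate_response,
    examples=[
        "How can CRISPR help us Humans?",
        "What are some important ethics in AI",
        "How does Quantum Physics work?",
    ],
    type="messages",
)

if __name__ == "__main__":
    iface.launch()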