Spaces:
Sleeping
Sleeping
LongLe3102000
committed on
Commit
•
386be1d
1
Parent(s):
7e73412
Update app.py
Browse files
app.py
CHANGED
@@ -23,25 +23,25 @@ def respond(encoded_smiles, max_tokens, temperature, top_p, top_k):
|
|
23 |
try:
|
24 |
# Load the Llama model
|
25 |
model_name = "model.gguf"
|
26 |
-
llm = Llama(model_name) # Khởi tạo đối tượng Llama với
|
27 |
|
|
|
|
|
|
|
28 |
# Set generation settings
|
29 |
settings = {
|
30 |
-
"max_new_tokens": max_tokens,
|
31 |
-
"temperature": temperature,
|
32 |
-
"top_p": top_p,
|
33 |
-
"top_k": top_k,
|
34 |
"do_sample": True,
|
35 |
}
|
36 |
|
37 |
-
# Tokenize the input
|
38 |
-
input_ids = llm.tokenize(encoded_smiles)
|
39 |
-
|
40 |
# Generate the output
|
41 |
-
outputs = llm.generate(input_ids
|
42 |
|
43 |
# Decode the output tokens to text
|
44 |
-
output_text = llm.decode(outputs[0]
|
45 |
|
46 |
# Extract the predicted selfies from the output text
|
47 |
first_inst_index = output_text.find("[/INST]")
|
@@ -56,10 +56,10 @@ demo = gr.Interface(
|
|
56 |
fn=respond,
|
57 |
inputs=[
|
58 |
gr.Textbox(label="Encoded SMILES"),
|
59 |
-
gr.Slider(minimum=1, maximum=2048,
|
60 |
-
gr.Slider(minimum=0.1, maximum=4.0,
|
61 |
-
gr.Slider(minimum=0.1, maximum=1.0,
|
62 |
-
gr.Slider(minimum=0, maximum=100,
|
63 |
],
|
64 |
outputs=gr.JSON(label="Results"),
|
65 |
theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
|
|
|
23 |
try:
|
24 |
# Load the Llama model
|
25 |
model_name = "model.gguf"
|
26 |
+
llm = Llama(model_name) # Khởi tạo đối tượng Llama với tên mô hình
|
27 |
|
28 |
+
# Tokenize the input
|
29 |
+
input_ids = llm.tokenize(encoded_smiles) # Mã hóa đầu vào thành các IDs token
|
30 |
+
|
31 |
# Set generation settings
|
32 |
settings = {
|
33 |
+
"max_new_tokens": int(max_tokens),
|
34 |
+
"temperature": float(temperature),
|
35 |
+
"top_p": float(top_p),
|
36 |
+
"top_k": int(top_k),
|
37 |
"do_sample": True,
|
38 |
}
|
39 |
|
|
|
|
|
|
|
40 |
# Generate the output
|
41 |
+
outputs = llm.generate(input_ids, **settings)
|
42 |
|
43 |
# Decode the output tokens to text
|
44 |
+
output_text = llm.decode(outputs[0])
|
45 |
|
46 |
# Extract the predicted selfies from the output text
|
47 |
first_inst_index = output_text.find("[/INST]")
|
|
|
56 |
fn=respond,
|
57 |
inputs=[
|
58 |
gr.Textbox(label="Encoded SMILES"),
|
59 |
+
gr.Slider(minimum=1, maximum=2048, default=512, label="Max tokens"),
|
60 |
+
gr.Slider(minimum=0.1, maximum=4.0, default=1.0, label="Temperature"),
|
61 |
+
gr.Slider(minimum=0.1, maximum=1.0, default=1.0, label="Top-p"),
|
62 |
+
gr.Slider(minimum=0, maximum=100, default=50, label="Top-k")
|
63 |
],
|
64 |
outputs=gr.JSON(label="Results"),
|
65 |
theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
|