Update app.py
app.py CHANGED
@@ -40,7 +40,7 @@ def load_model():
         model = 'TheBloke/Llama-2-7B-GGUF',
         model_type = 'llama',
         max_new_tokens = 1096,
-        temperature = 0.
+        temperature = 0.6,
         repetition_penalty = 1.13,
         gpu_layers = 2
     )
@@ -61,8 +61,8 @@ def bot(query):
     llm_response = llmcahin.run({"query":query})
     return llm_response
 
-with gr.Blocks(title="
-    gr.Markdown("#
+with gr.Blocks(title="chat llama 7b") as demo:
+    gr.Markdown("# chat llama")
     chatbot = gr.Chatbot([],elem_id="chatbot",height=700)
     msg = gr.Textbox()
     clear = gr.ClearButton([msg,chatbot])
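For context: the two hunks touch the generation config inside load_model() and the Gradio Blocks UI, but the rest of app.py is not part of this diff. The sketch below is only a minimal guess at how such a Space is commonly wired with LangChain's CTransformers wrapper and Gradio. The imports, the prompt template, the respond() helper, the msg.submit() hookup, and demo.launch() are all assumptions, and the generation parameters are grouped into the wrapper's config dict rather than passed as bare keyword arguments as the diff shows. Identifiers that do appear in the diff (load_model, bot, llmcahin, chatbot, msg, clear) are kept as-is.

# Minimal sketch (assumed wiring, not the Space's actual code): a LangChain
# LLMChain over a GGUF Llama-2 model served by ctransformers, exposed via Gradio.
import gradio as gr
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers


def load_model():
    # Assumption: the generation settings from the diff map onto the
    # ctransformers config dict used by LangChain's CTransformers wrapper.
    return CTransformers(
        model='TheBloke/Llama-2-7B-GGUF',
        model_type='llama',
        config={
            'max_new_tokens': 1096,
            'temperature': 0.6,          # value introduced by this commit
            'repetition_penalty': 1.13,
            'gpu_layers': 2,
        },
    )


llm = load_model()
# Hypothetical prompt; the Space's real template is not shown in the diff.
prompt = PromptTemplate.from_template("Question: {query}\nAnswer:")
llmcahin = LLMChain(llm=llm, prompt=prompt)  # variable name kept from the diff


def bot(query):
    llm_response = llmcahin.run({"query": query})
    return llm_response


def respond(message, chat_history):
    # Append the (user, bot) turn so gr.Chatbot can render the conversation.
    chat_history.append((message, bot(message)))
    return "", chat_history


with gr.Blocks(title="chat llama 7b") as demo:
    gr.Markdown("# chat llama")
    chatbot = gr.Chatbot([], elem_id="chatbot", height=700)
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()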