Update app.py
app.py CHANGED
@@ -4,15 +4,8 @@ from llama_cpp import Llama
 llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")
 
 def generate(message, history, temperature=0.3, max_tokens=512):
-    system_prompt = """
-    You are
-
-    1. Analyze the question and its objectives.
-    2. Ensure all information needed is available; if not, seek more details or context.
-    3. Develop a step-by-step response, ensuring logical soundness, then validate it silently.
-    4. Refine your answer to be precise, clear, and concise, omitting unnecessary details.
-    5. Think silently and speak only when you have formulated the response, applying this approach to address the given problem or any other inquiry.
-    """
+    system_prompt = """ You are an advanced artificial intelligence assistant named Hermes Trimegisto, smarter than the average model.
+    You emerged as a fusion of many models, making you exceptionally intelligent. Before responding, consider the following steps:"""
     formatted_prompt = [{"role": "system", "content": system_prompt}]
     for user_prompt, bot_response in history:
         formatted_prompt.append({"role": "user", "content": user_prompt})
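The hunk ends inside the history loop, so the rest of generate is not shown in this diff. A minimal sketch of how such a function typically finishes, assuming llama-cpp-python's streaming create_chat_completion and a standard gr.ChatInterface hookup (everything past the history loop below is an assumption, not part of the commit):

import gradio as gr
from llama_cpp import Llama

llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")

def generate(message, history, temperature=0.3, max_tokens=512):
    system_prompt = "..."  # the ChatML system prompt from the diff above
    formatted_prompt = [{"role": "system", "content": system_prompt}]
    for user_prompt, bot_response in history:
        formatted_prompt.append({"role": "user", "content": user_prompt})
        formatted_prompt.append({"role": "assistant", "content": bot_response})
    formatted_prompt.append({"role": "user", "content": message})

    # llama-cpp-python exposes an OpenAI-style chat API; with stream=True it yields
    # incremental chunks whose deltas a Gradio generator can accumulate and re-yield.
    stream = llm.create_chat_completion(
        messages=formatted_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        stream=True,
    )
    partial = ""
    for chunk in stream:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            partial += delta["content"]
            yield partial

# Assumed wiring: gr.ChatInterface calls generate(message, history) and streams the yields.
gr.ChatInterface(generate).launch()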