:gem: [Feature] Enable system prompt in agent
networks/llm_requester.js
CHANGED
@@ -27,6 +27,8 @@ export class ChatCompletionsRequester {
         this.openai_endpoint =
             openai_endpoint || this.extract_openai_endpoint_and_model()[0];
         this.model = model || this.extract_openai_endpoint_and_model()[1];
+        this.system_prompt = this.agent_info.system_prompt;
+
         this.temperature = temperature || this.agent_info.temperature;
         this.top_p = top_p || this.agent_info.top_p;
         this.max_output_tokens =
@@ -50,6 +52,11 @@ export class ChatCompletionsRequester {
     }
     construct_backend_request_body() {
         this.openai_request_messages = get_request_messages();
+        this.system_message = {
+            role: "system",
+            content: this.system_prompt,
+        };
+        this.openai_request_messages.unshift(this.system_message);
         this.backend_request_body = {
             openai_endpoint: this.openai_endpoint,
             openai_request_method: "POST",