Commit: updates

Files changed:
- .gitignore +1 -0
- server/backend.py +11 -2
- server/utils.py +77 -1
.gitignore
CHANGED
@@ -3,3 +3,4 @@ translations/*
 /server/__pycache__/*
 *.pyc
 server/__pycache__/backend.cpython-310.pyc
+*.mo
server/backend.py
CHANGED
@@ -4,7 +4,7 @@ from g4f import ChatCompletion
 from flask import request, Response, stream_with_context
 from requests import get
 from server.config import special_instructions
-from server.utils import check_model
+from server.utils import check_model, make_prompt, make_simple_prompt
 from langchain_community.llms import Ollama
 import requests
 
@@ -60,9 +60,11 @@ class Backend_Api:
         try:
             api_key = request.json['api_key']
             jailbreak = request.json['jailbreak']
+            print("jailbreak:", jailbreak)
             model = request.json['model']
             check_model(model)
             messages = build_messages(jailbreak)
+            print("messages:", messages)
             local_mode_1 = True
             local_model_2 = False
             print(model)
@@ -75,9 +77,16 @@ class Backend_Api:
 
             if local_mode_1:
                 content = messages[0]['content']
+                input = request.json['meta']['content']['parts'][0]['content']
+                # prompt = make_simple_prompt(input, messages)
+                prompt = make_prompt(input, messages, model)
                 llm = Ollama(model=model)
+                print("messages", messages)
+                print("len(messages)", len(messages))
                 print("content:", content)
-                response = llm.invoke(content)
+                print("input", input)
+                print("prompt", prompt)
+                response = llm.invoke(prompt)
                 return response
             elif local_model_2:
                 # Use the local model to generate the response
server/utils.py
CHANGED
@@ -50,4 +50,80 @@ def check_model(model_name):
         print(f"Failed to download model '{model_name}': {e}")
         return
     else:
-        print("OK")
+        print("OK")
+
+
+
+def make_simple_prompt(input, messages):
+    """
+    Create a simple prompt based on the input and messages.
+
+    :param input: str, input message from the user
+    :param messages: list, conversation history as a list of dictionaries containing 'role' and 'content'
+    :return: str, generated prompt
+    """
+    if len(messages) == 1:
+        prompt = f'''You are a friendly AI companion.
+You should answer the user's request.
+user: {input}'''
+    else:
+        conversation_history = '\n'.join(
+            f"{message['role']}: {message['content']}" for message in reversed(messages[:-1])
+        )
+        prompt = f'''You are a friendly AI companion.
+history: {conversation_history}.
+You should answer the user's request.
+user: {input}'''
+
+    print(prompt)
+    return prompt
+
+
+def make_prompt(input, messages, model):
+    """
+    Create a prompt based on the input, messages, and model used.
+
+    :param input: str, input message from the user
+    :param messages: list, conversation history as a list of dictionaries containing 'role' and 'content'
+    :param model: str, name of the model ("llama3", "mistral", or other)
+    :return: str, generated prompt
+    """
+    if model == "llama3":
+        # Special tokens used with Meta Llama 3
+        BEGIN_OF_TEXT = "<|begin_of_text|>"
+        EOT_ID = "<|eot_id|>"
+        START_HEADER_ID = "<|start_header_id|>"
+        END_HEADER_ID = "<|end_header_id|>"
+    elif model == "mistral":
+        # Special tokens for Mistral
+        BEGIN_OF_TEXT = "<s>"
+        EOT_ID = "</s>"
+        START_HEADER_ID = ""  # Not applicable to Mistral
+        END_HEADER_ID = ""  # Not applicable to Mistral
+    else:
+        # No special tokens
+        BEGIN_OF_TEXT = ""
+        EOT_ID = ""
+        START_HEADER_ID = ""
+        END_HEADER_ID = ""
+
+    if len(messages) == 1:
+        prompt = f'''{BEGIN_OF_TEXT}{START_HEADER_ID}system{END_HEADER_ID}
+You are a friendly AI companion.
+{EOT_ID}{START_HEADER_ID}user{END_HEADER_ID}
+{input}
+{EOT_ID}'''
+    else:
+        conversation_history = '\n'.join(
+            f"{START_HEADER_ID}{message['role']}{END_HEADER_ID}\n{message['content']}{EOT_ID}" for message in reversed(messages[:-1])
+        )
+        prompt = f'''{BEGIN_OF_TEXT}{START_HEADER_ID}system{END_HEADER_ID}
+You are a friendly AI companion.
+history:
+{conversation_history}
+{EOT_ID}{START_HEADER_ID}user{END_HEADER_ID}
+{input}
+{EOT_ID}'''
+
+    print(prompt)
+    return prompt