Added stream response in GPT-4 Model

- apiGPT4/model/forefront/index.ts  +4 -4
- client/js/chat.js  +2 -2
- server/backend.py  +57 -26
apiGPT4/model/forefront/index.ts
CHANGED

@@ -87,7 +87,7 @@ class AccountPool {
     }
 
 
-export class Forefrontnew extends Chat implements BrowserUser<Account>{
+export class Forefrontnew extends Chat implements BrowserUser<Account> {
     private pagePool: BrowserPool<Account>;
     private accountPool: AccountPool;
 
@@ -139,8 +139,8 @@ export class Forefrontnew extends Chat implements BrowserUser<Account> {
 
     private static async closeVIPPop(page: Page) {
         try {
-            await page.waitForSelector('. …
-            await page.click('. …
+            await page.waitForSelector('.grid > .grid > .w-full > .border-t > .text-th-primary-medium', {timeout: 15 * 1000})
+            await page.click('.grid > .grid > .w-full > .border-t > .text-th-primary-medium')
         } catch (e) {
             console.log('not need close vip');
         }
@@ -148,7 +148,7 @@ export class Forefrontnew extends Chat implements BrowserUser<Account> {
 
     private static async closeWelcomePop(page: Page) {
         try {
-            await page.waitForSelector('.flex > .modal > .modal-box > .flex > .px-3:nth-child(1)', {timeout: …
+            await page.waitForSelector('.flex > .modal > .modal-box > .flex > .px-3:nth-child(1)', {timeout: 30 * 1000})
             await page.click('.flex > .modal > .modal-box > .flex > .px-3:nth-child(1)')
         } catch (e) {
             console.log('not need close welcome pop');
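Both popup handlers use the same pattern: wait briefly for a dismiss button, click it, and swallow the timeout error when the popup never appears. A sketch of a shared helper, not part of this commit, with an illustrative default timeout:

// Hypothetical helper generalizing the closeVIPPop/closeWelcomePop
// pattern; not in this diff. Selector and timeout are caller-supplied.
import {Page} from 'puppeteer';

async function dismissIfPresent(page: Page, selector: string, timeoutMs = 15 * 1000): Promise<boolean> {
    try {
        await page.waitForSelector(selector, {timeout: timeoutMs});
        await page.click(selector);
        return true;
    } catch (e) {
        // Selector never appeared within the timeout: nothing to close.
        return false;
    }
}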
client/js/chat.js
CHANGED

@@ -14,7 +14,7 @@ let prompt_lock = false;
 hljs.addPlugin(new CopyButtonPlugin());
 
 const format = (text) => {
-    return text.replace(/(?:\r\n|\r …
+    return text.replace(/(?:\r\n|\r|\\n)/g, "\n");
 };
 
 message_input.addEventListener("blur", () => {
@@ -143,7 +143,7 @@ const ask_gpt = async (message) => {
         chunk = `cloudflare token expired, please refresh the page.`;
     }
 
-    text …
+    text = format(chunk);
 
     document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
     document.querySelectorAll(`code`).forEach((el) => {
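The widened regex now converts literal two-character "\n" escapes arriving in streamed chunks, in addition to CR and CRLF, into real newlines. A quick check of the new behavior; the sample input is illustrative:

// Quick check of the new format() behavior; sample input is illustrative.
const format = (text: string): string => text.replace(/(?:\r\n|\r|\\n)/g, "\n");

console.log(JSON.stringify(format("a\r\nb\rc\\nd"))); // -> "a\nb\nc\nd"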
server/backend.py
CHANGED

@@ -59,18 +59,28 @@ class Backend_Api:
             set_response_language(
                 prompt['content'], special_instructions[jailbreak])
 
-        …
-        …
-        …
+        # Initialize the conversation with the system message
+        conversation = [{'role': 'system', 'content': system_message}]
+
+        # Add extra results
+        conversation += extra
+
+        # Add jailbreak instructions, if any
+        jailbreak_instructions = isJailbreak(jailbreak)
+        if jailbreak_instructions:
+            conversation += jailbreak_instructions
+
+        # Add the existing conversation and the prompt
+        conversation += _conversation + [prompt]
 
         def stream():
            if isGPT3Model(model):
                response = get_response_gpt3(
-                   conversation, self.use_auto_proxy)
+                   conversation, self.use_auto_proxy, jailbreak)
+               yield response
            if isGPT4Model(model):
-               response …
-               …
-               yield response
+               for response in get_response_gpt4(conversation, jailbreak):
+                   yield response
 
        return self.app.response_class(stream(), mimetype='text/event-stream')
 
@@ -85,8 +95,14 @@ class Backend_Api:
 
 
 def filter_jailbroken_response(response):
-    …
-    …
+    act_pattern = re.compile(r'ACT:', flags=re.DOTALL)
+    act_match = act_pattern.search(response)
+
+    if act_match:
+        response = response[act_match.end():]
+    else:
+        response = '[Please wait... Unlocking GPT 🔓]'
+
     return response
 
 
@@ -99,7 +115,7 @@ def set_response_language(prompt, special_instructions_list):
         special_instructions_list[0]['content']
 
 
-def get_response_gpt3(conversation, use_proxy):
+def get_response_gpt3(conversation, use_proxy, jailbreak):
     while use_proxy:
         try:
             random_proxy = get_random_proxy()
@@ -120,25 +136,33 @@ def get_response_gpt3(conversation, use_proxy):
             print(f"Error: {e}")
 
     if response is not None:
-        …
+        if isJailbreak(jailbreak):
+            response = filter_jailbroken_response(response)
+
         return response
 
 
-def get_response_gpt4(conversation):
-    api_url = f"http://127.0.0.1:3000/ask?prompt={conversation}&model=forefront"
-    …
-    res …
-    res. …
-    …
+def get_response_gpt4(conversation, jailbreak):
+    api_url = f"http://127.0.0.1:3000/ask/stream?prompt={conversation}&model=forefront"
+
+    try:
+        with requests.get(api_url, stream=True) as res:
+            res.raise_for_status()
+            for response in res.iter_lines(chunk_size=1024, decode_unicode=True, delimiter='\n'):
+                if response.startswith("data: "):
+                    print(response)
+                    yield filter_response_gpt4(response, jailbreak)
+    except Exception as e:
+        print(f"Error: {e}")
+
+
+def filter_response_gpt4(response, jailbreak):
+    response = response[6:]  # Remove "data: " prefix
+    response = response[1:-1]  # Remove the quotation marks
+    if isJailbreak(jailbreak):
+        response = filter_jailbroken_response(response)
+
+    return response
 
 
 def isGPT3Model(model):
@@ -147,3 +171,10 @@ def isGPT3Model(model):
 
 def isGPT4Model(model):
     return model == "text-gpt-0040"
+
+
+def isJailbreak(jailbreak):
+    if jailbreak != "Default":
+        return special_instructions[jailbreak] if jailbreak in special_instructions else None
+    else:
+        return None
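With backend.py now yielding GPT-4 chunks as they arrive (mimetype text/event-stream), the page can render output incrementally instead of waiting for the full reply. A minimal TypeScript sketch of reading such a streaming response from the browser, in the spirit of what client/js/chat.js does; the url and onChunk callback are assumptions for illustration, not part of this diff:

// Minimal sketch of consuming the streaming Flask response in the browser.
// The url and onChunk callback are illustrative; the real rendering logic
// lives in client/js/chat.js.
async function readStream(url: string, onChunk: (chunk: string) => void): Promise<void> {
    const res = await fetch(url, {method: 'POST'});
    if (!res.ok || !res.body) throw new Error(`request failed: ${res.status}`);
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    for (;;) {
        const {done, value} = await reader.read();
        if (done) break;
        onChunk(decoder.decode(value, {stream: true})); // render each chunk as it lands
    }
}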