Amrpyt committed on
Commit b0f37a7 • 1 Parent(s): f63dd16

Upload app.ts

Files changed (1)
  1. src/app.ts +464 -0
src/app.ts ADDED
@@ -0,0 +1,464 @@
+ import express, { Request, Response, NextFunction } from "express";
+ import { ChildProcessWithoutNullStreams, spawn } from "child_process";
+ import fs from "fs";
+ import path from "path";
+ import bodyParser from "body-parser";
+ import axios from "axios";
+ import https from "https";
+ import os from "os";
+ import { encode } from "gpt-3-encoder";
+ import { randomUUID } from "crypto";
+ import { config } from "dotenv";
+
+ config();
+
+ // Constants for the server and API configuration
+ const port = process.env.SERVER_PORT || 3040;
+ const baseUrl = "https://chat.openai.com";
+ const apiUrl = `${baseUrl}/backend-anon/conversation`;
+ const refreshInterval = 60000; // Interval to refresh token in ms
+ const errorWait = 120000; // Wait time in ms after an error
+ let cloudflared: ChildProcessWithoutNullStreams;
+
+ // Initialize global variables to store the session token and device ID
+ let token: string;
+ let oaiDeviceId: string;
+
+ // Function to wait for a specified duration
+ const wait = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
+
+ // Generate a random completion ID with the given prefix (e.g. "chatcmpl-")
+ function GenerateCompletionId(prefix: string = "cmpl-") {
+   const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+   const length = 28;
+
+   for (let i = 0; i < length; i++) {
+     prefix += characters.charAt(Math.floor(Math.random() * characters.length));
+   }
+
+   return prefix;
+ }
+
+ // Split an async iterable of byte chunks into SSE "data: " lines
+ async function* chunksToLines(chunksAsync: any) {
+   let previous = "";
+   for await (const chunk of chunksAsync) {
+     const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
+     previous += bufferChunk;
+     let eolIndex: number;
+     while ((eolIndex = previous.indexOf("\n")) >= 0) {
+       // line includes the EOL
+       const line = previous.slice(0, eolIndex + 1).trimEnd();
+       if (line === "data: [DONE]") break;
+       if (line.startsWith("data: ")) yield line;
+       previous = previous.slice(eolIndex + 1);
+     }
+   }
+ }
+
+ // Strip the "data: " prefix from each SSE line to get the JSON payload
+ async function* linesToMessages(linesAsync: any) {
+   for await (const line of linesAsync) {
+     const message = line.substring("data: ".length);
+
+     yield message;
+   }
+ }
+
+ async function* StreamCompletion(data: any) {
+   yield* linesToMessages(chunksToLines(data));
+ }
+
+ // Setup axios instance for API requests with predefined configurations
+ const axiosInstance = axios.create({
+   httpsAgent: new https.Agent({ rejectUnauthorized: false }),
+   proxy:
+     process.env.PROXY === "true"
+       ? {
+           host: process.env.PROXY_HOST,
+           port: Number(process.env.PROXY_PORT),
+           auth:
+             process.env.PROXY_AUTH === "true"
+               ? {
+                   username: process.env.PROXY_USERNAME,
+                   password: process.env.PROXY_PASSWORD,
+                 }
+               : undefined,
+           protocol: process.env.PROXY_PROTOCOL,
+         }
+       : false,
+   headers: {
+     accept: "*/*",
+     "accept-language": "en-US,en;q=0.9",
+     "cache-control": "no-cache",
+     "content-type": "application/json",
+     "oai-language": "en-US",
+     origin: baseUrl,
+     pragma: "no-cache",
+     referer: baseUrl,
+     "sec-ch-ua": '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
+     "sec-ch-ua-mobile": "?0",
+     "sec-ch-ua-platform": '"Windows"',
+     "sec-fetch-dest": "empty",
+     "sec-fetch-mode": "cors",
+     "sec-fetch-site": "same-origin",
+     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
+   },
+ });
+
+ // Function to get a new session ID and token from the OpenAI API
+ async function getNewSessionId() {
+   let newDeviceId = randomUUID();
+   const response = await axiosInstance.post(
+     `${baseUrl}/backend-anon/sentinel/chat-requirements`,
+     {},
+     {
+       headers: { "oai-device-id": newDeviceId },
+     }
+   );
+   console.log(`System: Successfully refreshed session ID and token. ${!token ? "(Now it's ready to process requests)" : ""}`);
+   oaiDeviceId = newDeviceId;
+   token = response.data.token;
+
+   // console.log("New Token:", token);
+   // console.log("New Device ID:", oaiDeviceId);
+ }
+
+ // Middleware to enable CORS and handle pre-flight requests
+ function enableCORS(req: Request, res: Response, next: NextFunction) {
+   res.header("Access-Control-Allow-Origin", "*");
+   res.header("Access-Control-Allow-Headers", "*");
+   res.header("Access-Control-Allow-Methods", "GET, POST, OPTIONS");
+   if (req.method === "OPTIONS") {
+     return res.status(200).end();
+   }
+   next();
+ }
+
+ // Middleware to handle chat completions
+ async function handleChatCompletion(req: Request, res: Response) {
+   console.log("Request:", `${req.method} ${req.originalUrl}`, `${req.body?.messages?.length ?? 0} messages`, req.body.stream ? "(stream-enabled)" : "(stream-disabled)");
+   try {
+     const body = {
+       action: "next",
+       messages: req.body.messages.map((message: { role: any; content: any }) => ({
+         author: { role: message.role },
+         content: { content_type: "text", parts: [message.content] },
+       })),
+       parent_message_id: randomUUID(),
+       model: "text-davinci-002-render-sha",
+       timezone_offset_min: -180,
+       suggestions: [],
+       history_and_training_disabled: true,
+       conversation_mode: { kind: "primary_assistant" },
+       websocket_request_id: randomUUID(),
+     };
+
+     let promptTokens = 0;
+     let completionTokens = 0;
+
+     for (let message of req.body.messages) {
+       promptTokens += encode(message.content).length;
+     }
+
+     const response = await axiosInstance.post(apiUrl, body, {
+       responseType: "stream",
+       headers: {
+         "oai-device-id": oaiDeviceId,
+         "openai-sentinel-chat-requirements-token": token,
+       },
+     });
+
+     // Set the response headers based on the request type
+     if (req.body.stream) {
+       res.setHeader("Content-Type", "text/event-stream");
+       res.setHeader("Cache-Control", "no-cache");
+       res.setHeader("Connection", "keep-alive");
+     } else {
+       res.setHeader("Content-Type", "application/json");
+     }
+
+     let fullContent = "";
+     let requestId = GenerateCompletionId("chatcmpl-");
+     let created = Math.floor(Date.now() / 1000); // Unix timestamp in seconds
+     let finish_reason = null;
+
+     for await (const message of StreamCompletion(response.data)) {
+       // Skip heartbeat detection
+       if (message.match(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}$/)) continue;
+
+       const parsed = JSON.parse(message);
+
+       let content = parsed?.message?.content?.parts[0] ?? "";
+       let status = parsed?.message?.status ?? "";
+
+       // Ignore responses that merely echo one of the prompt messages
+       for (let message of req.body.messages) {
+         if (message.content === content) {
+           content = "";
+           break;
+         }
+       }
+
+       switch (status) {
+         case "in_progress":
+           finish_reason = null;
+           break;
+         case "finished_successfully":
+           let finish_reason_data = parsed?.message?.metadata?.finish_details?.type ?? null;
+           switch (finish_reason_data) {
+             case "max_tokens":
+               finish_reason = "length";
+               break;
+             case "stop":
+             default:
+               finish_reason = "stop";
+           }
+           break;
+         default:
+           finish_reason = null;
+       }
+
+       if (content === "") continue;
+
+       let completionChunk = content.replace(fullContent, "");
+
+       completionTokens += encode(completionChunk).length;
+
+       if (req.body.stream) {
+         let response = {
+           id: requestId,
+           created: created,
+           object: "chat.completion.chunk",
+           model: "gpt-3.5-turbo",
+           choices: [
+             {
+               delta: {
+                 content: completionChunk,
+               },
+               index: 0,
+               finish_reason: finish_reason,
+             },
+           ],
+         };
+
+         res.write(`data: ${JSON.stringify(response)}\n\n`);
+       }
+
+       fullContent = content.length > fullContent.length ? content : fullContent;
+     }
+
+     if (req.body.stream) {
+       res.write(
+         `data: ${JSON.stringify({
+           id: requestId,
+           created: created,
+           object: "chat.completion.chunk",
+           model: "gpt-3.5-turbo",
+           choices: [
+             {
+               delta: {
+                 content: "",
+               },
+               index: 0,
+               finish_reason: finish_reason,
+             },
+           ],
+         })}\n\n`
+       );
+     } else {
+       res.write(
+         JSON.stringify({
+           id: requestId,
+           created: created,
+           model: "gpt-3.5-turbo",
+           object: "chat.completion",
+           choices: [
+             {
+               finish_reason: finish_reason,
+               index: 0,
+               message: {
+                 content: fullContent,
+                 role: "assistant",
+               },
+             },
+           ],
+           usage: {
+             prompt_tokens: promptTokens,
+             completion_tokens: completionTokens,
+             total_tokens: promptTokens + completionTokens,
+           },
+         })
+       );
+     }
+
+     res.end();
+   } catch (error: any) {
+     // console.log('Error:', error.response?.data ?? error.message);
+     if (!res.headersSent) res.setHeader("Content-Type", "application/json");
+     // console.error('Error handling chat completion:', error);
+     res.write(
+       JSON.stringify({
+         status: false,
+         error: {
+           message: "An error occurred. Please check the server console to confirm it is ready and free of errors. Additionally, ensure that your request complies with OpenAI's policy.",
+           type: "invalid_request_error",
+         },
+         support: "https://discord.pawan.krd",
+       })
+     );
+     res.end();
+   }
+ }
+
+ // Initialize Express app and use middlewares
+ const app = express();
+ app.use(bodyParser.json());
+ app.use(enableCORS);
+
+ // Route to handle POST requests for chat completions
+ app.post("/v1/chat/completions", handleChatCompletion);
+
+ // 404 handler for unmatched routes
+ app.use((req, res) =>
+   res.status(404).send({
+     status: false,
+     error: {
+       message: `The requested endpoint (${req.method.toLocaleUpperCase()} ${req.path}) was not found. Please make sure to use "http://localhost:3040/v1" as the base URL.`,
+       type: "invalid_request_error",
+     },
+     support: "https://discord.pawan.krd",
+   })
+ );
+
+ // Download the cloudflared binary for the current platform, unless it is already present
+ async function DownloadCloudflared(): Promise<string> {
+   const platform = os.platform();
+   let url: string;
+
+   if (platform === "win32") {
+     const arch = os.arch() === "x64" ? "amd64" : "386";
+     url = `https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-windows-${arch}.exe`;
+   } else {
+     let arch = os.arch();
+     switch (arch) {
+       case "x64":
+         arch = "amd64";
+         break;
+       case "arm":
+       case "arm64":
+         break;
+       default:
+         arch = "amd64"; // Default to amd64 if unknown architecture
+     }
+     const platformLower = platform.toLowerCase();
+     url = `https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-${platformLower}-${arch}`;
+   }
+
+   const fileName = platform === "win32" ? "cloudflared.exe" : "cloudflared";
+   const filePath = path.resolve(fileName);
+
+   if (fs.existsSync(filePath)) {
+     return filePath;
+   }
+
+   try {
+     const response = await axiosInstance({
+       method: "get",
+       url: url,
+       responseType: "stream",
+     });
+
+     const writer = fs.createWriteStream(filePath);
+
+     response.data.pipe(writer);
+
+     return new Promise<string>((resolve, reject) => {
+       writer.on("finish", () => {
+         if (platform !== "win32") {
+           fs.chmodSync(filePath, 0o755);
+         }
+         resolve(filePath);
+       });
+
+       writer.on("error", reject);
+     });
+   } catch (error: any) {
+     // console.error("Failed to download file:", error.message);
+     return null;
+   }
+ }
+
+ async function StartCloudflaredTunnel(cloudflaredPath: string): Promise<string> {
+   const localUrl = `http://localhost:${port}`;
+   return new Promise<string>((resolve, reject) => {
+     cloudflared = spawn(cloudflaredPath, ["tunnel", "--url", localUrl]);
+
+     cloudflared.stdout.on("data", (data: any) => {
+       const output = data.toString();
+       // console.log("Cloudflared Output:", output);
+
+       // Adjusted regex to specifically match URLs that end with .trycloudflare.com
+       const urlMatch = output.match(/https:\/\/[^\s]+\.trycloudflare\.com/);
+       if (urlMatch) {
+         let url = urlMatch[0];
+         resolve(url);
+       }
+     });
+
+     cloudflared.stderr.on("data", (data: any) => {
+       const output = data.toString();
+       // console.error("Error from cloudflared:", output);
+
+       const urlMatch = output.match(/https:\/\/[^\s]+\.trycloudflare\.com/);
+       if (urlMatch) {
+         let url = urlMatch[0];
+         resolve(url);
+       }
+     });
+
+     cloudflared.on("close", (code: any) => {
+       resolve(null);
+       // console.log(`Cloudflared tunnel process exited with code ${code}`);
+     });
+   });
+ }
+
+ // Start the server and the session ID refresh loop
+ app.listen(port, async () => {
+   if (process.env.CLOUDFLARED === undefined) process.env.CLOUDFLARED = "true";
+   let cloudflared = process.env.CLOUDFLARED === "true";
+   let filePath: string;
+   let publicURL: string;
+   if (cloudflared) {
+     filePath = await DownloadCloudflared();
+     publicURL = await StartCloudflaredTunnel(filePath);
+   }
+
+   console.log(`💡 Server is running at http://localhost:${port}`);
+   console.log();
+   console.log(`🔗 Local Base URL: http://localhost:${port}/v1`);
+   console.log(`🔗 Local Endpoint: http://localhost:${port}/v1/chat/completions`);
+   console.log();
+   if (cloudflared && publicURL) console.log(`🔗 Public Base URL: ${publicURL}/v1`);
+   if (cloudflared && publicURL) console.log(`🔗 Public Endpoint: ${publicURL}/v1/chat/completions`);
+   else if (cloudflared && !publicURL) {
+     console.log("🔗 Public Endpoint: (Failed to start cloudflared tunnel, please restart the server.)");
+     if (filePath) fs.unlinkSync(filePath);
+   }
+   if (cloudflared && publicURL) console.log();
+   console.log("📝 Author: Pawan.Krd");
+   console.log(`🌐 Discord server: https://discord.gg/pawan`);
+   console.log("🌍 GitHub Repository: https://github.com/PawanOsman/ChatGPT");
+   console.log(`💖 Don't forget to star the repository if you like this project!`);
+   console.log();
+
+   setTimeout(async () => {
+     while (true) {
+       try {
+         await getNewSessionId();
+         await wait(refreshInterval);
+       } catch (error) {
+         console.error("Error refreshing session ID, retrying in 2 minutes...");
+         console.error("If this error persists, your country may not be supported yet.");
+         console.error("If your country was the issue, please consider using a U.S. VPN.");
+         await wait(errorWait);
+       }
+     }
+   }, 0);
+ });
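
For reference, a minimal client sketch (not part of the uploaded file) that exercises the non-streaming path of the endpoint registered above. It assumes the server is running locally on the default port 3040 and that a global fetch is available (Node 18+); with stream: true the server instead emits Server-Sent Events ("data: ..." chunks).

// Hypothetical usage example; not included in this commit.
async function main() {
  const res = await fetch("http://localhost:3040/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      messages: [{ role: "user", content: "Hello!" }],
      stream: false,
    }),
  });

  // Non-streaming responses follow the chat.completion shape built in handleChatCompletion
  const data = await res.json();
  console.log(data.choices?.[0]?.message?.content);
}

main().catch(console.error);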