File size: 16,637 Bytes
3f5871c
a4c3fca
7c4fdc9
2e28042
 
e943a05
2e28042
5da61b4
 
3a63ab8
e943a05
 
9960338
 
2cb745f
0e5c445
3f5871c
e6addfc
 
 
 
 
 
537b6f5
6887755
ad02fa3
9db8ced
b7b2c8c
 
e943a05
ad02fa3
922b1b2
 
e943a05
922b1b2
 
 
 
e943a05
e6addfc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ad02fa3
 
2e28042
ad02fa3
 
 
5da61b4
ad02fa3
 
e943a05
ba93cf8
d5559df
ba93cf8
 
 
 
105d8aa
 
5c9a37f
105d8aa
5c9a37f
 
 
 
105d8aa
5c9a37f
105d8aa
5c9a37f
 
 
 
 
 
 
105d8aa
5c9a37f
 
7c4fdc9
 
537b6f5
 
 
 
 
 
 
 
 
 
922b1b2
537b6f5
 
 
 
 
922b1b2
 
e943a05
cf7ac8d
 
 
b7b2c8c
cf7ac8d
06e879d
e943a05
ad02fa3
e943a05
4a6603b
 
e943a05
77399ca
 
e943a05
0e5c445
4a6603b
 
e6addfc
537b6f5
 
 
 
 
 
 
e943a05
77399ca
e943a05
0e5c445
4a6603b
 
 
537b6f5
 
 
0e5c445
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e6addfc
 
 
 
77399ca
e6addfc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77399ca
e6addfc
 
 
8b0abbc
 
 
 
 
e6addfc
 
8b0abbc
 
 
 
 
 
 
e6addfc
 
 
 
 
 
8b0abbc
 
 
 
 
e6addfc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77399ca
e6addfc
 
 
 
 
 
 
 
 
 
 
 
 
4a6603b
e6addfc
 
 
 
 
 
 
 
 
ac04347
 
 
 
 
 
e6addfc
ac04347
 
 
 
 
 
77399ca
 
e943a05
 
 
e6addfc
e943a05
 
e6addfc
e943a05
6f3588a
 
 
 
e943a05
6f3588a
 
 
 
 
e943a05
ad02fa3
e943a05
da1e5da
6ba40e9
e6addfc
6ba40e9
e6addfc
6ba40e9
e6addfc
 
 
 
 
 
 
 
 
 
 
6ba40e9
 
 
f249cfc
6ba40e9
f249cfc
 
 
 
 
 
 
 
 
 
 
 
 
3f5871c
6887755
d4016bc
 
 
 
 
3f5871c
d4f393d
 
 
 
3f5871c
d4f393d
 
 
 
 
3f5871c
e6addfc
d4f393d
6887755
 
 
 
 
 
 
 
 
 
d4f393d
6887755
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e943a05
f00349e
e6addfc
 
 
00443e1
e6addfc
 
 
9db8ced
e6addfc
77399ca
d4016bc
 
a01ed5a
 
8b0abbc
 
9db8ced
 
e6addfc
 
6887755
e6addfc
d4016bc
e6addfc
9db8ced
 
 
a01ed5a
 
c1caa1f
 
 
a01ed5a
 
c1caa1f
a01ed5a
c1caa1f
a01ed5a
 
e6addfc
 
 
 
 
 
 
 
9db8ced
e6addfc
 
 
e943a05
9db8ced
8582ce1
 
9db8ced
a8ca669
 
 
e6addfc
a8ca669
 
 
 
 
e6addfc
9db8ced
e943a05
9db8ced
d4016bc
9db8ced
0f7a55d
 
d4016bc
0f7a55d
 
 
 
 
 
a01ed5a
 
 
 
 
 
 
e943a05
77399ca
9db8ced
e943a05
9db8ced
5da61b4
e943a05
9db8ced
e6addfc
9db8ced
 
 
e943a05
 
 
77399ca
 
 
9db8ced
 
e6addfc
9db8ced
0e5c445
6ba40e9
ce3e193
0e5c445
e943a05
 
77399ca
 
 
 
e943a05
77399ca
 
e6addfc
77399ca
 
 
 
 
 
e943a05
 
ad02fa3
a4c3fca
 
 
 
 
 
 
 
ad02fa3
73a5c0d
 
 
 
 
ad02fa3
 
1a14c61
 
 
 
 
2e28042
1a14c61
 
 
5da61b4
1a14c61
 
252a449
1a14c61
 
 
 
640dc14
 
 
 
3a63ab8
 
 
 
 
2e28042
3a63ab8
 
 
 
 
 
640dc14
 
 
 
 
 
 
 
3a63ab8
640dc14
3a63ab8
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
import { MESSAGES_BEFORE_LOGIN, ENABLE_ASSISTANTS_RAG } from "$env/static/private";
import { startOfHour } from "date-fns";
import { authCondition, requiresUser } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { models } from "$lib/server/models";
import { ERROR_MESSAGES } from "$lib/stores/errors";
import type { Message } from "$lib/types/Message";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import type { MessageUpdate } from "$lib/types/MessageUpdate";
import { runWebSearch } from "$lib/server/websearch/runWebSearch";
import { abortedGenerations } from "$lib/server/abortedGenerations";
import { summarize } from "$lib/server/summarize";
import { uploadFile } from "$lib/server/files/uploadFile";
import sizeof from "image-size";
import type { Assistant } from "$lib/types/Assistant";
import { convertLegacyConversation } from "$lib/utils/tree/convertLegacyConversation";
import { isMessageId } from "$lib/utils/tree/isMessageId";
import { buildSubtree } from "$lib/utils/tree/buildSubtree.js";
import { addChildren } from "$lib/utils/tree/addChildren.js";
import { addSibling } from "$lib/utils/tree/addSibling.js";
import { preprocessMessages } from "$lib/server/preprocessMessages.js";
import { usageLimits } from "$lib/server/usageLimits";
import { isURLLocal } from "$lib/server/isURLLocal.js";

/**
 * POST: append a message to the conversation and stream the assistant's
 * answer back as newline-delimited JSON `MessageUpdate` events.
 *
 * The request body supports four modes:
 *  - normal: `inputs` (+ optional parent `id`) appends a user message and a
 *    blank assistant message to write the answer into;
 *  - retry of a user message: `is_retry` + `inputs` adds an edited sibling
 *    user message plus a fresh assistant child;
 *  - retry of an assistant message: `is_retry` without `inputs` adds a
 *    sibling assistant message for a new answer;
 *  - continue: `is_continue` resumes generation of the last assistant message.
 *
 * Throws 401/404/410/413/429/500 via SvelteKit `error()` for the various
 * auth, lookup, model, file-size, and rate-limit failures below.
 */
export async function POST({ request, locals, params, getClientAddress }) {
	const id = z.string().parse(params.id);
	const convId = new ObjectId(id);
	// Captured before generation starts; compared against abort timestamps later.
	const promptedAt = new Date();

	const userId = locals.user?._id ?? locals.sessionId;

	// check user
	if (!userId) {
		throw error(401, "Unauthorized");
	}

	// check if the user has access to the conversation
	const convBeforeCheck = await collections.conversations.findOne({
		_id: convId,
		...authCondition(locals),
	});

	// Legacy conversations have no message tree (no rootMessageId): convert
	// them in place before doing anything else.
	if (convBeforeCheck && !convBeforeCheck.rootMessageId) {
		const res = await collections.conversations.updateOne(
			{
				_id: convId,
			},
			{
				$set: {
					...convBeforeCheck,
					...convertLegacyConversation(convBeforeCheck),
				},
			}
		);

		if (!res.acknowledged) {
			throw error(500, "Failed to convert conversation");
		}
	}

	// Re-fetch so `conv` reflects the (possibly just converted) document.
	const conv = await collections.conversations.findOne({
		_id: convId,
		...authCondition(locals),
	});

	if (!conv) {
		throw error(404, "Conversation not found");
	}

	// register the event for ratelimiting
	await collections.messageEvents.insertOne({
		userId,
		createdAt: new Date(),
		ip: getClientAddress(),
	});

	const messagesBeforeLogin = MESSAGES_BEFORE_LOGIN ? parseInt(MESSAGES_BEFORE_LOGIN) : 0;

	// guest mode check: cap the number of assistant replies an anonymous
	// session can receive before login is required
	if (!locals.user?._id && requiresUser && messagesBeforeLogin) {
		const totalMessages =
			(
				await collections.conversations
					.aggregate([
						{ $match: { ...authCondition(locals), "messages.from": "assistant" } },
						{ $project: { messages: 1 } },
						{ $limit: messagesBeforeLogin + 1 },
						{ $unwind: "$messages" },
						{ $match: { "messages.from": "assistant" } },
						{ $count: "messages" },
					])
					.toArray()
			)[0]?.messages ?? 0;

		if (totalMessages > messagesBeforeLogin) {
			throw error(429, "Exceeded number of messages before login");
		}
	}

	if (usageLimits?.messagesPerMinute) {
		// check if the user is rate limited
		// NOTE(review): these queries have no createdAt window — presumably a
		// TTL index on messageEvents bounds them to the last minute; confirm.
		const nEvents = Math.max(
			await collections.messageEvents.countDocuments({ userId }),
			await collections.messageEvents.countDocuments({ ip: getClientAddress() })
		);
		if (nEvents > usageLimits.messagesPerMinute) {
			throw error(429, ERROR_MESSAGES.rateLimited);
		}
	}

	// hard cap on conversation length, if configured
	if (usageLimits?.messages && conv.messages.length > usageLimits.messages) {
		throw error(
			429,
			`This conversation has more than ${usageLimits.messages} messages. Start a new one to continue`
		);
	}

	// fetch the model
	const model = models.find((m) => m.id === conv.model);

	if (!model) {
		throw error(410, "Model not available anymore");
	}

	// finally parse the content of the request
	const json = await request.json();

	const {
		inputs: newPrompt,
		id: messageId,
		is_retry: isRetry,
		is_continue: isContinue,
		web_search: webSearch,
		files: b64files,
	} = z
		.object({
			id: z.string().uuid().refine(isMessageId).optional(), // parent message id to append to for a normal message, or the message id for a retry/continue
			inputs: z.optional(
				z
					.string()
					.trim()
					.min(1)
					.transform((s) => s.replace(/\r\n/g, "\n")) // normalize CRLF to LF
			),
			is_retry: z.optional(z.boolean()),
			is_continue: z.optional(z.boolean()),
			web_search: z.optional(z.boolean()),
			files: z.optional(z.array(z.string())),
		})
		.parse(json);

	if (usageLimits?.messageLength && (newPrompt?.length ?? 0) > usageLimits.messageLength) {
		throw error(400, "Message too long.");
	}
	// files is an array of base64 strings encoding Blob objects
	// we need to convert this array to an array of File objects

	const files = b64files?.map((file) => {
		const blob = Buffer.from(file, "base64");
		return new File([blob], "image.png");
	});

	// check sizes: reject any file over 2MB or larger than 224x224 pixels
	if (files) {
		const filechecks = await Promise.all(
			files.map(async (file) => {
				const dimensions = sizeof(Buffer.from(await file.arrayBuffer()));
				return (
					file.size > 2 * 1024 * 1024 ||
					(dimensions.width ?? 0) > 224 ||
					(dimensions.height ?? 0) > 224
				);
			})
		);

		if (filechecks.some((check) => check)) {
			throw error(413, "File too large, should be <2MB and 224x224 max.");
		}
	}

	let hashes: undefined | string[];

	// upload the files and keep their content hashes to attach to the message
	if (files) {
		hashes = await Promise.all(files.map(async (file) => await uploadFile(file, conv)));
	}

	// we will append tokens to the content of this message
	let messageToWriteToId: Message["id"] | undefined = undefined;
	// used for building the prompt, subtree of the conversation that goes from the latest message to the root
	let messagesForPrompt: Message[] = [];

	if (isContinue && messageId) {
		// if it's the last message and we continue then we build the prompt up to the last message
		// we will strip the end tokens afterwards when the prompt is built
		if ((conv.messages.find((msg) => msg.id === messageId)?.children?.length ?? 0) > 0) {
			throw error(400, "Can only continue the last message");
		}
		messageToWriteToId = messageId;
		messagesForPrompt = buildSubtree(conv, messageId);
	} else if (isRetry && messageId) {
		// two cases, if we're retrying a user message with a newPrompt set,
		// it means we're editing a user message
		// if we're retrying on an assistant message, newPrompt cannot be set
		// it means we're retrying the last assistant message for a new answer

		const messageToRetry = conv.messages.find((message) => message.id === messageId);

		if (!messageToRetry) {
			throw error(404, "Message not found");
		}

		if (messageToRetry.from === "user" && newPrompt) {
			// add a sibling to this message from the user, with the alternative prompt
			// add a children to that sibling, where we can write to
			const newUserMessageId = addSibling(
				conv,
				{ from: "user", content: newPrompt, createdAt: new Date(), updatedAt: new Date() },
				messageId
			);
			messageToWriteToId = addChildren(
				conv,
				{
					from: "assistant",
					content: "",
					files: hashes,
					createdAt: new Date(),
					updatedAt: new Date(),
				},
				newUserMessageId
			);
			messagesForPrompt = buildSubtree(conv, newUserMessageId);
		} else if (messageToRetry.from === "assistant") {
			// we're retrying an assistant message, to generate a new answer
			// just add a sibling to the assistant answer where we can write to
			messageToWriteToId = addSibling(
				conv,
				{ from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date() },
				messageId
			);
			messagesForPrompt = buildSubtree(conv, messageId);
			messagesForPrompt.pop(); // don't need the latest assistant message in the prompt since we're retrying it
		}
	} else {
		// just a normal linear conversation, so we add the user message
		// and the blank assistant message back to back
		const newUserMessageId = addChildren(
			conv,
			{
				from: "user",
				content: newPrompt ?? "",
				files: hashes,
				createdAt: new Date(),
				updatedAt: new Date(),
			},
			messageId
		);

		messageToWriteToId = addChildren(
			conv,
			{
				from: "assistant",
				content: "",
				createdAt: new Date(),
				updatedAt: new Date(),
			},
			newUserMessageId
		);
		// build the prompt from the user message
		messagesForPrompt = buildSubtree(conv, newUserMessageId);
	}

	const messageToWriteTo = conv.messages.find((message) => message.id === messageToWriteToId);
	if (!messageToWriteTo) {
		throw error(500, "Failed to create message");
	}
	if (messagesForPrompt.length === 0) {
		throw error(500, "Failed to create prompt");
	}

	// update the conversation with the new messages
	await collections.conversations.updateOne(
		{
			_id: convId,
		},
		{
			$set: {
				messages: conv.messages,
				title: conv.title,
				updatedAt: new Date(),
			},
		}
	);

	let doneStreaming = false;

	// we now build the stream
	const stream = new ReadableStream({
		async start(controller) {
			messageToWriteTo.updates ??= [];
			// Record non-stream updates on the message document and forward
			// every update to the client as one JSON line. Empty stream tokens
			// are dropped to avoid no-op events.
			function update(newUpdate: MessageUpdate) {
				if (newUpdate.type !== "stream") {
					messageToWriteTo?.updates?.push(newUpdate);
				}

				if (newUpdate.type === "stream" && newUpdate.token === "") {
					return;
				}
				controller.enqueue(JSON.stringify(newUpdate) + "\n");

				if (newUpdate.type === "finalAnswer") {
					// enqueue 4096 spaces of padding so intermediaries/browsers
					// flush their buffers and deliver the final answer promptly
					controller.enqueue(" ".repeat(4096));
				}
			}

			update({ type: "status", status: "started" });

			// Kick off title summarization concurrently; awaited at the end of
			// start() so it never delays token streaming.
			// NOTE(review): length === 3 presumably means system + first user
			// message + the blank assistant message just added — confirm.
			const summarizeIfNeeded = (async () => {
				if (conv.title === "New Chat" && conv.messages.length === 3) {
					try {
						conv.title = (await summarize(conv.messages[1].content)) ?? conv.title;
						update({ type: "status", status: "title", message: conv.title });
						await collections.conversations.updateOne(
							{
								_id: convId,
							},
							{
								$set: {
									title: conv?.title,
									updatedAt: new Date(),
								},
							}
						);
					} catch (e) {
						console.error(e);
					}
				}
			})();

			await collections.conversations.updateOne(
				{
					_id: convId,
				},
				{
					$set: {
						title: conv.title,
						updatedAt: new Date(),
					},
				}
			);

			// check if assistant has a rag
			const assistant = await collections.assistants.findOne<
				Pick<Assistant, "rag" | "dynamicPrompt" | "generateSettings">
			>(
				{ _id: conv.assistantId },
				{ projection: { rag: 1, dynamicPrompt: 1, generateSettings: 1 } }
			);

			const assistantHasDynamicPrompt =
				ENABLE_ASSISTANTS_RAG === "true" && !!assistant && !!assistant?.dynamicPrompt;

			// RAG counts as web search when at least one link/domain is allowed
			// (or all domains are allowed)
			const assistantHasWebSearch =
				ENABLE_ASSISTANTS_RAG === "true" &&
				!!assistant &&
				!!assistant.rag &&
				(assistant.rag.allowedLinks.length > 0 ||
					assistant.rag.allowedDomains.length > 0 ||
					assistant.rag.allowAllDomains);

			// perform websearch if needed
			if (!isContinue && (webSearch || assistantHasWebSearch)) {
				messageToWriteTo.webSearch = await runWebSearch(
					conv,
					messagesForPrompt,
					update,
					assistant?.rag
				);
			}

			let preprompt = conv.preprompt;

			// Dynamic prompts can embed {{url=...}} placeholders that are
			// replaced with the fetched page content (or the fetch error text).
			if (assistantHasDynamicPrompt && preprompt) {
				// process the preprompt
				const urlRegex = /{{\s?url=(.*?)\s?}}/g;
				let match;
				while ((match = urlRegex.exec(preprompt)) !== null) {
					try {
						const url = new URL(match[1]);
						// SSRF guard: refuse URLs that resolve to local addresses
						if (await isURLLocal(url)) {
							throw new Error("URL couldn't be fetched, it resolved to a local address.");
						}

						const res = await fetch(url.href);

						if (!res.ok) {
							throw new Error("URL couldn't be fetched, error " + res.status);
						}
						const text = await res.text();
						preprompt = preprompt.replaceAll(match[0], text);
					} catch (e) {
						preprompt = preprompt.replaceAll(match[0], (e as Error).message);
					}
				}

				if (messagesForPrompt[0].from === "system") {
					messagesForPrompt[0].content = preprompt;
				}
			}

			// inject websearch result & optionally images into the messages
			const processedMessages = await preprocessMessages(
				messagesForPrompt,
				messageToWriteTo.webSearch,
				model.multimodal,
				convId
			);

			// Baseline content; on `continue` this is the partial answer we
			// append to, and it's also used to detect empty generations below.
			const previousText = messageToWriteTo.content;

			let hasError = false;

			// Holds generated text not yet flushed to the client; tokens are
			// sent in 5-char chunks to reduce event frequency.
			let buffer = "";

			messageToWriteTo.updatedAt = new Date();

			try {
				const endpoint = await model.getEndpoint();
				for await (const output of await endpoint({
					messages: processedMessages,
					preprompt,
					continueMessage: isContinue,
					generateSettings: assistant?.generateSettings,
				})) {
					// if not generated_text is here it means the generation is not done
					if (!output.generated_text) {
						if (!output.token.special) {
							buffer += output.token.text;

							// send the first 5 chars
							// and leave the rest in the buffer
							if (buffer.length >= 5) {
								update({
									type: "stream",
									token: buffer.slice(0, 5),
								});
								buffer = buffer.slice(5);
							}

							// abort check
							const date = abortedGenerations.get(convId.toString());
							if (date && date > promptedAt) {
								break;
							}
							// no output check
							// NOTE(review): unreachable — `output` was already
							// dereferenced above, so it can't be falsy here.
							if (!output) {
								break;
							}

							// otherwise we just concatenate tokens
							messageToWriteTo.content += output.token.text;
						}
					} else {
						// Mark as interrupted unless generation ended on a stop
						// token (special, listed in model.parameters.stop, or
						// found at the end of the generated text below).
						messageToWriteTo.interrupted =
							!output.token.special && !model.parameters.stop?.includes(output.token.text);
						// add output.generated text to the last message
						// strip end tokens from the output.generated_text
						const text = (model.parameters.stop ?? []).reduce((acc: string, curr: string) => {
							if (acc.endsWith(curr)) {
								messageToWriteTo.interrupted = false;
								return acc.slice(0, acc.length - curr.length);
							}
							return acc;
						}, output.generated_text.trimEnd());

						messageToWriteTo.content = previousText + text;
					}
				}
			} catch (e) {
				hasError = true;
				update({ type: "status", status: "error", message: (e as Error).message });
			} finally {
				// check if no output was generated
				if (!hasError && messageToWriteTo.content === previousText) {
					update({
						type: "status",
						status: "error",
						message: "No output was generated. Something went wrong.",
					});
				}

				// flush whatever is left in the 5-char streaming buffer
				if (buffer) {
					update({
						type: "stream",
						token: buffer,
					});
				}
			}

			// persist the completed (or aborted/errored) message tree
			await collections.conversations.updateOne(
				{
					_id: convId,
				},
				{
					$set: {
						messages: conv.messages,
						title: conv?.title,
						updatedAt: new Date(),
					},
				}
			);

			// used to detect if cancel() is called bc of interrupt or just because the connection closes
			doneStreaming = true;

			update({
				type: "finalAnswer",
				text: messageToWriteTo.content,
			});

			await summarizeIfNeeded;
			controller.close();
			return;
		},
		// Client disconnected mid-generation: persist whatever was generated
		// so far (start() already saved everything when doneStreaming is true).
		async cancel() {
			if (!doneStreaming) {
				await collections.conversations.updateOne(
					{
						_id: convId,
					},
					{
						$set: {
							messages: conv.messages,
							title: conv.title,
							updatedAt: new Date(),
						},
					}
				);
			}
		},
	});

	// bump the per-hour usage counter for the assistant, if any
	if (conv.assistantId) {
		await collections.assistantStats.updateOne(
			{ assistantId: conv.assistantId, "date.at": startOfHour(new Date()), "date.span": "hour" },
			{ $inc: { count: 1 } },
			{ upsert: true }
		);
	}

	// Todo: maybe we should wait for the message to be saved before ending the response - in case of errors
	return new Response(stream, {
		headers: {
			"Content-Type": "text/event-stream",
		},
	});
}

/**
 * Delete a conversation owned by the current user or session.
 * Responds 404 when the conversation does not exist or is not accessible.
 */
export async function DELETE({ locals, params }) {
	const conversationId = new ObjectId(params.id);

	// Only match conversations the caller is authorized to access.
	const conversation = await collections.conversations.findOne({
		_id: conversationId,
		...authCondition(locals),
	});

	if (!conversation) {
		throw error(404, "Conversation not found");
	}

	await collections.conversations.deleteOne({ _id: conversation._id });

	return new Response();
}

/**
 * Rename a conversation. Body: `{ title: string }`, 1–100 chars after trimming.
 * Responds 404 when the conversation does not exist or is not accessible.
 */
export async function PATCH({ request, locals, params }) {
	const body = await request.json();
	const { title } = z.object({ title: z.string().trim().min(1).max(100) }).parse(body);

	const conversationId = new ObjectId(params.id);

	// Only match conversations the caller is authorized to access.
	const conversation = await collections.conversations.findOne({
		_id: conversationId,
		...authCondition(locals),
	});

	if (!conversation) {
		throw error(404, "Conversation not found");
	}

	await collections.conversations.updateOne(
		{
			_id: conversationId,
		},
		{
			$set: {
				title,
			},
		}
	);

	return new Response();
}