jbilcke-hf HF staff committed on
Commit 11d758a
1 Parent(s): e22103d

we now have.. custom models ☄️.. and settings ✨

package-lock.json CHANGED
@@ -62,6 +62,7 @@
         "tailwindcss-animate": "^1.0.6",
         "ts-node": "^10.9.1",
         "typescript": "5.1.6",
+        "usehooks-ts": "^2.9.1",
         "uuid": "^9.0.0",
         "zustand": "^4.4.1"
       },
@@ -6808,6 +6809,19 @@
         "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
       }
     },
+    "node_modules/usehooks-ts": {
+      "version": "2.9.1",
+      "resolved": "https://registry.npmjs.org/usehooks-ts/-/usehooks-ts-2.9.1.tgz",
+      "integrity": "sha512-2FAuSIGHlY+apM9FVlj8/oNhd+1y+Uwv5QNkMQz1oSfdHk4PXo1qoCw9I5M7j0vpH8CSWFJwXbVPeYDjLCx9PA==",
+      "engines": {
+        "node": ">=16.15.0",
+        "npm": ">=8"
+      },
+      "peerDependencies": {
+        "react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+        "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0"
+      }
+    },
     "node_modules/util-deprecate": {
       "version": "1.0.2",
       "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
package.json CHANGED
@@ -63,6 +63,7 @@
     "tailwindcss-animate": "^1.0.6",
     "ts-node": "^10.9.1",
     "typescript": "5.1.6",
+    "usehooks-ts": "^2.9.1",
     "uuid": "^9.0.0",
     "zustand": "^4.4.1"
   },
src/app/engine/presets.ts CHANGED
@@ -36,6 +36,18 @@ export const presets: Record<string, Preset> = {
     imagePrompt: (prompt: string) => [],
     negativePrompt: () => [],
   },
+  neutral: {
+    id: "neutral",
+    label: "Neutral (no style)",
+    family: "american",
+    color: "color",
+    font: "actionman",
+    llmPrompt: "",
+    imagePrompt: (prompt: string) => [
+      prompt,
+    ],
+    negativePrompt: () => [ ],
+  },
   japanese_manga: {
     id: "japanese_manga",
     label: "Japanese",
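Note on the new "neutral" preset: it keeps llmPrompt empty and its imagePrompt passes the user prompt through with no extra style keywords, which is the preset the settings dialog later recommends for custom LoRA models (the trigger keyword is appended by the rendering layer instead). A quick sketch of what it evaluates to, assuming the Preset shape used elsewhere in this file:

// Hypothetical usage sketch for the "neutral" preset added above.
import { presets } from "@/app/engine/presets"

const parts = presets.neutral.imagePrompt("a cat riding a bicycle")
// -> ["a cat riding a bicycle"]  (no style terms injected)

const negative = presets.neutral.negativePrompt()
// -> []  (no negative prompt either)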
src/app/engine/render.ts CHANGED
@@ -2,43 +2,46 @@
 
 import { v4 as uuidv4 } from "uuid"
 import Replicate from "replicate"
-import OpenAI from "openai"
 
-import { RenderRequest, RenderedScene, RenderingEngine } from "@/types"
+import { RenderRequest, RenderedScene, RenderingEngine, Settings } from "@/types"
 import { generateSeed } from "@/lib/generateSeed"
 import { sleep } from "@/lib/sleep"
 
-const renderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine
+const serverRenderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine
 
 // TODO: we should split Hugging Face and Replicate backends into separate files
-const huggingFaceToken = `${process.env.AUTH_HF_API_TOKEN || ""}`
-const huggingFaceInferenceEndpointUrl = `${process.env.RENDERING_HF_INFERENCE_ENDPOINT_URL || ""}`
-const huggingFaceInferenceApiBaseModel = `${process.env.RENDERING_HF_INFERENCE_API_BASE_MODEL || ""}`
-const huggingFaceInferenceApiRefinerModel = `${process.env.RENDERING_HF_INFERENCE_API_REFINER_MODEL || ""}`
+const serverHuggingfaceApiKey = `${process.env.AUTH_HF_API_TOKEN || ""}`
+const serverHuggingfaceApiUrl = `${process.env.RENDERING_HF_INFERENCE_ENDPOINT_URL || ""}`
+const serverHuggingfaceInferenceApiModel = `${process.env.RENDERING_HF_INFERENCE_API_BASE_MODEL || ""}`
+const serverHuggingfaceInferenceApiModelRefinerModel = `${process.env.RENDERING_HF_INFERENCE_API_REFINER_MODEL || ""}`
+const serverHuggingfaceInferenceApiModelTrigger = `${process.env.RENDERING_HF_INFERENCE_API_MODEL_TRIGGER || "style of TOK"}`
 
-const replicateToken = `${process.env.AUTH_REPLICATE_API_TOKEN || ""}`
-const replicateModel = `${process.env.RENDERING_REPLICATE_API_MODEL || ""}`
-const replicateModelVersion = `${process.env.RENDERING_REPLICATE_API_MODEL_VERSION || ""}`
+const serverReplicateApiKey = `${process.env.AUTH_REPLICATE_API_TOKEN || ""}`
+const serverReplicateApiModel = `${process.env.RENDERING_REPLICATE_API_MODEL || ""}`
+const serverReplicateApiModelVersion = `${process.env.RENDERING_REPLICATE_API_MODEL_VERSION || ""}`
+const serverReplicateApiModelTrigger = `${process.env.RENDERING_REPLICATE_API_MODEL_TRIGGER || ""}`
 
 const videochainToken = `${process.env.AUTH_VIDEOCHAIN_API_TOKEN || ""}`
 const videochainApiUrl = `${process.env.RENDERING_VIDEOCHAIN_API_URL || ""}`
 
-const openaiApiKey = `${process.env.AUTH_OPENAI_API_KEY || ""}`
-const openaiApiBaseUrl = `${process.env.RENDERING_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`
-const openaiApiModel = `${process.env.RENDERING_OPENAI_API_MODEL || "dall-e-3"}`
+const serverOpenaiApiKey = `${process.env.AUTH_OPENAI_API_KEY || ""}`
+const serverOpenaiApiBaseUrl = `${process.env.RENDERING_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`
+const serverOpenaiApiModel = `${process.env.RENDERING_OPENAI_API_MODEL || "dall-e-3"}`
 
 export async function newRender({
   prompt,
   // negativePrompt,
   width,
   height,
-  withCache
+  withCache,
+  settings,
 }: {
   prompt: string
   // negativePrompt: string[]
   width: number
   height: number
   withCache: boolean
+  settings: Settings
 }) {
   // throw new Error("Planned maintenance")
 
@@ -61,6 +64,59 @@ export async function newRender({
   const nbInferenceSteps = 30
   const guidanceScale = 9
 
+  let renderingEngine = serverRenderingEngine
+  let openaiApiKey = serverOpenaiApiKey
+  let openaiApiModel = serverOpenaiApiModel
+
+  let replicateApiKey = serverReplicateApiKey
+  let replicateApiModel = serverReplicateApiModel
+  let replicateApiModelVersion = serverReplicateApiModelVersion
+  let replicateApiModelTrigger = serverReplicateApiModelTrigger
+
+  let huggingfaceApiKey = serverHuggingfaceApiKey
+  let huggingfaceInferenceApiModel = serverHuggingfaceInferenceApiModel
+  let huggingfaceApiUrl = serverHuggingfaceApiUrl
+  let huggingfaceInferenceApiModelRefinerModel = serverHuggingfaceInferenceApiModelRefinerModel
+  let huggingfaceInferenceApiModelTrigger = serverHuggingfaceInferenceApiModelTrigger
+
+  const placeholder = "<USE YOUR OWN TOKEN>"
+
+  if (
+    settings.renderingModelVendor === "OPENAI" &&
+    settings.openaiApiKey &&
+    settings.openaiApiKey !== placeholder &&
+    settings.openaiApiModel
+  ) {
+    console.log("using OpenAI using user credentials (hidden)")
+    renderingEngine = "OPENAI"
+    openaiApiKey = settings.openaiApiKey
+    openaiApiModel = settings.openaiApiModel
+  } if (
+    settings.renderingModelVendor === "REPLICATE" &&
+    settings.replicateApiKey &&
+    settings.replicateApiKey !== placeholder &&
+    settings.replicateApiModel &&
+    settings.replicateApiModelVersion
+  ) {
+    console.log("using Replicate using user credentials (hidden)")
+    renderingEngine = "REPLICATE"
+    replicateApiKey = settings.replicateApiKey
+    replicateApiModel = settings.replicateApiModel
+    replicateApiModelVersion = settings.replicateApiModelVersion
+    replicateApiModelTrigger = settings.replicateApiModelTrigger
+  } else if (
+    settings.renderingModelVendor === "HUGGINGFACE" &&
+    settings.huggingfaceApiKey &&
+    settings.huggingfaceApiKey !== placeholder &&
+    settings.huggingfaceInferenceApiModel
+  ) {
+    console.log("using Hugging Face using user credentials (hidden)")
+    renderingEngine = "INFERENCE_API"
+    huggingfaceApiKey = settings.huggingfaceApiKey
+    huggingfaceInferenceApiModel = settings.huggingfaceInferenceApiModel
+    huggingfaceInferenceApiModelTrigger = settings.huggingfaceInferenceApiModelTrigger
+  }
+
   try {
     if (renderingEngine === "OPENAI") {
 
@@ -89,7 +145,7 @@ export async function newRender({
      })
      */
 
-      const res = await fetch(`${openaiApiBaseUrl}/images/generations`, {
+      const res = await fetch(`${serverOpenaiApiBaseUrl}/images/generations`, {
        method: "POST",
        headers: {
          Accept: "application/json",
@@ -97,7 +153,7 @@ export async function newRender({
          Authorization: `Bearer ${openaiApiKey}`,
        },
        body: JSON.stringify({
-          model: "dall-e-3",
+          model: openaiApiModel,
          prompt,
          n: 1,
          size,
@@ -125,28 +181,29 @@ export async function newRender({
        segments: []
      } as RenderedScene
    } else if (renderingEngine === "REPLICATE") {
-      if (!replicateToken) {
-        throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
+      if (!replicateApiKey) {
+        throw new Error(`invalid replicateApiKey, you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
      }
-      if (!replicateModel) {
-        throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
+      if (!replicateApiModel) {
+        throw new Error(`invalid replicateApiModel, you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
      }
-      if (!replicateModelVersion) {
-        throw new Error(`you need to configure your REPLICATE_API_MODEL_VERSION in order to use the REPLICATE rendering engine`)
+      if (!replicateApiModelVersion) {
+        throw new Error(`invalid replicateApiModelVersion, you need to configure your REPLICATE_API_MODEL_VERSION in order to use the REPLICATE rendering engine`)
      }
-      const replicate = new Replicate({ auth: replicateToken })
+      const replicate = new Replicate({ auth: replicateApiKey })
 
      const seed = generateSeed()
      const prediction = await replicate.predictions.create({
-        version: replicateModelVersion,
+        version: replicateApiModelVersion,
        input: {
          prompt: [
            "beautiful",
            // "intricate details",
+            replicateApiModelTrigger || "",
            prompt,
            "award winning",
            "high resolution"
-          ].join(", "),
+          ].filter(x => x).join(", "),
          width,
          height,
          seed
@@ -167,36 +224,37 @@ export async function newRender({
        segments: []
      } as RenderedScene
    } if (renderingEngine === "INFERENCE_ENDPOINT" || renderingEngine === "INFERENCE_API") {
-      if (!huggingFaceToken) {
-        throw new Error(`you need to configure your HF_API_TOKEN in order to use the ${renderingEngine} rendering engine`)
+      if (!huggingfaceApiKey) {
+        throw new Error(`invalid huggingfaceApiKey, you need to configure your HF_API_TOKEN in order to use the ${renderingEngine} rendering engine`)
      }
-      if (renderingEngine === "INFERENCE_ENDPOINT" && !huggingFaceInferenceEndpointUrl) {
-        throw new Error(`you need to configure your RENDERING_HF_INFERENCE_ENDPOINT_URL in order to use the INFERENCE_ENDPOINT rendering engine`)
+      if (renderingEngine === "INFERENCE_ENDPOINT" && !huggingfaceApiUrl) {
+        throw new Error(`invalid huggingfaceApiUrl, you need to configure your RENDERING_HF_INFERENCE_ENDPOINT_URL in order to use the INFERENCE_ENDPOINT rendering engine`)
      }
-      if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiBaseModel) {
-        throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_BASE_MODEL in order to use the INFERENCE_API rendering engine`)
+      if (renderingEngine === "INFERENCE_API" && !huggingfaceInferenceApiModel) {
+        throw new Error(`invalid huggingfaceInferenceApiModel, you need to configure your RENDERING_HF_INFERENCE_API_BASE_MODEL in order to use the INFERENCE_API rendering engine`)
      }
-      if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiRefinerModel) {
-        throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_REFINER_MODEL in order to use the INFERENCE_API rendering engine`)
+      if (renderingEngine === "INFERENCE_API" && !huggingfaceInferenceApiModelRefinerModel) {
+        throw new Error(`invalid huggingfaceInferenceApiModelRefinerModel, you need to configure your RENDERING_HF_INFERENCE_API_REFINER_MODEL in order to use the INFERENCE_API rendering engine`)
      }
 
      const baseModelUrl = renderingEngine === "INFERENCE_ENDPOINT"
-        ? huggingFaceInferenceEndpointUrl
-        : `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiBaseModel}`
+        ? huggingfaceApiUrl
+        : `https://api-inference.huggingface.co/models/${huggingfaceInferenceApiModel}`
 
      const positivePrompt = [
        "beautiful",
        // "intricate details",
+        huggingfaceInferenceApiModelTrigger || "",
        prompt,
        "award winning",
        "high resolution"
-      ].join(", ")
+      ].filter(x => x).join(", ")
 
      const res = await fetch(baseModelUrl, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
-          Authorization: `Bearer ${huggingFaceToken}`,
+          Authorization: `Bearer ${huggingfaceApiKey}`,
        },
        body: JSON.stringify({
          inputs: positivePrompt,
@@ -235,15 +293,13 @@ export async function newRender({
 
      if (renderingEngine === "INFERENCE_API") {
        try {
-          const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiRefinerModel}`
-
-
+          const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingfaceInferenceApiModelRefinerModel}`
 
          const res = await fetch(refinerModelUrl, {
            method: "POST",
            headers: {
              "Content-Type": "application/json",
-              Authorization: `Bearer ${huggingFaceToken}`,
+              Authorization: `Bearer ${huggingfaceApiKey}`,
            },
            body: JSON.stringify({
              inputs: Buffer.from(blob).toString('base64'),
@@ -283,7 +339,6 @@ export async function newRender({
        }
      }
 
-
      return {
        renderId: uuidv4(),
        status: "completed",
@@ -341,13 +396,65 @@ export async function newRender({
   }
 }
 
-export async function getRender(renderId: string) {
+export async function getRender(renderId: string, settings: Settings) {
   if (!renderId) {
     const error = `cannot call the rendering API without a renderId, aborting..`
     console.error(error)
     throw new Error(error)
   }
 
+
+  let renderingEngine = serverRenderingEngine
+  let openaiApiKey = serverOpenaiApiKey
+  let openaiApiModel = serverOpenaiApiModel
+
+  let replicateApiKey = serverReplicateApiKey
+  let replicateApiModel = serverReplicateApiModel
+  let replicateApiModelVersion = serverReplicateApiModelVersion
+  let replicateApiModelTrigger = serverReplicateApiModelTrigger
+
+  let huggingfaceApiKey = serverHuggingfaceApiKey
+  let huggingfaceInferenceApiModel = serverHuggingfaceInferenceApiModel
+  let huggingfaceInferenceApiModelTrigger = serverHuggingfaceInferenceApiModelTrigger
+  let huggingfaceApiUrl = serverHuggingfaceApiUrl
+  let huggingfaceInferenceApiModelRefinerModel = serverHuggingfaceInferenceApiModelRefinerModel
+
+  const placeholder = "<USE YOUR OWN TOKEN>"
+
+  if (
+    settings.renderingModelVendor === "OPENAI" &&
+    settings.openaiApiKey &&
+    settings.openaiApiKey !== placeholder &&
+    settings.openaiApiModel
+  ) {
+    renderingEngine = "OPENAI"
+    openaiApiKey = settings.openaiApiKey
+    openaiApiModel = settings.openaiApiModel
+  } if (
+    settings.renderingModelVendor === "REPLICATE" &&
+    settings.replicateApiKey &&
+    settings.replicateApiKey !== placeholder &&
+    settings.replicateApiModel &&
+    settings.replicateApiModelVersion
+  ) {
+    renderingEngine = "REPLICATE"
+    replicateApiKey = settings.replicateApiKey
+    replicateApiModel = settings.replicateApiModel
+    replicateApiModelVersion = settings.replicateApiModelVersion
+    replicateApiModelTrigger = settings.replicateApiModelTrigger
+  } else if (
+    settings.renderingModelVendor === "HUGGINGFACE" &&
+    settings.huggingfaceApiKey &&
+    settings.huggingfaceApiKey !== placeholder &&
+    settings.huggingfaceInferenceApiModel
+  ) {
+    // console.log("using Hugging Face using user credentials (hidden)")
+    renderingEngine = "INFERENCE_API"
+    huggingfaceApiKey = settings.huggingfaceApiKey
+    huggingfaceInferenceApiModel = settings.huggingfaceInferenceApiModel
+    huggingfaceInferenceApiModelTrigger = settings.huggingfaceInferenceApiModelTrigger
+  }
+
   let defaulResult: RenderedScene = {
     renderId: "",
     status: "pending",
@@ -360,17 +467,14 @@ export async function getRender(renderId: string) {
 
   try {
     if (renderingEngine === "REPLICATE") {
-      if (!replicateToken) {
-        throw new Error(`you need to configure your AUTH_REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
-      }
-      if (!replicateModel) {
-        throw new Error(`you need to configure your RENDERING_REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
+      if (!replicateApiKey) {
+        throw new Error(`invalid replicateApiKey, you need to configure your AUTH_REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
      }
 
      const res = await fetch(`https://api.replicate.com/v1/predictions/${renderId}`, {
        method: "GET",
        headers: {
-          Authorization: `Token ${replicateToken}`,
+          Authorization: `Token ${replicateApiKey}`,
        },
        cache: 'no-store',
        // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
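In short, newRender() and getRender() now start from the server-side env defaults and only switch to the user's own credentials when the settings object carries a real value rather than the "<USE YOUR OWN TOKEN>" placeholder. A minimal sketch of that override pattern, using a hypothetical resolveOpenAICredentials helper (the commit inlines this logic instead of extracting it):

// Sketch only: the OPENAI branch of the credential override logic above.
import { RenderingEngine, Settings } from "@/types"

const placeholder = "<USE YOUR OWN TOKEN>"

interface ServerDefaults {
  renderingEngine: RenderingEngine
  openaiApiKey: string
  openaiApiModel: string
}

function resolveOpenAICredentials(settings: Settings, server: ServerDefaults): ServerDefaults {
  // Only trust the user's key when it is present and not the placeholder string.
  if (
    settings.renderingModelVendor === "OPENAI" &&
    settings.openaiApiKey &&
    settings.openaiApiKey !== placeholder &&
    settings.openaiApiModel
  ) {
    return {
      renderingEngine: "OPENAI",
      openaiApiKey: settings.openaiApiKey,
      openaiApiModel: settings.openaiApiModel,
    }
  }
  // Otherwise fall back to the server-side environment variables.
  return server
}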
src/app/interface/bottom-bar/index.tsx CHANGED
@@ -7,6 +7,7 @@ import { upscaleImage } from "@/app/engine/render"
 import { sleep } from "@/lib/sleep"
 import { AIClipFactory } from "../ai-clip-factory"
 import { Share } from "../share"
+import { SettingsDialog } from "../settings-dialog"
 
 export function BottomBar() {
   const download = useStore(state => state.download)
@@ -88,21 +89,16 @@
        `space-x-3`,
        `scale-[0.9]`
      )}>
-      <div>
-        {
-          // there is an issue, this env check doesn't work..
-          // process.env.NEXT_PUBLIC_CAN_UPSCALE === "true" ?
-          <Button
-            onClick={handleUpscale}
-            disabled={!prompt?.length || remainingImages > 0 || isUpscaling || !Object.values(upscaleQueue).length}
-          >
-            {isUpscaling
+      <SettingsDialog />
+      <Button
+        onClick={handleUpscale}
+        disabled={!prompt?.length || remainingImages > 0 || isUpscaling || !Object.values(upscaleQueue).length}
+      >
+        {isUpscaling
          ? `${allStatus.length - Object.values(upscaleQueue).length}/${allStatus.length} ⌛`
          : "Upscale"}
-          </Button>
-          // : null
-        }
-      </div>
+      </Button>
+
      {/*
      <div>
        <Button
src/app/interface/panel/index.tsx CHANGED
@@ -13,6 +13,7 @@ import { getInitialRenderedScene } from "@/lib/getInitialRenderedScene"
 import { Progress } from "@/app/interface/progress"
 import { EditModal } from "../edit-modal"
 import { Bubble } from "./bubble"
+import { getSettings } from "../settings-dialog/getSettings"
 
 export function Panel({
   page,
@@ -124,7 +125,8 @@ export function Panel({
 
        // TODO: here we never reset the revision, so only the first user
        // comic will be cached (we should fix that later)
-        withCache: revision === 0
+        withCache: revision === 0,
+        settings: getSettings(),
      })
    } catch (err) {
      // "Failed to load the panel! Don't worry, we are retrying..")
@@ -133,6 +135,7 @@ export function Panel({
        width,
        height,
        withCache,
+        settings: getSettings(),
      })
    }
 
@@ -174,7 +177,7 @@ export function Panel({
 
    try {
      setGeneratingImages(panelId, true)
-      const newRendered = await getRender(renderedRef.current.renderId)
+      const newRendered = await getRender(renderedRef.current.renderId, getSettings())
 
      if (JSON.stringify(renderedRef.current) !== JSON.stringify(newRendered)) {
        setRendered(panelId, renderedRef.current = newRendered)
@@ -191,6 +194,7 @@ export function Panel({
          width,
          height,
          withCache: false,
+          settings: getSettings(),
        })
        setRendered(panelId, newAttempt)
      } catch (err) {
src/app/interface/settings-dialog/defaultSettings.ts ADDED
@@ -0,0 +1,14 @@
+import { RenderingModelVendor, Settings } from "@/types"
+
+export const defaultSettings: Settings = {
+  renderingModelVendor: "SERVER" as RenderingModelVendor,
+  huggingfaceApiKey: "<USE YOUR OWN TOKEN>",
+  huggingfaceInferenceApiModel: "stabilityai/stable-diffusion-xl-base-1.0",
+  huggingfaceInferenceApiModelTrigger: "",
+  replicateApiKey: "<USE YOUR OWN TOKEN>",
+  replicateApiModel: "stabilityai/sdxl",
+  replicateApiModelVersion: "da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf",
+  replicateApiModelTrigger: "style of TOK",
+  openaiApiKey: "<USE YOUR OWN TOKEN>",
+  openaiApiModel: "dall-e-3",
+}
src/app/interface/settings-dialog/field.tsx ADDED
@@ -0,0 +1,7 @@
+import { ReactNode } from "react"
+
+export function Field({ children }: { children: ReactNode }) {
+  return (
+    <div className="flex flex-col space-y-2">{children}</div>
+  )
+}
src/app/interface/settings-dialog/getSettings.ts ADDED
@@ -0,0 +1,26 @@
+import { RenderingModelVendor, Settings } from "@/types"
+
+import { getValidString } from "@/lib/getValidString"
+import { localStorageKeys } from "./localStorageKeys"
+import { defaultSettings } from "./defaultSettings"
+
+export function getSettings(): Settings {
+  try {
+    return {
+      renderingModelVendor: getValidString(localStorage?.getItem?.(localStorageKeys.renderingModelVendor), defaultSettings.renderingModelVendor) as RenderingModelVendor,
+      huggingfaceApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceApiKey), defaultSettings.huggingfaceApiKey),
+      huggingfaceInferenceApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceInferenceApiModel), defaultSettings.huggingfaceInferenceApiModel),
+      huggingfaceInferenceApiModelTrigger: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceInferenceApiModelTrigger), defaultSettings.huggingfaceInferenceApiModelTrigger),
+      replicateApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiKey), defaultSettings.replicateApiKey),
+      replicateApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiModel), defaultSettings.replicateApiModel),
+      replicateApiModelVersion: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiModelVersion), defaultSettings.replicateApiModelVersion),
+      replicateApiModelTrigger: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiModelTrigger), defaultSettings.replicateApiModelTrigger),
+      openaiApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiKey), defaultSettings.openaiApiKey),
+      openaiApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiModel), defaultSettings.openaiApiModel),
+    }
+  } catch (err) {
+    return {
+      ...defaultSettings
+    }
+  }
+}
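getSettings() reads each value back from localStorage (the settings dialog writes them under the CONF_* keys) and falls back to defaultSettings; the surrounding try/catch also makes it degrade to the defaults when localStorage is unavailable, for example during server-side rendering. A hedged usage sketch from a client component (the panel code in this commit does essentially this; width/height values are illustrative):

// Sketch: read the user's client-side settings right before calling the rendering API.
import { newRender } from "@/app/engine/render"
import { getSettings } from "@/app/interface/settings-dialog/getSettings"

async function renderPanel(prompt: string) {
  // Falls back to defaultSettings when nothing is stored or localStorage is missing.
  const settings = getSettings()

  return newRender({
    prompt,
    width: 1024,   // example values, not from the commit
    height: 1024,
    withCache: false,
    settings,
  })
}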
src/app/interface/settings-dialog/index.tsx ADDED
@@ -0,0 +1,229 @@
+import { useState } from "react"
+import { useLocalStorage } from 'usehooks-ts'
+
+import { Button } from "@/components/ui/button"
+import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"
+import {
+  Select,
+  SelectContent,
+  SelectItem,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select"
+
+import { RenderingModelVendor } from "@/types"
+import { Input } from "@/components/ui/input"
+
+import { Label } from "./label"
+import { Field } from "./field"
+import { localStorageKeys } from "./localStorageKeys"
+import { defaultSettings } from "./defaultSettings"
+
+export function SettingsDialog() {
+  const [isOpen, setOpen] = useState(false)
+  const [renderingModelVendor, setRenderingModelVendor] = useLocalStorage<RenderingModelVendor>(
+    localStorageKeys.renderingModelVendor,
+    defaultSettings.renderingModelVendor
+  )
+  const [huggingfaceApiKey, setHuggingfaceApiKey] = useLocalStorage<string>(
+    localStorageKeys.huggingfaceApiKey,
+    defaultSettings.huggingfaceApiKey
+  )
+  const [huggingfaceInferenceApiModel, setHuggingfaceInferenceApiModel] = useLocalStorage<string>(
+    localStorageKeys.huggingfaceInferenceApiModel,
+    defaultSettings.huggingfaceInferenceApiModel
+  )
+  const [huggingfaceInferenceApiModelTrigger, setHuggingfaceInferenceApiModelTrigger] = useLocalStorage<string>(
+    localStorageKeys.huggingfaceInferenceApiModelTrigger,
+    defaultSettings.huggingfaceInferenceApiModelTrigger
+  )
+  const [replicateApiKey, setReplicateApiKey] = useLocalStorage<string>(
+    localStorageKeys.replicateApiKey,
+    defaultSettings.replicateApiKey
+  )
+  const [replicateApiModel, setReplicateApiModel] = useLocalStorage<string>(
+    localStorageKeys.replicateApiModel,
+    defaultSettings.replicateApiModel
+  )
+  const [replicateApiModelVersion, setReplicateApiModelVersion] = useLocalStorage<string>(
+    localStorageKeys.replicateApiModelVersion,
+    defaultSettings.replicateApiModelVersion
+  )
+  const [replicateApiModelTrigger, setReplicateApiModelTrigger] = useLocalStorage<string>(
+    localStorageKeys.replicateApiModelTrigger,
+    defaultSettings.replicateApiModelTrigger
+  )
+  const [openaiApiKey, setOpenaiApiKey] = useLocalStorage<string>(
+    localStorageKeys.openaiApiKey,
+    defaultSettings.openaiApiKey
+  )
+  const [openaiApiModel, setOpenaiApiModel] = useLocalStorage<string>(
+    localStorageKeys.openaiApiModel,
+    defaultSettings.openaiApiModel
+  )
+
+  return (
+    <Dialog open={isOpen} onOpenChange={setOpen}>
+      <DialogTrigger asChild>
+        <Button className="space-x-1 md:space-x-2">
+          <div>
+            <span className="hidden md:inline">Settings</span>
+          </div>
+        </Button>
+      </DialogTrigger>
+      <DialogContent className="sm:max-w-[500px]">
+        <DialogHeader>
+          <DialogDescription className="w-full text-center text-lg font-bold text-stone-800">
+            Custom Settings
+          </DialogDescription>
+        </DialogHeader>
+        <div className="grid gap-4 py-1 space-y-1 text-stone-800">
+          <p className="text-sm text-zinc-700">
+            Note: most vendors have a warm-up delay when using a custom or rarely used model. Do not hesitate to try again after 5 minutes if that happens.
+          </p>
+          <p className="text-sm text-zinc-700">
+            Security note: we do not save those settings on our side, instead they are stored inside your web browser, using the local storage.
+          </p>
+          <Field>
+            <Label>Image vendor:</Label>
+            <Select
+              onValueChange={(value: string) => {
+                setRenderingModelVendor(value as RenderingModelVendor)
+              }}
+              defaultValue={renderingModelVendor}>
+              <SelectTrigger className="">
+                <SelectValue placeholder="Theme" />
+              </SelectTrigger>
+              <SelectContent>
+                <SelectItem value="SERVER">Use server settings (default, recommended)</SelectItem>
+                <SelectItem value="HUGGINGFACE">Custom Hugging Face model (expert users)</SelectItem>
+                <SelectItem value="REPLICATE">Custom Replicate model (expert users)</SelectItem>
+                <SelectItem value="OPENAI">DALL·E 3 by OpenAI (partial support, in alpha)</SelectItem>
+              </SelectContent>
+            </Select>
+          </Field>
+
+          {renderingModelVendor === "HUGGINGFACE" && <>
+            <Field>
+              <Label>Hugging Face API Token:</Label>
+              <Input
+                className="font-mono"
+                type="password"
+                placeholder="Enter your private api token"
+                onChange={(x) => {
+                  setHuggingfaceApiKey(x.target.value)
+                }}
+                value={huggingfaceApiKey}
+              />
+            </Field>
+            <Field>
+              <Label>Hugging Face Inference API model:</Label>
+              <Input
+                className="font-mono"
+                placeholder="Name of the Inference API model"
+                onChange={(x) => {
+                  setHuggingfaceInferenceApiModel(x.target.value)
+                }}
+                value={huggingfaceInferenceApiModel}
+              />
+            </Field>
+            <p className="text-sm text-zinc-700">
+              Using a LoRA? Don&apos;t forget the trigger keyword! Also you will want to use the &quot;Neutral&quot; style.
+            </p>
+            <Field>
+              <Label>LoRA model trigger (optional):</Label>
+              <Input
+                className="font-mono"
+                placeholder="Trigger keyword (if you use a LoRA)"
+                onChange={(x) => {
+                  setHuggingfaceInferenceApiModelTrigger(x.target.value)
+                }}
+                value={huggingfaceInferenceApiModelTrigger}
+              />
+            </Field>
+          </>}
+
+          {renderingModelVendor === "OPENAI" && <>
+            <Field>
+              <Label>OpenAI API Token:</Label>
+              <Input
+                className="font-mono"
+                type="password"
+                placeholder="Enter your private api token"
+                onChange={(x) => {
+                  setOpenaiApiKey(x.target.value)
+                }}
+                value={openaiApiKey}
+              />
+            </Field>
+            <Field>
+              <Label>OpenAI image model:</Label>
+              <Input
+                className="font-mono"
+                placeholder="OpenAI image model"
+                onChange={(x) => {
+                  setOpenaiApiModel(x.target.value)
+                }}
+                value={openaiApiModel}
+              />
+            </Field>
+          </>}
+
+          {renderingModelVendor === "REPLICATE" && <>
+            <Field>
+              <Label>Replicate API Token:</Label>
+              <Input
+                className="font-mono"
+                type="password"
+                placeholder="Enter your private api token"
+                onChange={(x) => {
+                  setReplicateApiKey(x.target.value)
+                }}
+                value={replicateApiKey}
+              />
+            </Field>
+            <Field>
+              <Label>Replicate model name:</Label>
+              <Input
+                className="font-mono"
+                placeholder="Name of the Replicate model"
+                onChange={(x) => {
+                  setReplicateApiModel(x.target.value)
+                }}
+                value={replicateApiModel}
+              />
+            </Field>
+            <Field>
+              <Label>Model version:</Label>
+              <Input
+                className="font-mono"
+                placeholder="Version of the Replicate model"
+                onChange={(x) => {
+                  setReplicateApiModelVersion(x.target.value)
+                }}
+                value={replicateApiModelVersion}
+              />
+            </Field>
+            <p className="text-sm text-zinc-700">
+              Using a LoRA? Don&apos;t forget the trigger keyword! Also you will want to use the &quot;Neutral&quot; style.
+            </p>
+            <Field>
+              <Label>LoRA model trigger (optional):</Label>
+              <Input
+                className="font-mono"
+                placeholder={'LoRA trigger keyword eg. "style of TOK"'}
+                onChange={(x) => {
+                  setReplicateApiModelTrigger(x.target.value)
+                }}
+                value={replicateApiModelTrigger}
+              />
+            </Field>
+          </>}
+        </div>
+        <DialogFooter>
+          <Button type="submit" onClick={() => setOpen(false)}>Close</Button>
+        </DialogFooter>
+      </DialogContent>
+    </Dialog>
+  )
+}
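The dialog holds no separate React state for the values: every field is bound to useLocalStorage from usehooks-ts, so each change is persisted under the corresponding localStorageKeys entry and later picked up by getSettings(). A minimal sketch of that binding pattern, extracted into a hypothetical custom hook for clarity:

// Sketch of the persistence pattern used by SettingsDialog above.
import { useLocalStorage } from "usehooks-ts"
import { localStorageKeys } from "./localStorageKeys"
import { defaultSettings } from "./defaultSettings"

function useHuggingfaceApiKey() {
  // Reads/writes "CONF_AUTH_HF_API_TOKEN" in the browser's localStorage,
  // with the "<USE YOUR OWN TOKEN>" placeholder as the default value.
  const [huggingfaceApiKey, setHuggingfaceApiKey] = useLocalStorage<string>(
    localStorageKeys.huggingfaceApiKey,
    defaultSettings.huggingfaceApiKey
  )
  return { huggingfaceApiKey, setHuggingfaceApiKey }
}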
src/app/interface/settings-dialog/label.tsx ADDED
@@ -0,0 +1,7 @@
+import { ReactNode } from "react"
+
+export function Label({ children }: { children: ReactNode }) {
+  return (
+    <label className="text-base font-semibold text-zinc-700">{children}</label>
+  )
+}
src/app/interface/settings-dialog/localStorageKeys.ts ADDED
@@ -0,0 +1,14 @@
+import { Settings } from "@/types"
+
+export const localStorageKeys: Record<keyof Settings, string> = {
+  renderingModelVendor: "CONF_RENDERING_MODEL_VENDOR",
+  huggingfaceApiKey: "CONF_AUTH_HF_API_TOKEN",
+  huggingfaceInferenceApiModel: "CONF_RENDERING_HF_INFERENCE_API_BASE_MODEL",
+  huggingfaceInferenceApiModelTrigger: "CONF_RENDERING_HF_INFERENCE_API_BASE_MODEL_TRIGGER",
+  replicateApiKey: "CONF_AUTH_REPLICATE_API_TOKEN",
+  replicateApiModel: "CONF_RENDERING_REPLICATE_API_MODEL",
+  replicateApiModelVersion: "CONF_RENDERING_REPLICATE_API_MODEL_VERSION",
+  replicateApiModelTrigger: "CONF_RENDERING_REPLICATE_API_MODEL_TRIGGER",
+  openaiApiKey: "CONF_AUTH_OPENAI_API_KEY",
+  openaiApiModel: "CONF_AUTH_OPENAI_API_MODEL",
+}
src/app/interface/share/index.tsx CHANGED
@@ -88,26 +88,7 @@ ${comicFileMd}`;
    const paramsStr = params.toString();
    window.open(`https://huggingface.co/spaces/jbilcke-hf/comic-factory/discussions/new?${paramsStr}`, '_blank');
  }
-
-  <div>
-    {
-      // there is an issue, this env check doesn't work..
-      // process.env.NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING === "true" ?
-      <Button
-        onClick={handleShare}
-        disabled={!prompt?.length}
-        className="space-x-2"
-      >
-        <div className="scale-105"><HuggingClap /></div>
-        <div>
-          <span className="hidden md:inline">{remainingImages ? `⌛` : `Share to community`}</span>
-          <span className="inline md:hidden">{remainingImages ? `⌛` : `Share`}</span>
-        </div>
-      </Button>
-      //: null
-    }
-  </div>
-
+
  return (
    <Dialog open={isOpen} onOpenChange={setOpen}>
      <DialogTrigger asChild>
src/app/main.tsx CHANGED
@@ -52,7 +52,11 @@ export default function Main() {
        preset,
        prompt: [
          `${userStoryPrompt}`,
-          stylePrompt ? `in the following context: ${stylePrompt}` : ''
+
+          // not necessary + it confuses the LLM if we use custom
+          // + the LLM may reject some of the styles
+          // stylePrompt ? `in the following context: ${stylePrompt}` : ''
+
        ].filter(x => x).join(", "), nbTotalPanels })
      console.log("LLM responded:", llmResponse)
 
src/lib/getValidString.ts ADDED
@@ -0,0 +1,8 @@
+export function getValidString(something: any, defaultValue: string) {
+  const strValue = `${something || defaultValue}`
+  try {
+    return JSON.parse(strValue) || defaultValue
+  } catch (err) {
+    return defaultValue
+  }
+}
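getValidString exists because useLocalStorage stores values JSON-encoded (a saved string comes back with surrounding quotes), so the raw localStorage.getItem output has to be parsed before use, and anything missing or unparsable collapses to the default. A few hedged example calls showing the behavior of the function above:

// Sketch of how getValidString behaves on values written by the settings dialog.
import { getValidString } from "@/lib/getValidString"

// A value saved by useLocalStorage comes back JSON-quoted:
getValidString('"stabilityai/sdxl"', "fallback")   // -> "stabilityai/sdxl"

// A missing key (null) or malformed content falls back to the default:
getValidString(null, "fallback")                   // -> "fallback"
getValidString("not-json", "fallback")             // -> "fallback"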
src/types.ts CHANGED
@@ -87,13 +87,19 @@ export type LLMEngine =
   | "OPENAI"
   | "REPLICATE"
 
-export type RenderingEngine =
+export type RenderingEngine =
   | "VIDEOCHAIN"
   | "OPENAI"
   | "REPLICATE"
   | "INFERENCE_API"
   | "INFERENCE_ENDPOINT"
 
+export type RenderingModelVendor =
+  | "SERVER"
+  | "OPENAI"
+  | "REPLICATE"
+  | "HUGGINGFACE"
+
 export type PostVisibility =
   | "featured" // featured by admins
   | "trending" // top trending / received more than 10 upvotes
@@ -133,3 +139,16 @@ export type LayoutProps = {
   page: number
   nbPanels: number
 }
+
+export type Settings = {
+  renderingModelVendor: RenderingModelVendor
+  huggingfaceApiKey: string
+  huggingfaceInferenceApiModel: string
+  huggingfaceInferenceApiModelTrigger: string
+  replicateApiKey: string
+  replicateApiModel: string
+  replicateApiModelVersion: string
+  replicateApiModelTrigger: string
+  openaiApiKey: string
+  openaiApiModel: string
+}
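Taken together, the new Settings type is the single object that travels from the browser (settings dialog → localStorage → getSettings()) to the server-side render functions. A hedged example of what it can look like when a user picks a custom Hugging Face LoRA (field values are illustrative, not from the commit):

// Illustrative only: a user-side configuration for a custom SDXL LoRA
// served through the Hugging Face Inference API.
import { Settings } from "@/types"

const exampleSettings: Settings = {
  renderingModelVendor: "HUGGINGFACE",
  huggingfaceApiKey: "hf_xxx",                          // the user's own token
  huggingfaceInferenceApiModel: "my-user/my-sdxl-lora", // hypothetical model name
  huggingfaceInferenceApiModelTrigger: "style of TOK",  // LoRA trigger keyword
  replicateApiKey: "<USE YOUR OWN TOKEN>",              // remaining fields keep the defaults
  replicateApiModel: "stabilityai/sdxl",
  replicateApiModelVersion: "da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf",
  replicateApiModelTrigger: "style of TOK",
  openaiApiKey: "<USE YOUR OWN TOKEN>",
  openaiApiModel: "dall-e-3",
}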