jbilcke-hf (HF staff) committed
Commit 30c1ba0 • 1 Parent(s): eff8217

add LCM model

src/index.mts CHANGED
@@ -162,7 +162,7 @@ app.post("/render", async (req, res) => {
     return
   }
 
-  console.log(req.body)
+  // console.log(req.body)
 
   const request = parseRenderRequest(req.body as RenderRequest)
 
src/production/renderImage.mts CHANGED
@@ -1,5 +1,5 @@
-import { generateImageLCMAsBase64 } from "../providers/image-generation/generateImageLCM.mts"
-import { generateImageSDXLAsBase64 } from "../providers/image-generation/generateImageSDXL.mts"
+import { generateImageLCMAsBase64 } from "../providers/image-generation/generateImageLCMGradio.mts"
+import { generateImageSDXLAsBase64 } from "../providers/image-generation/generateImageSDXLGradio.mts"
 import { generateImageSDXL360AsBase64 } from "../providers/image-generation/generateImageSDXL360.mts"
 import { RenderedScene, RenderRequest } from "../types.mts"
 
@@ -52,7 +52,7 @@ export async function renderImage(
       throw new Error(`the generated image is empty`)
     }
   } catch (err) {
-    console.error(`failed to generate the image, although ${err}`)
+    // console.error(`failed to generate the image, due to`, err)
     response.error = `failed to render scene: ${err}`
     response.status = "error"
     response.assetUrl = ""
src/providers/image-generation/generateImage.mts CHANGED
@@ -30,7 +30,7 @@ export async function generateImage(options: {
   const blob = await hf.textToImage({
     inputs: [
       positivePrompt,
-      "bautiful",
+      "beautiful",
      "award winning",
       // "intricate details",
       "high resolution"
src/providers/image-generation/generateImageLCMFetch.mts ADDED
@@ -0,0 +1,109 @@
+import { client } from "@gradio/client"
+
+import { generateSeed } from "../../utils/misc/generateSeed.mts"
+import { getValidNumber } from "../../utils/validators/getValidNumber.mts"
+
+// TODO add a system to mark failed instances as "unavailable" for a couple of minutes
+// console.log("process.env:", process.env)
+
+// note: to reduce costs I use the small A10s (not the large)
+// anyway, we will soon not need to use this cloud anymore
+// since we will be able to leverage the Inference API
+const instance = `${process.env.VC_LCM_SPACE_API_URL || ""}`
+const secretToken = `${process.env.VC_MICROSERVICE_SECRET_TOKEN || ""}`
+
+// console.log("DEBUG:", JSON.stringify({ instances, secretToken }, null, 2))
+
+export async function generateImageLCMAsBase64(options: {
+  positivePrompt: string;
+  negativePrompt?: string;
+  seed?: number;
+  width?: number;
+  height?: number;
+  nbSteps?: number;
+}): Promise<string> {
+
+  // console.log("querying " + instance)
+  const positivePrompt = options?.positivePrompt || ""
+  if (!positivePrompt) {
+    throw new Error("missing prompt")
+  }
+
+  // the negative prompt CAN be missing, since we use a trick
+  // where we make the interface mandatory in the TS doc,
+  // but browsers might send something partial
+  const negativePrompt = options?.negativePrompt || ""
+
+  // we treat 0 as meaning "random seed"
+  const seed = (options?.seed ? options.seed : 0) || generateSeed()
+
+  const width = getValidNumber(options?.width, 256, 1024, 512)
+  const height = getValidNumber(options?.height, 256, 1024, 512)
+  const nbSteps = getValidNumber(options?.nbSteps, 1, 8, 4)
+  // console.log("SEED:", seed)
+
+  const positive = [
+
+    // oh well.. is it too late to move this to the bottom?
+    "beautiful",
+
+    // too opinionated, so let's remove it
+    // "intricate details",
+
+    positivePrompt,
+
+    "award winning",
+    "high resolution"
+  ].filter(word => word)
+  .join(", ")
+
+  const negative = [
+    negativePrompt,
+    "watermark",
+    "copyright",
+    "blurry",
+    // "artificial",
+    // "cropped",
+    "low quality",
+    "ugly"
+  ].filter(word => word)
+  .join(", ")
+
+  const res = await fetch(instance + (instance.endsWith("/") ? "" : "/") + "api/predict", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      // Authorization: `Bearer ${token}`,
+    },
+    body: JSON.stringify({
+      fn_index: 1, // <- important!
+      data: [
+        positive, // string in 'Prompt' Textbox component
+        negative, // string in 'Negative prompt' Textbox component
+        seed, // number (numeric value between 0 and 2147483647) in 'Seed' Slider component
+        width, // number (numeric value between 256 and 1024) in 'Width' Slider component
+        height, // number (numeric value between 256 and 1024) in 'Height' Slider component
+        0.0, // can be disabled for LCM-LORA-SSD-1B
+        nbSteps, // number (numeric value between 2 and 8) in 'Number of inference steps for base' Slider component
+        secretToken
+      ],
+    }),
+    cache: "no-store",
+  })
+
+  const { data } = await res.json()
+
+
+  // Recommendation: handle errors
+  if (res.status !== 200 || !Array.isArray(data)) {
+    // This will activate the closest `error.js` Error Boundary
+    throw new Error(`Failed to fetch data (status: ${res.status})`)
+  }
+  // console.log("data:", data.slice(0, 50))
+
+  if (!data[0]) {
+    throw new Error(`the returned image was empty`)
+  }
+
+  return data[0] as string
+}
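
For context, a minimal usage sketch of the new fetch-based LCM provider. The prompt text and output file name are illustrative, and it assumes the Space returns a base64 data URI (as the function name suggests), which is not spelled out in this commit:

import { writeFile } from "node:fs/promises"

import { generateImageLCMAsBase64 } from "./generateImageLCMFetch.mts"

// LCM only needs a handful of inference steps, so nbSteps stays small (clamped to [1, 8])
const dataUri = await generateImageLCMAsBase64({
  positivePrompt: "a lighthouse on a cliff at dusk",
  negativePrompt: "blurry, low quality",
  width: 512,
  height: 512,
  nbSteps: 4,
})

// assumption: the Space returns a "data:image/...;base64,..." string
const base64 = dataUri.split(";base64,").pop() || ""
await writeFile("lcm-preview.png", Buffer.from(base64, "base64"))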
src/providers/image-generation/{generateImageLCM.mts → generateImageLCMGradio.mts} RENAMED
@@ -1,3 +1,4 @@
+
 import { client } from "@gradio/client"
 
 import { generateSeed } from "../../utils/misc/generateSeed.mts"
@@ -21,9 +22,9 @@ export async function generateImageLCMAsBase64(options: {
   width?: number;
   height?: number;
   nbSteps?: number;
-}) {
+}): Promise<string> {
 
-  console.log("querying " + instance)
+  // console.log("querying " + instance)
   const positivePrompt = options?.positivePrompt || ""
   if (!positivePrompt) {
     throw new Error("missing prompt")
@@ -39,7 +40,7 @@ export async function generateImageLCMAsBase64(options: {
 
   const width = getValidNumber(options?.width, 256, 1024, 512)
   const height = getValidNumber(options?.height, 256, 1024, 512)
-  const nbSteps = getValidNumber(options?.nbSteps, 2, 40, 20)
+  const nbSteps = getValidNumber(options?.nbSteps, 1, 8, 4)
   // console.log("SEED:", seed)
 
   const positive = [
@@ -73,16 +74,14 @@ export async function generateImageLCMAsBase64(options: {
     hf_token: `${process.env.VC_HF_API_TOKEN}` as any
   })
 
-
   const rawResponse = (await api.predict("/run", [
     positive, // string in 'Prompt' Textbox component
     negative, // string in 'Negative prompt' Textbox component
-    true, // boolean in 'Use negative prompt' Checkbox component
     seed, // number (numeric value between 0 and 2147483647) in 'Seed' Slider component
     width, // number (numeric value between 256 and 1024) in 'Width' Slider component
     height, // number (numeric value between 256 and 1024) in 'Height' Slider component
-    8, // number (numeric value between 1 and 20) in 'Guidance scale for base' Slider component
-    nbSteps, // number (numeric value between 210 and 40) in 'Number of inference steps for base' Slider component
+    0.0, // can be disabled for LCM-LORA-SSD-1B
+    nbSteps, // number (numeric value between 2 and 8) in 'Number of inference steps for base' Slider component
     secretToken
   ])) as any
 
@@ -91,4 +90,4 @@ export async function generateImageLCMAsBase64(options: {
     throw new Error(`the returned image was empty`)
   }
   return result
-}
+}
src/providers/image-generation/generateImageSDXL360.mts CHANGED
@@ -18,7 +18,7 @@ export async function generateImageSDXL360AsBase64(options: {
   width?: number;
   height?: number;
   nbSteps?: number;
-}) {
+}): Promise<string> {
 
   const positivePrompt = options?.positivePrompt || ""
   if (!positivePrompt) {
@@ -32,7 +32,7 @@ export async function generateImageSDXL360AsBase64(options: {
   const width = getValidNumber(options?.width, 256, 1024, 512)
   const height = getValidNumber(options?.height, 256, 1024, 512)
   const nbSteps = getValidNumber(options?.nbSteps, 5, 100, 20)
-  console.log("SEED FOR 360:", seed)
+  // console.log("SEED FOR 360:", seed)
 
   const instance = instances.shift()
   instances.push(instance)
src/providers/image-generation/generateImageSDXLFetch.mts ADDED
@@ -0,0 +1,113 @@
+import { client } from "@gradio/client"
+
+import { generateSeed } from "../../utils/misc/generateSeed.mts"
+import { getValidNumber } from "../../utils/validators/getValidNumber.mts"
+
+// TODO add a system to mark failed instances as "unavailable" for a couple of minutes
+// console.log("process.env:", process.env)
+
+// note: to reduce costs I use the small A10s (not the large)
+// anyway, we will soon not need to use this cloud anymore
+// since we will be able to leverage the Inference API
+const instance = `${process.env.VC_SDXL_SPACE_API_URL || ""}`
+const secretToken = `${process.env.VC_MICROSERVICE_SECRET_TOKEN || ""}`
+
+// console.log("DEBUG:", JSON.stringify({ instances, secretToken }, null, 2))
+
+export async function generateImageSDXLAsBase64(options: {
+  positivePrompt: string;
+  negativePrompt?: string;
+  seed?: number;
+  width?: number;
+  height?: number;
+  nbSteps?: number;
+}): Promise<string> {
+
+  const positivePrompt = options?.positivePrompt || ""
+  if (!positivePrompt) {
+    throw new Error("missing prompt")
+  }
+
+  // the negative prompt CAN be missing, since we use a trick
+  // where we make the interface mandatory in the TS doc,
+  // but browsers might send something partial
+  const negativePrompt = options?.negativePrompt || ""
+
+  // we treat 0 as meaning "random seed"
+  const seed = (options?.seed ? options.seed : 0) || generateSeed()
+
+  const width = getValidNumber(options?.width, 256, 1024, 512)
+  const height = getValidNumber(options?.height, 256, 1024, 512)
+  const nbSteps = getValidNumber(options?.nbSteps, 5, 100, 20)
+  // console.log("SEED:", seed)
+
+  const positive = [
+
+    // oh well.. is it too late to move this to the bottom?
+    "beautiful",
+    // "intricate details",
+    positivePrompt,
+
+    "award winning",
+    "high resolution"
+  ].filter(word => word)
+  .join(", ")
+
+  const negative = [
+    negativePrompt,
+    "watermark",
+    "copyright",
+    "blurry",
+    // "artificial",
+    // "cropped",
+    "low quality",
+    "ugly"
+  ].filter(word => word)
+  .join(", ")
+
+  const res = await fetch(instance + (instance.endsWith("/") ? "" : "/") + "api/predict", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      // Authorization: `Bearer ${token}`,
+    },
+    body: JSON.stringify({
+      fn_index: 1, // <- important!
+      data: [
+        positive, // string in 'Prompt' Textbox component
+        negative, // string in 'Negative prompt' Textbox component
+        positive, // string in 'Prompt 2' Textbox component
+        negative, // string in 'Negative prompt 2' Textbox component
+        true, // boolean in 'Use negative prompt' Checkbox component
+        false, // boolean in 'Use prompt 2' Checkbox component
+        false, // boolean in 'Use negative prompt 2' Checkbox component
+        seed, // number (numeric value between 0 and 2147483647) in 'Seed' Slider component
+        width, // number (numeric value between 256 and 1024) in 'Width' Slider component
+        height, // number (numeric value between 256 and 1024) in 'Height' Slider component
+        8, // number (numeric value between 1 and 20) in 'Guidance scale for base' Slider component
+        8, // number (numeric value between 1 and 20) in 'Guidance scale for refiner' Slider component
+        nbSteps, // number (numeric value between 10 and 100) in 'Number of inference steps for base' Slider component
+        nbSteps, // number (numeric value between 10 and 100) in 'Number of inference steps for refiner' Slider component
+        true, // boolean in 'Apply refiner' Checkbox component,
+        secretToken
+      ],
+    }),
+    cache: "no-store",
+  })
+
+  const { data } = await res.json()
+
+  // console.log("data:", data)
+  // Recommendation: handle errors
+  if (res.status !== 200 || !Array.isArray(data)) {
+    // This will activate the closest `error.js` Error Boundary
+    throw new Error(`Failed to fetch data (status: ${res.status})`)
+  }
+  // console.log("data:", data.slice(0, 50))
+
+  if (!data[0]) {
+    throw new Error(`the returned image was empty`)
+  }
+
+  return data[0] as string
+}
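
A similar sketch for the SDXL fetch provider. Guidance scales and the refiner toggle are hard-coded in the payload above, so callers only tune the prompts, seed, size and step count; the prompt text and seed below are illustrative, and the return value is again assumed to be a base64 data URI:

import { generateImageSDXLAsBase64 } from "./generateImageSDXLFetch.mts"

// reusing an explicit seed keeps the render reproducible across retries
const seed = 123456789

const image = await generateImageSDXLAsBase64({
  positivePrompt: "a painterly forest scene, volumetric light",
  negativePrompt: "watermark, text",
  seed,
  width: 1024,
  height: 1024,
  nbSteps: 20, // clamped to [5, 100], defaults to 20
})
// `image` is expected to hold the same kind of base64 data URI as the LCM provider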
src/providers/image-generation/{generateImageSDXL.mts → generateImageSDXLGradio.mts} RENAMED
@@ -21,7 +21,7 @@ export async function generateImageSDXLAsBase64(options: {
   width?: number;
   height?: number;
   nbSteps?: number;
-}) {
+}): Promise<string> {
 
   const positivePrompt = options?.positivePrompt || ""
   if (!positivePrompt) {
src/utils/requests/parseRenderRequest.mts CHANGED
@@ -5,7 +5,7 @@ import { getValidNumber } from "../validators/getValidNumber.mts"
 
 export function parseRenderRequest(request: RenderRequest) {
 
-  console.log("parseRenderRequest: "+JSON.stringify(request, null, 2))
+  // console.log("parseRenderRequest: "+JSON.stringify(request, null, 2))
   try {
     request.nbFrames = getValidNumber(request.nbFrames, 1, 24, 16)
 
@@ -22,16 +22,16 @@ export function parseRenderRequest(request: RenderRequest) {
 
     request.upscalingFactor = getValidNumber(request.upscalingFactor, 0, 4, 0)
 
-    request.nbSteps = getValidNumber(request.nbSteps, 5, 50, 10)
+    request.nbSteps = getValidNumber(request.nbSteps, 1, 50, 10)
 
     request.analyze = request?.analyze ? true : false
 
     if (isVideo) {
-      request.width = getValidNumber(request.width, 256, 1024, 1024)
-      request.height = getValidNumber(request.height, 256, 1024, 512)
+      request.width = getValidNumber(request.width, 256, 2048, 576)
+      request.height = getValidNumber(request.height, 256, 2048, 320)
     } else {
-      request.width = getValidNumber(request.width, 256, 1280, 576)
-      request.height = getValidNumber(request.height, 256, 720, 320)
+      request.width = getValidNumber(request.width, 256, 2048, 1024)
+      request.height = getValidNumber(request.height, 256, 2048, 1024)
     }
 
     request.turbo = getValidBoolean(request.turbo, false)
@@ -42,6 +42,6 @@ export function parseRenderRequest(request: RenderRequest) {
     console.error(`failed to parse the render request: ${err}`)
   }
 
-  console.log("parsed request: "+JSON.stringify(request, null, 2))
+  // console.log("parsed request: "+JSON.stringify(request, null, 2))
   return request
 }
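
For reference, a short sketch of how the new bounds behave. The implementation of getValidNumber is not part of this diff; the sketch assumes getValidNumber(value, min, max, defaultValue) clamps finite numbers into [min, max] and falls back to the default otherwise:

// minimal sketch, assuming the clamp-with-default semantics described above
const getValidNumber = (value: unknown, min: number, max: number, def: number): number => {
  const n = Number(value)
  return Number.isFinite(n) ? Math.min(max, Math.max(min, n)) : def
}

// still-image requests (not video) now default to 1024x1024 and are capped at 2048
console.log(getValidNumber(undefined, 256, 2048, 1024)) // 1024
console.log(getValidNumber(4096, 256, 2048, 1024))      // 2048

// nbSteps can now go as low as 1, which the LCM provider relies on
console.log(getValidNumber(2, 1, 50, 10))               // 2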