jbilcke-hf (HF staff) committed
Commit e015622 • 1 Parent(s): a0c8cbd

Add experimental support for Claude

.env CHANGED
@@ -11,6 +11,7 @@ RENDERING_ENGINE="INFERENCE_API"
 # - INFERENCE_API
 # - OPENAI
 # - GROQ
+# - ANTHROPIC
 LLM_ENGINE="INFERENCE_API"
 
 # set this to control the number of pages
@@ -46,19 +47,22 @@ AUTH_VIDEOCHAIN_API_TOKEN=
 # Groq.com key: available for the LLM engine
 AUTH_GROQ_API_KEY=
 
+# Anthropic.com key: available for the LLM engine
+AUTH_ANTHROPIC_API_KEY=
+
 # ------------- RENDERING API CONFIG --------------
 
-# If you decided to use Replicate for the RENDERING engine
+# If you decide to use Replicate for the RENDERING engine
 RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
 RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
 
-# If you decided to use a private Hugging Face Inference Endpoint for the RENDERING engine
+# If you decide to use a private Hugging Face Inference Endpoint for the RENDERING engine
 RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
 
-# If you decided to use a Hugging Face Inference API model for the RENDERING engine
+# If you decide to use a Hugging Face Inference API model for the RENDERING engine
 RENDERING_HF_INFERENCE_API_BASE_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
 
-# If you decided to use a Hugging Face Inference API model for the RENDERING engine
+# If you decide to use a Hugging Face Inference API model for the RENDERING engine
 RENDERING_HF_INFERENCE_API_REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0"
 
 # If your model returns a different file type (eg. jpg or webp) change it here
@@ -66,7 +70,7 @@ RENDERING_HF_INFERENCE_API_FILE_TYPE="image/png"
 
 # An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
 RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
-
+# If you decide to use OpenAI for the RENDERING engine
 RENDERING_OPENAI_API_BASE_URL="https://api.openai.com/v1"
 RENDERING_OPENAI_API_MODEL="dall-e-3"
 
@@ -74,14 +78,18 @@ RENDERING_OPENAI_API_MODEL="dall-e-3"
 
 LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
 
-# If you decided to use OpenAI for the LLM engine
+# If you decide to use OpenAI for the LLM engine
 LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
 LLM_OPENAI_API_MODEL="gpt-4"
 
-# If you decided to use a private Hugging Face Inference Endpoint for the LLM engine
+# If you decide to use Anthropic (eg. Claude) for the LLM engine
+# https://docs.anthropic.com/claude/docs/models-overview
+LLM_ANTHROPIC_API_MODEL="claude-3-opus-20240229"
+
+# If you decide to use a private Hugging Face Inference Endpoint for the LLM engine
 LLM_HF_INFERENCE_ENDPOINT_URL=""
 
-# If you decided to use a Hugging Face Inference API model for the LLM engine
+# If you decide to use a Hugging Face Inference API model for the LLM engine
 # LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
 LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
 
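Taken together, the new variables make switching the LLM engine to Claude a three-line change. A minimal sketch of a `.env.local` override (the README below recommends `.env.local` for local customization; the key value here is a placeholder):

```bash
# Minimal .env.local to route LLM calls through Anthropic
LLM_ENGINE="ANTHROPIC"
LLM_ANTHROPIC_API_MODEL="claude-3-opus-20240229"
AUTH_ANTHROPIC_API_KEY="your-own-anthropic-api-key"  # placeholder
```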
 
README.md CHANGED
@@ -31,13 +31,14 @@ it requires various components to run for the frontend, backend, LLM, SDXL etc.
 If you try to duplicate the project, open the `.env` you will see it requires some variables.
 
 Provider config:
-- `LLM_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "OPENAI", or "GROQ"
+- `LLM_ENGINE`: can be one of `INFERENCE_API`, `INFERENCE_ENDPOINT`, `OPENAI`, `GROQ`, `ANTHROPIC`
 - `RENDERING_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "REPLICATE", "VIDEOCHAIN", "OPENAI" for now, unless you code your custom solution
 
 Auth config:
 - `AUTH_HF_API_TOKEN`: if you decide to use Hugging Face for the LLM engine (inference api model or a custom inference endpoint)
 - `AUTH_OPENAI_API_KEY`: to use OpenAI for the LLM engine
 - `AUTH_GROQ_API_KEY`: to use Groq for the LLM engine
+- `AUTH_ANTHROPIC_API_KEY`: to use Anthropic (Claude) for the LLM engine
 - `AUTH_VIDEOCHAIN_API_TOKEN`: secret token to access the VideoChain API server
 - `AUTH_REPLICATE_API_TOKEN`: in case you want to use Replicate.com
 
@@ -56,6 +57,7 @@ Language model config (depending on the LLM engine you decide to use):
 - `LLM_OPENAI_API_BASE_URL`: "https://api.openai.com/v1"
 - `LLM_OPENAI_API_MODEL`: "gpt-4"
 - `LLM_GROQ_API_MODEL`: "mixtral-8x7b-32768"
+- `LLM_ANTHROPIC_API_MODEL`: "claude-3-opus-20240229"
 
 In addition, there are some community sharing variables that you can just ignore.
 Those variables are not required to run the AI Comic Factory on your own website or computer
@@ -76,7 +78,7 @@ To customise a variable locally, you should create a `.env.local`
 
 Currently the AI Comic Factory uses [zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) through an [Inference Endpoint](https://huggingface.co/docs/inference-endpoints/index).
 
-You have three options:
+You have multiple options:
 
 ### Option 1: Use an Inference API model
 
@@ -134,8 +136,17 @@ LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
 
 AUTH_GROQ_API_KEY="Your own GROQ API Key"
 ```
+### Option 5: (new, experimental) use Anthropic (Claude)
 
-### Option 5: Fork and modify the code to use a different LLM system
+```bash
+LLM_ENGINE="ANTHROPIC"
+
+LLM_ANTHROPIC_API_MODEL="claude-3-opus-20240229"
+
+AUTH_ANTHROPIC_API_KEY="Your own ANTHROPIC API Key"
+```
+
+### Option 6: Fork and modify the code to use a different LLM system
 
 Another option could be to disable the LLM completely and replace it with another LLM protocol and/or provider (eg. Claude, Replicate), or a human-generated story instead (by returning mock or static data).
 
package-lock.json CHANGED
@@ -1,13 +1,14 @@
 {
   "name": "@jbilcke/comic-factory",
-  "version": "0.0.0",
+  "version": "1.2.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@jbilcke/comic-factory",
-      "version": "0.0.0",
+      "version": "1.2.0",
       "dependencies": {
+        "@anthropic-ai/sdk": "^0.19.1",
         "@huggingface/hub": "^0.14.2",
         "@huggingface/inference": "^2.6.1",
         "@radix-ui/react-accordion": "^1.1.2",
@@ -93,6 +94,30 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
+    "node_modules/@anthropic-ai/sdk": {
+      "version": "0.19.1",
+      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.19.1.tgz",
+      "integrity": "sha512-u9i8yN8wAr/ujaXSRjfYXiYzhCk2mdUG6G9y5IAKEAPJHwFTrEyf76Z4V1LqqFbDBlZqm0tkoMMpU8tmp65ocA==",
+      "dependencies": {
+        "@types/node": "^18.11.18",
+        "@types/node-fetch": "^2.6.4",
+        "abort-controller": "^3.0.0",
+        "agentkeepalive": "^4.2.1",
+        "digest-fetch": "^1.3.0",
+        "form-data-encoder": "1.7.2",
+        "formdata-node": "^4.3.2",
+        "node-fetch": "^2.6.7",
+        "web-streams-polyfill": "^3.2.1"
+      }
+    },
+    "node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
+      "version": "18.19.28",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.28.tgz",
+      "integrity": "sha512-J5cOGD9n4x3YGgVuaND6khm5x07MMdAKkRyXnjVR6KFhLMNh2yONGiP7Z+4+tBOt5mK+GvDTiacTOVGGpqiecw==",
+      "dependencies": {
+        "undici-types": "~5.26.4"
+      }
+    },
     "node_modules/@babel/runtime": {
       "version": "7.24.1",
       "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.1.tgz",
package.json CHANGED
@@ -9,6 +9,7 @@
     "lint": "next lint"
   },
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.19.1",
    "@huggingface/hub": "^0.14.2",
     "@huggingface/inference": "^2.6.1",
     "@radix-ui/react-accordion": "^1.1.2",
src/app/queries/predict.ts CHANGED
@@ -4,10 +4,12 @@ import { LLMEngine } from "@/types"
 import { predict as predictWithHuggingFace } from "./predictWithHuggingFace"
 import { predict as predictWithOpenAI } from "./predictWithOpenAI"
 import { predict as predictWithGroq } from "./predictWithGroq"
+import { predict as predictWithAnthropic } from "./predictWithAnthropic"
 
 const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
 
 export const predict =
   llmEngine === "GROQ" ? predictWithGroq :
+  llmEngine === "ANTHROPIC" ? predictWithAnthropic :
   llmEngine === "OPENAI" ? predictWithOpenAI :
   predictWithHuggingFace
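The dispatcher resolves the provider once, at module load, from `LLM_ENGINE`, so callers stay provider-agnostic. A hypothetical caller, assuming all four implementations share the `{ systemPrompt, userPrompt, nbMaxNewTokens }` signature seen in `predictWithAnthropic.ts` below (the prompt strings are illustrative, not from the repo):

```typescript
import { predict } from "./predict"

// With LLM_ENGINE="ANTHROPIC" this resolves to predictWithAnthropic;
// "GROQ" and "OPENAI" route to their engines, anything else falls
// back to the Hugging Face implementation.
async function demo(): Promise<void> {
  const text = await predict({
    systemPrompt: "You are a comic book scriptwriter.", // illustrative
    userPrompt: "Describe the opening panel of a noir story.",
    nbMaxNewTokens: 200,
  })
  console.log(text)
}
```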
src/app/queries/predictNextPanels.ts CHANGED
@@ -1,12 +1,11 @@
-
-import { predict } from "./predict"
-import { Preset } from "../engine/presets"
 import { GeneratedPanel } from "@/types"
 import { cleanJson } from "@/lib/cleanJson"
-import { createZephyrPrompt } from "@/lib/createZephyrPrompt"
 import { dirtyGeneratedPanelCleaner } from "@/lib/dirtyGeneratedPanelCleaner"
 import { dirtyGeneratedPanelsParser } from "@/lib/dirtyGeneratedPanelsParser"
 import { sleep } from "@/lib/sleep"
+
+import { Preset } from "../engine/presets"
+import { predict } from "./predict"
 import { getSystemPrompt } from "./getSystemPrompt"
 import { getUserPrompt } from "./getUserPrompt"
 
src/app/queries/predictWithAnthropic.ts ADDED
@@ -0,0 +1,41 @@
+"use server"
+
+import Anthropic from '@anthropic-ai/sdk';
+import { MessageParam } from '@anthropic-ai/sdk/resources';
+
+export async function predict({
+  systemPrompt,
+  userPrompt,
+  nbMaxNewTokens,
+}: {
+  systemPrompt: string
+  userPrompt: string
+  nbMaxNewTokens: number
+}): Promise<string> {
+  const anthropicApiKey = `${process.env.AUTH_ANTHROPIC_API_KEY || ""}`
+  const anthropicApiModel = `${process.env.LLM_ANTHROPIC_API_MODEL || "claude-3-opus-20240229"}`
+
+  const anthropic = new Anthropic({
+    apiKey: anthropicApiKey,
+  })
+
+  const messages: MessageParam[] = [
+    { role: "user", content: userPrompt },
+  ]
+
+  try {
+    const res = await anthropic.messages.create({
+      messages: messages,
+      // stream: false,
+      system: systemPrompt,
+      model: anthropicApiModel,
+      // temperature: 0.8,
+      max_tokens: nbMaxNewTokens,
+    })
+
+    return res.content[0]?.text || ""
+  } catch (err) {
+    console.error(`error during generation: ${err}`)
+    return ""
+  }
+}
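Two details worth noting in this new module: Anthropic's Messages API takes the system prompt as a top-level `system` field rather than as a message role, and the wrapper swallows API errors by resolving to an empty string instead of throwing. A hypothetical caller therefore has to treat `""` as a failure (names and prompts below are illustrative, not from the repo):

```typescript
import { predict } from "./predictWithAnthropic"

async function generatePanels(): Promise<string> {
  const raw = await predict({
    systemPrompt: "You write comic panels as JSON.", // illustrative
    userPrompt: "A lighthouse keeper finds a map.",  // illustrative
    nbMaxNewTokens: 512,
  })
  // predict() resolves to "" on API errors instead of throwing
  if (!raw) throw new Error("Anthropic generation returned nothing")
  return raw
}
```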
src/types.ts CHANGED
@@ -101,6 +101,7 @@ export type LLMEngine =
   | "OPENAI"
   | "REPLICATE"
   | "GROQ"
+  | "ANTHROPIC"
 
 export type RenderingEngine =
   | "VIDEOCHAIN"