jbilcke-hf (HF staff) committed
Commit 1d8d40b
2 Parent(s): 2f64630 04fccac

Merge remote-tracking branch 'origin'

Files changed (3):
  1. CONTRIBUTORS.md +2 -1
  2. README.md +11 -6
  3. src/app/queries/predict.ts +9 -3
CONTRIBUTORS.md CHANGED
@@ -5,5 +5,6 @@ This project was developed by Julian Bilcke (@jbilcke-hf), as part of his work a
 A huge thanks to external developers for their contributions!
 
 艾逗笔 (@idoubi):
-- Added support for OpenAI: https://github.com/jbilcke-hf/ai-comic-factory/pull/6
+- [feature] Added support for OpenAI: https://github.com/jbilcke-hf/ai-comic-factory/pull/6
+- [bug] predict import error (use dynamic imports for the LLM provider): https://github.com/jbilcke-hf/ai-comic-factory/pull/9
 
README.md CHANGED
@@ -11,6 +11,8 @@ disable_embedding: true
 
 # AI Comic Factory
 
+*(note: the website "aicomicfactory.com" is not affiliated with the AI Comic Factory project, nor is it created or maintained by the AI Comic Factory team. If you see an issue with their website, please contact them directly)*
+
 ## Running the project at home
 
 First, I would like to highlight that everything is open-source (see [here](https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory/tree/main), [here](https://huggingface.co/spaces/jbilcke-hf/VideoChain-API/tree/main), [here](https://huggingface.co/spaces/hysts/SD-XL/tree/main), [here](https://github.com/huggingface/text-generation-inference)).
@@ -102,10 +104,13 @@ To activate it, create a `.env.local` configuration file:
 
 ```bash
 LLM_ENGINE="OPENAI"
+
 # default openai api base url is: https://api.openai.com/v1
-OPENAI_API_BASE_URL="Your OpenAI API Base URL"
-OPENAI_API_KEY="Your OpenAI API Key"
-OPENAI_API_MODEL="gpt-3.5-turbo"
+LLM_OPENAI_API_BASE_URL="Your OpenAI API Base URL"
+
+LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
+
+AUTH_OPENAI_API_KEY="Your OpenAI API Key"
 ```
 
 ### Option 4: Fork and modify the code to use a different LLM system
@@ -140,11 +145,11 @@ To use Replicate, create a `.env.local` configuration file:
 ```bash
 RENDERING_ENGINE="REPLICATE"
 
-REPLICATE_API_TOKEN="Your Replicate token"
+RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
 
-REPLICATE_API_MODEL="stabilityai/sdxl"
+RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
 
-REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
+AUTH_REPLICATE_API_TOKEN="Your Replicate token"
 ```
 
 ### Option 3: Use another SDXL API
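
The renaming above splits the configuration into two groups: `LLM_*` / `RENDERING_*` for provider and model selection, and `AUTH_*` for credentials. As a rough illustration of how the app presumably consumes the new names, here is a minimal sketch assuming standard `process.env` access in Node/Next.js; the variable names on the left are illustrative, and this is not the project's actual config code (the defaults mirror the comments in the diff):

```typescript
// Sketch: reading the renamed variables via process.env (assumed pattern,
// not copied from the project).
const openaiApiBaseUrl =
  process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"
const openaiApiModel = process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"
const openaiApiKey = process.env.AUTH_OPENAI_API_KEY || ""

const replicateModel = process.env.RENDERING_REPLICATE_API_MODEL || ""
const replicateModelVersion =
  process.env.RENDERING_REPLICATE_API_MODEL_VERSION || ""
const replicateApiToken = process.env.AUTH_REPLICATE_API_TOKEN || ""
```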
src/app/queries/predict.ts CHANGED
@@ -1,9 +1,15 @@
 "use server"
 
 import { LLMEngine } from "@/types"
-import { predictWithHuggingFace } from "./predictWithHuggingFace"
-import { predictWithOpenAI } from "./predictWithOpenAI"
 
 const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
 
-export const predict = llmEngine === "OPENAI" ? predictWithOpenAI : predictWithHuggingFace
+export const predict = async () => {
+  if (llmEngine === "OPENAI") {
+    const module = await import("./predictWithOpenAI")
+    return module.predictWithOpenAI
+  } else {
+    const module = await import("./predictWithHuggingFace")
+    return module.predictWithHuggingFace
+  }
+}
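
Note the change in call shape: `predict` goes from a plain function to an async factory that resolves to the provider-specific function, so call sites presumably have to await it before invoking the result. A minimal sketch of a caller under that assumption (the `prompt: string` signature is a guess for illustration; the provider functions' actual signature is not shown in this diff):

```typescript
// Sketch of a call site after this change. The single-prompt-string
// signature is an assumption for illustration.
import { predict } from "@/app/queries/predict"

async function runPrediction(prompt: string) {
  // predict() resolves to predictWithOpenAI or predictWithHuggingFace,
  // so only the configured provider's module is loaded at call time.
  const predictFn = await predict()
  return predictFn(prompt)
}
```

This is the point of the dynamic-import fix referenced in pull request #9: the unselected provider's module (and its dependencies) is never evaluated, which avoids the import error described in CONTRIBUTORS.md.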