Julian BILCKE committed
Commit 938a431 • 2 parents: 28ce999 e1c1857

Merge pull request #9 from all-in-aigc/bugfix/predict-import-error

Files changed (2)
  1. README.md +9 -6
  2. src/app/queries/predict.ts +9 -3
README.md CHANGED
@@ -102,10 +102,13 @@ To activate it, create a `.env.local` configuration file:
 
 ```bash
 LLM_ENGINE="OPENAI"
+
 # default openai api base url is: https://api.openai.com/v1
-OPENAI_API_BASE_URL="Your OpenAI API Base URL"
-OPENAI_API_KEY="Your OpenAI API Key"
-OPENAI_API_MODEL="gpt-3.5-turbo"
+LLM_OPENAI_API_BASE_URL="Your OpenAI API Base URL"
+
+LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
+
+AUTH_OPENAI_API_KEY="Your OpenAI API Key"
 ```
 
 ### Option 4: Fork and modify the code to use a different LLM system
@@ -140,11 +143,11 @@ To use Replicate, create a `.env.local` configuration file:
 ```bash
 RENDERING_ENGINE="REPLICATE"
 
-REPLICATE_API_TOKEN="Your Replicate token"
+RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
 
-REPLICATE_API_MODEL="stabilityai/sdxl"
+RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
 
-REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
+AUTH_REPLICATE_API_TOKEN="Your Replicate token"
 ```
 
 ### Option 3: Use another SDXL API
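The rename groups the environment variables by prefix: `LLM_*` for language-model settings, `RENDERING_*` for the image backend, and `AUTH_*` for credentials. As a rough sketch of what this looks like on the consuming side, assuming the `process.env` access pattern already used in `predict.ts` (the `openAIConfig` object below is hypothetical, not code from this commit; defaults mirror the README comments above):

```typescript
// Hypothetical illustration of the renamed variables; not part of this commit.
const openAIConfig = {
  baseUrl: `${process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`,
  model: `${process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"}`,
  // Credentials now live under the AUTH_ prefix.
  apiKey: `${process.env.AUTH_OPENAI_API_KEY || ""}`,
}
```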
src/app/queries/predict.ts CHANGED
@@ -1,9 +1,15 @@
 "use server"
 
 import { LLMEngine } from "@/types"
-import { predictWithHuggingFace } from "./predictWithHuggingFace"
-import { predictWithOpenAI } from "./predictWithOpenAI"
 
 const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
 
-export const predict = llmEngine === "OPENAI" ? predictWithOpenAI : predictWithHuggingFace
+export const predict = async () => {
+  if (llmEngine === "OPENAI") {
+    const module = await import("./predictWithOpenAI")
+    return module.predictWithOpenAI
+  } else {
+    const module = await import("./predictWithHuggingFace")
+    return module.predictWithHuggingFace
+  }
+}
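The fix replaces the eager top-level imports with dynamic `import()` calls, so only the selected engine's module (and its dependency chain) is loaded at runtime, which is what resolves the import error named in the branch. Note the call-site contract changes: `predict` is no longer the prediction function itself but an async factory that resolves to one. A minimal usage sketch under that assumption (the `prompt` parameter and the prediction function's signature are assumptions, not verified against the rest of the repo):

```typescript
// Hypothetical call site; actual callers live elsewhere in the repo.
import { predict } from "@/app/queries/predict"

async function generate(prompt: string) {
  // predict() resolves the engine module lazily and returns its function.
  const predictFn = await predict()
  // Signature assumed from predictWithOpenAI / predictWithHuggingFace.
  return predictFn(prompt)
}
```

Existing callers written against the old `export const predict = ...` form would need this extra `await predict()` step, since the export is now a factory rather than the function itself.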