Feliciano Long commited on
Commit
0b2092a
1 Parent(s): cae1b59

feat: add mj image generation support through proxy api (#871)

Browse files

* add mj image generation support through proxy api

* reverse commit

.gitignore CHANGED
@@ -145,3 +145,4 @@ lora/
145
  .idea
146
  templates/*
147
  files/
 
 
145
  .idea
146
  templates/*
147
  files/
148
+ tmp/
config_example.json CHANGED
@@ -7,6 +7,11 @@
7
  "xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
8
  "minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
9
  "minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
 
 
 
 
 
10
 
11
  //== Azure ==
12
  "openai_api_type": "openai", // 可选项:azure, openai
 
7
  "xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
8
  "minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
9
  "minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
10
+ "midjourney_proxy_api_base": "https://xxx/mj", // 你的 https://github.com/novicezk/midjourney-proxy 代理地址
11
+ "midjourney_proxy_api_secret": "", // 你的 MidJourney Proxy API Secret,用于鉴权访问 api,可选
12
+ "midjourney_discord_proxy_url": "", // 你的 MidJourney Discord Proxy URL,用于对生成对图进行反代,可选
13
+ "midjourney_temp_folder": "./tmp", // 你的 MidJourney 临时文件夹,用于存放生成的图片,填空则关闭自动下载切图(直接显示MJ的四宫格图)
14
+
15
 
16
  //== Azure ==
17
  "openai_api_type": "openai", // 可选项:azure, openai
modules/config.py CHANGED
@@ -114,6 +114,15 @@ os.environ["MINIMAX_API_KEY"] = minimax_api_key
114
  minimax_group_id = config.get("minimax_group_id", "")
115
  os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
116
 
 
 
 
 
 
 
 
 
 
117
  load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
118
  "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
119
 
 
114
  minimax_group_id = config.get("minimax_group_id", "")
115
  os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
116
 
117
+ midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "")
118
+ os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base
119
+ midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "")
120
+ os.environ["MIDJOURNEY_PROXY_API_SECRET"] = midjourney_proxy_api_secret
121
+ midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "")
122
+ os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url
123
+ midjourney_temp_folder = config.get("midjourney_temp_folder", "")
124
+ os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder
125
+
126
  load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
127
  "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])
128
 
modules/models/base_model.py CHANGED
@@ -141,6 +141,7 @@ class ModelType(Enum):
141
  ChuanhuAgent = 8
142
  GooglePaLM = 9
143
  LangchainChat = 10
 
144
 
145
  @classmethod
146
  def get_type(cls, model_name: str):
@@ -166,6 +167,8 @@ class ModelType(Enum):
166
  model_type = ModelType.ChuanhuAgent
167
  elif "palm" in model_name_lower:
168
  model_type = ModelType.GooglePaLM
 
 
169
  elif "azure" in model_name_lower or "api" in model_name_lower:
170
  model_type = ModelType.LangchainChat
171
  else:
 
141
  ChuanhuAgent = 8
142
  GooglePaLM = 9
143
  LangchainChat = 10
144
+ Midjourney = 11
145
 
146
  @classmethod
147
  def get_type(cls, model_name: str):
 
167
  model_type = ModelType.ChuanhuAgent
168
  elif "palm" in model_name_lower:
169
  model_type = ModelType.GooglePaLM
170
+ elif "midjourney" in model_name_lower:
171
+ model_type = ModelType.Midjourney
172
  elif "azure" in model_name_lower or "api" in model_name_lower:
173
  model_type = ModelType.LangchainChat
174
  else:
modules/models/midjourney.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import base64
import io
import json
import logging
import pathlib
import time
import tempfile
import os

from datetime import datetime

import requests
import tiktoken
from PIL import Image

from modules.config import retrieve_proxy
from modules.models.models import XMChat

# Proxy configuration is injected through the environment by modules/config.py.
# Base URL of the midjourney-proxy deployment (https://github.com/novicezk/midjourney-proxy).
mj_proxy_api_base = os.getenv("MIDJOURNEY_PROXY_API_BASE")
# Optional reverse-proxy prefix substituted for the Discord CDN in image URLs.
mj_discord_proxy_url = os.getenv("MIDJOURNEY_DISCORD_PROXY_URL")
# Optional local folder; when set, the generated 4-grid image is downloaded and split.
mj_temp_folder = os.getenv("MIDJOURNEY_TEMP_FOLDER")
class Midjourney_Client(XMChat):
    """Chat-style client that drives a midjourney-proxy deployment via "/mj" commands."""

    class FetchDataPack:
        """
        A class to store data for current fetching data from Midjourney API
        """

        action: str  # current action, e.g. "IMAGINE", "UPSCALE", "VARIATION"
        prefix_content: str  # prefix content, task description and process hint
        task_id: str  # task id
        start_time: float  # task start timestamp
        timeout: int  # task timeout in seconds
        finished: bool  # whether the task is finished
        prompt: str  # prompt for the task

        def __init__(self, action, prefix_content, task_id, timeout=900, prompt=""):
            self.action = action
            self.prefix_content = prefix_content
            self.task_id = task_id
            self.start_time = time.time()
            self.timeout = timeout
            self.finished = False
            # Initialize prompt so the timeout message in fetch_status never
            # hits an unset attribute; callers may overwrite it afterwards.
            # (The original left it undeclared, crashing on timeout when the
            # caller forgot the extra assignment.)
            self.prompt = prompt
47
+ def __init__(self, model_name, api_key, user_name=""):
48
+ super().__init__(api_key, user_name)
49
+ self.model_name = model_name
50
+ self.history = []
51
+ self.api_key = api_key
52
+ self.headers = {
53
+ "Content-Type": "application/json",
54
+ "mj-api-secret": f"{api_key}"
55
+ }
56
+ self.proxy_url = mj_proxy_api_base
57
+ self.command_splitter = "::"
58
+
59
+ if mj_temp_folder:
60
+ temp = "./tmp"
61
+ if user_name:
62
+ temp = os.path.join(temp, user_name)
63
+ if not os.path.exists(temp):
64
+ os.makedirs(temp)
65
+ self.temp_path = tempfile.mkdtemp(dir=temp)
66
+ logging.info("mj temp folder: " + self.temp_path)
67
+ else:
68
+ self.temp_path = None
69
+
70
+ def use_mj_self_proxy_url(self, img_url):
71
+ """
72
+ replace discord cdn url with mj self proxy url
73
+ """
74
+ return img_url.replace(
75
+ "https://cdn.discordapp.com/",
76
+ mj_discord_proxy_url and mj_discord_proxy_url or "https://cdn.discordapp.com/"
77
+ )
78
+
79
+ def split_image(self, image_url):
80
+ """
81
+ when enabling temp dir, split image into 4 parts
82
+ """
83
+ with retrieve_proxy():
84
+ image_bytes = requests.get(image_url).content
85
+ img = Image.open(io.BytesIO(image_bytes))
86
+ width, height = img.size
87
+ # calculate half width and height
88
+ half_width = width // 2
89
+ half_height = height // 2
90
+ # create coordinates (top-left x, top-left y, bottom-right x, bottom-right y)
91
+ coordinates = [(0, 0, half_width, half_height),
92
+ (half_width, 0, width, half_height),
93
+ (0, half_height, half_width, height),
94
+ (half_width, half_height, width, height)]
95
+
96
+ images = [img.crop(c) for c in coordinates]
97
+ return images
98
+
99
+ def auth_mj(self):
100
+ """
101
+ auth midjourney api
102
+ """
103
+ # TODO: check if secret is valid
104
+ return {'status': 'ok'}
105
+
106
+ def request_mj(self, path: str, action: str, data: str, retries=3):
107
+ """
108
+ request midjourney api
109
+ """
110
+ mj_proxy_url = self.proxy_url
111
+ if mj_proxy_url is None or not (mj_proxy_url.startswith("http://") or mj_proxy_url.startswith("https://")):
112
+ raise Exception('please set MIDJOURNEY_PROXY_API_BASE in ENV or in config.json')
113
+
114
+ auth_ = self.auth_mj()
115
+ if auth_.get('error'):
116
+ raise Exception('auth not set')
117
+
118
+ fetch_url = f"{mj_proxy_url}/{path}"
119
+ # logging.info(f"[MJ Proxy] {action} {fetch_url} params: {data}")
120
+
121
+ for _ in range(retries):
122
+ try:
123
+ with retrieve_proxy():
124
+ res = requests.request(method=action, url=fetch_url, headers=self.headers, data=data)
125
+ break
126
+ except Exception as e:
127
+ print(e)
128
+
129
+ if res.status_code != 200:
130
+ raise Exception(f'{res.status_code} - {res.content}')
131
+
132
+ return res
133
+
134
+ def fetch_status(self, fetch_data: FetchDataPack):
135
+ """
136
+ fetch status of current task
137
+ """
138
+ if fetch_data.start_time + fetch_data.timeout < time.time():
139
+ fetch_data.finished = True
140
+ return "任务超时,请检查 dc 输出。描述:" + fetch_data.prompt
141
+
142
+ time.sleep(3)
143
+ status_res = self.request_mj(f"task/{fetch_data.task_id}/fetch", "GET", '')
144
+ status_res_json = status_res.json()
145
+ if not (200 <= status_res.status_code < 300):
146
+ raise Exception("任务状态获取失败:" + status_res_json.get(
147
+ 'error') or status_res_json.get('description') or '未知错误')
148
+ else:
149
+ fetch_data.finished = False
150
+ if status_res_json['status'] == "SUCCESS":
151
+ content = status_res_json['imageUrl']
152
+ fetch_data.finished = True
153
+ elif status_res_json['status'] == "FAILED":
154
+ content = status_res_json['failReason'] or '未知原因'
155
+ fetch_data.finished = True
156
+ elif status_res_json['status'] == "NOT_START":
157
+ content = f'任务未开始,已等待 {time.time() - fetch_data.start_time:.2f} 秒'
158
+ elif status_res_json['status'] == "IN_PROGRESS":
159
+ content = '任务正在运行'
160
+ if status_res_json.get('progress'):
161
+ content += f",进度:{status_res_json['progress']}"
162
+ elif status_res_json['status'] == "SUBMITTED":
163
+ content = '任务已提交处理'
164
+ elif status_res_json['status'] == "FAILURE":
165
+ fetch_data.finished = True
166
+ return "任务处理失败,原因:" + status_res_json['failReason'] or '未知原因'
167
+ else:
168
+ content = status_res_json['status']
169
+ if fetch_data.finished:
170
+ img_url = self.use_mj_self_proxy_url(status_res_json['imageUrl'])
171
+ if fetch_data.action == "DESCRIBE":
172
+ return f"\n{status_res_json['prompt']}"
173
+ time_cost_str = f"\n\n{fetch_data.action} 花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
174
+ upscale_str = ""
175
+ variation_str = ""
176
+ if fetch_data.action in ["IMAGINE", "UPSCALE", "VARIATION"]:
177
+ upscale = [f'/mj UPSCALE{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
178
+ for i in range(4)]
179
+ upscale_str = '\n放大图片:\n\n' + '\n\n'.join(upscale)
180
+ variation = [f'/mj VARIATION{self.command_splitter}{i+1}{self.command_splitter}{fetch_data.task_id}'
181
+ for i in range(4)]
182
+ variation_str = '\n图片变体:\n\n' + '\n\n'.join(variation)
183
+ if self.temp_path and fetch_data.action in ["IMAGINE", "VARIATION"]:
184
+ try:
185
+ images = self.split_image(img_url)
186
+ # save images to temp path
187
+ for i in range(4):
188
+ images[i].save(pathlib.Path(self.temp_path) / f"{fetch_data.task_id}_{i}.png")
189
+ img_str = '\n'.join(
190
+ [f"![{fetch_data.task_id}](/file={self.temp_path}/{fetch_data.task_id}_{i}.png)"
191
+ for i in range(4)])
192
+ return fetch_data.prefix_content + f"{time_cost_str}\n\n{img_str}{upscale_str}{variation_str}"
193
+ except Exception as e:
194
+ logging.error(e)
195
+ return fetch_data.prefix_content + \
196
+ f"{time_cost_str}[![{fetch_data.task_id}]({img_url})]({img_url}){upscale_str}{variation_str}"
197
+ else:
198
+ content = f"**任务状态:** [{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - {content}"
199
+ content += f"\n\n花费时间:{time.time() - fetch_data.start_time:.2f} 秒"
200
+ if status_res_json['status'] == 'IN_PROGRESS' and status_res_json.get('imageUrl'):
201
+ img_url = status_res_json.get('imageUrl')
202
+ return f"{content}\n[![{fetch_data.task_id}]({img_url})]({img_url})"
203
+ return content
204
+ return None
205
+
206
+ def handle_file_upload(self, files, chatbot, language):
207
+ """
208
+ handle file upload
209
+ """
210
+ if files:
211
+ for file in files:
212
+ if file.name:
213
+ logging.info(f"尝试读取图像: {file.name}")
214
+ self.try_read_image(file.name)
215
+ if self.image_path is not None:
216
+ chatbot = chatbot + [((self.image_path,), None)]
217
+ if self.image_bytes is not None:
218
+ logging.info("使用图片作为输入")
219
+ return None, chatbot, None
220
+
221
+ def reset(self):
222
+ self.image_bytes = None
223
+ self.image_path = None
224
+ return [], "已重置"
225
+
226
+ def get_answer_at_once(self):
227
+ content = self.history[-1]['content']
228
+ answer = self.get_help()
229
+
230
+ if not content.lower().startswith("/mj"):
231
+ return answer, len(content)
232
+
233
+ prompt = content[3:].strip()
234
+ action = "IMAGINE"
235
+ first_split_index = prompt.find(self.command_splitter)
236
+ if first_split_index > 0:
237
+ action = prompt[:first_split_index]
238
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
239
+ # "VARIATION", "BLEND", "REROLL"
240
+ ]:
241
+ raise Exception("任务提交失败:未知的任务类型")
242
+ else:
243
+ action_index = None
244
+ action_use_task_id = None
245
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
246
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
247
+ action_use_task_id = prompt[first_split_index + 5:]
248
+
249
+ try:
250
+ res = None
251
+ if action == "IMAGINE":
252
+ data = {
253
+ "prompt": prompt
254
+ }
255
+ if self.image_bytes is not None:
256
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
257
+ res = self.request_mj("submit/imagine", "POST",
258
+ json.dumps(data))
259
+ elif action == "DESCRIBE":
260
+ res = self.request_mj("submit/describe", "POST",
261
+ json.dumps({"base64": 'data:image/png;base64,' + self.image_bytes}))
262
+ elif action == "BLEND":
263
+ res = self.request_mj("submit/blend", "POST", json.dumps(
264
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
265
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
266
+ res = self.request_mj(
267
+ "submit/change", "POST",
268
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
269
+ res_json = res.json()
270
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
271
+ answer = "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
272
+ else:
273
+ task_id = res_json['result']
274
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
275
+
276
+ fetch_data = Midjourney_Client.FetchDataPack(
277
+ action=action,
278
+ prefix_content=prefix_content,
279
+ task_id=task_id,
280
+ )
281
+ fetch_data.prompt = prompt
282
+ while not fetch_data.finished:
283
+ answer = self.fetch_status(fetch_data)
284
+ except Exception as e:
285
+ logging.error("submit failed", e)
286
+ answer = "任务提交错误:" + str(e.args[0]) if e.args else '未知错误'
287
+
288
+ return answer, tiktoken.get_encoding("cl100k_base").encode(content)
289
+
290
+ def get_answer_stream_iter(self):
291
+ content = self.history[-1]['content']
292
+ answer = self.get_help()
293
+
294
+ if not content.lower().startswith("/mj"):
295
+ yield answer
296
+ return
297
+
298
+ prompt = content[3:].strip()
299
+ action = "IMAGINE"
300
+ first_split_index = prompt.find(self.command_splitter)
301
+ if first_split_index > 0:
302
+ action = prompt[:first_split_index]
303
+ if action not in ["IMAGINE", "DESCRIBE", "UPSCALE",
304
+ "VARIATION", "BLEND", "REROLL"
305
+ ]:
306
+ yield "任务提交失败:未知的任务类型"
307
+ return
308
+
309
+ action_index = None
310
+ action_use_task_id = None
311
+ if action in ["VARIATION", "UPSCALE", "REROLL"]:
312
+ action_index = int(prompt[first_split_index + 2:first_split_index + 3])
313
+ action_use_task_id = prompt[first_split_index + 5:]
314
+
315
+ try:
316
+ res = None
317
+ if action == "IMAGINE":
318
+ data = {
319
+ "prompt": prompt
320
+ }
321
+ if self.image_bytes is not None:
322
+ data["base64"] = 'data:image/png;base64,' + self.image_bytes
323
+ res = self.request_mj("submit/imagine", "POST",
324
+ json.dumps(data))
325
+ elif action == "DESCRIBE":
326
+ res = self.request_mj("submit/describe", "POST", json.dumps(
327
+ {"base64": 'data:image/png;base64,' + self.image_bytes}))
328
+ elif action == "BLEND":
329
+ res = self.request_mj("submit/blend", "POST", json.dumps(
330
+ {"base64Array": [self.image_bytes, self.image_bytes]}))
331
+ elif action in ["UPSCALE", "VARIATION", "REROLL"]:
332
+ res = self.request_mj(
333
+ "submit/change", "POST",
334
+ json.dumps({"action": action, "index": action_index, "taskId": action_use_task_id}))
335
+ res_json = res.json()
336
+ if not (200 <= res.status_code < 300) or (res_json['code'] not in [1, 22]):
337
+ yield "任务提交失败:" + res_json.get('error', res_json.get('description', '未知错误'))
338
+ else:
339
+ task_id = res_json['result']
340
+ prefix_content = f"**画面描述:** {prompt}\n**任务ID:** {task_id}\n"
341
+ content = f"[{(datetime.now()).strftime('%Y-%m-%d %H:%M:%S')}] - 任务提交成功:" + \
342
+ res_json.get('description') or '请稍等片刻'
343
+ yield content
344
+
345
+ fetch_data = Midjourney_Client.FetchDataPack(
346
+ action=action,
347
+ prefix_content=prefix_content,
348
+ task_id=task_id,
349
+ )
350
+ while not fetch_data.finished:
351
+ yield self.fetch_status(fetch_data)
352
+ except Exception as e:
353
+ logging.error('submit failed', e)
354
+ yield "任务提交错误:" + str(e.args[0]) if e.args else '未知错误'
355
+
    def get_help(self):
        """Return the Chinese help text describing /mj commands and parameters."""
        return """```
【绘图帮助】
所有命令都需要以 /mj 开头,如:/mj a dog
IMAGINE - 绘图,可以省略该命令,后面跟上绘图内容
    /mj a dog
    /mj IMAGINE::a cat
DESCRIBE - 描述图片,需要在右下角上传需要描述的图片内容
    /mj DESCRIBE::
UPSCALE - 确认后放大图片,第一个数值为需要放大的图片(1~4),第二参数为任务ID
    /mj UPSCALE::1::123456789
    请使用SD进行UPSCALE
VARIATION - 图片变体,第一个数值为需要放大的图片(1~4),第二参数为任务ID
    /mj VARIATION::1::123456789

【绘图参数】
所有命令默认会带上参数--v 5.2
其他参数参照 https://docs.midjourney.com/docs/parameter-list
长宽比 --aspect/--ar
    --ar 1:2
    --ar 16:9
负面tag --no
    --no plants
    --no hands
随机种子 --seed
    --seed 1
生成动漫风格(NijiJourney) --niji
    --niji
```
"""
modules/models/models.py CHANGED
@@ -621,6 +621,10 @@ def get_model(
621
  elif model_type == ModelType.LangchainChat:
622
  from .azure import Azure_OpenAI_Client
623
  model = Azure_OpenAI_Client(model_name, user_name=user_name)
 
 
 
 
624
  elif model_type == ModelType.Unknown:
625
  raise ValueError(f"未知模型: {model_name}")
626
  logging.info(msg)
 
621
  elif model_type == ModelType.LangchainChat:
622
  from .azure import Azure_OpenAI_Client
623
  model = Azure_OpenAI_Client(model_name, user_name=user_name)
624
+ elif model_type == ModelType.Midjourney:
625
+ from .midjourney import Midjourney_Client
626
+ mj_proxy_api_secret = os.getenv("MIDJOURNEY_PROXY_API_SECRET")
627
+ model = Midjourney_Client(model_name, mj_proxy_api_secret, user_name=user_name)
628
  elif model_type == ModelType.Unknown:
629
  raise ValueError(f"未知模型: {model_name}")
630
  logging.info(msg)
modules/presets.py CHANGED
@@ -69,6 +69,7 @@ ONLINE_MODELS = [
69
  "yuanai-1.0-rhythm_poems",
70
  "minimax-abab4-chat",
71
  "minimax-abab5-chat",
 
72
  ]
73
 
74
  LOCAL_MODELS = [
 
69
  "yuanai-1.0-rhythm_poems",
70
  "minimax-abab4-chat",
71
  "minimax-abab5-chat",
72
+ "midjourney"
73
  ]
74
 
75
  LOCAL_MODELS = [