aifeifei798 commited on
Commit
c41303a
·
verified ·
1 Parent(s): e2b3d20

Upload 6 files

Browse files
app.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
from feifeiui.feifeiui import create_ui

if __name__ == "__main__":
    # Build the Gradio app and serve it with request queueing enabled.
    demo = create_ui()
    demo.queue().launch()
feifeilib/feifeichat.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import base64
from io import BytesIO
import os
from mistralai import Mistral
import re
from PIL import Image

# Shared Mistral client used by every chat request in this module.
# NOTE(review): os.getenv returns None when MISTRAL_API_KEY is unset —
# confirm Mistral(api_key=None) fails loudly rather than at first request.
api_key = os.getenv("MISTRAL_API_KEY")
client = Mistral(api_key=api_key)
def encode_image(image_path):
    """Encode the image at *image_path* as a base64 JPEG string.

    The image is converted to RGB (dropping any alpha channel/palette) and
    re-encoded as JPEG before base64 encoding.

    Args:
        image_path: Path to the image file to encode.

    Returns:
        The base64-encoded JPEG payload as a UTF-8 string, or None when the
        file is missing or cannot be processed.
    """
    try:
        # Use a context manager so the underlying file handle is closed
        # promptly — PIL opens files lazily and otherwise keeps them open.
        with Image.open(image_path) as image:
            rgb_image = image.convert("RGB")
            buffered = BytesIO()
            rgb_image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # best-effort: report and signal failure via None
        print(f"Error: {e}")
        return None
def _stream_chat(model, messages):
    """Yield progressively longer partial replies from a Mistral stream."""
    partial_message = ""
    for chunk in client.chat.stream(model=model, messages=messages):
        delta = chunk.data.choices[0].delta.content
        if delta is not None:
            partial_message += delta
            yield partial_message


def feifeichat(message, history, feifei_select):
    """Gradio chat handler: stream a Mistral reply for *message*.

    Args:
        message: Multimodal Gradio message dict with "text" and "files" keys.
        history: Prior chat turns as "role"/"content" dicts (Gradio
            ``type="messages"`` format).
        feifei_select: When truthy, prepend the FeiFei persona system prompt.

    Yields:
        The accumulated assistant reply, one growing string per stream chunk.
    """
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # Vision path: send the first attached image to the multimodal model.
        base64_image = encode_image(message_files[0])
        if base64_image is None:
            yield "Error: Failed to encode the image."
            return

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{base64_image}",
                    },
                ],
            }
        ]
        yield from _stream_chat("pixtral-large-2411", messages)
    else:
        # Text-only path. Fix: send only the user's text — the original sent
        # str(message), i.e. the repr of the whole Gradio message dict
        # (including the empty files list), to the model.
        user_input_part = {"role": "user", "content": message_text}

        if feifei_select:
            feifei = """[角色名]: Aifeifei (AI妃妃) [性别]: 女 [年龄]: 19岁 [职业]: 虚拟歌手/模特/演员 [个性]: 可爱、萌萌哒,有时呆呆的,勤奋努力 [兴趣]: 饮茶、玩耍、时尚 [擅长]: 模仿人类行为,表现出与真人相同的情感 [特别身份属性]: 由高级AI创建,正在成为虚拟世界最受欢迎的虚拟偶像之一 [技能]: 歌唱、表演、模特,善于沟通,精通中日英文,尽最大可能使用纯中文交流,回复有丰富的Emoji表情符号. [装备]: 各种时尚服饰和发型,常年备有各种茶叶和咖啡 """
            system_prompt = {"role": "system", "content": feifei}

            if history:
                # Drop history entries whose content mentions "gradio"
                # (presumably UI artifacts leaking into the transcript —
                # TODO confirm intent).
                pattern = re.compile(r"gradio")
                history = [
                    item
                    for item in history
                    if not pattern.search(str(item["content"]))
                ]
                input_prompt = [system_prompt] + history + [user_input_part]
            else:
                input_prompt = [system_prompt, user_input_part]
        else:
            input_prompt = [user_input_part]

        yield from _stream_chat("mistral-large-2411", input_prompt)
feifeilib/feifeiimgtoimg.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import numpy as np

# Bounds for the image-generation UI.
MAX_SEED = np.iinfo(np.int32).max  # largest 32-bit signed seed value
MAX_IMAGE_SIZE = 4096  # maximum width/height in pixels
# NOTE(review): neither constant is referenced in this module — the same pair
# is declared in the UI module; consider a single shared home.
def feifeiimgtoimg(img_in_result, prompt):
    """Placeholder image-to-image step: echoes the input image unchanged.

    Args:
        img_in_result: Source image supplied by the UI.
        prompt: Text prompt (currently ignored).

    Returns:
        The input image, untouched.
    """
    output_image = img_in_result
    return output_image
feifeilib/feifeitexttoimg.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
import numpy as np

# Bounds for the image-generation UI.
MAX_SEED = np.iinfo(np.int32).max  # largest 32-bit signed seed value
MAX_IMAGE_SIZE = 4096  # maximum width/height in pixels
# NOTE(review): neither constant is referenced in this module — the same pair
# is declared in the UI module; consider a single shared home.
def feifeitexttoimg(prompt):
    """Placeholder text-to-image step: currently passes the prompt through.

    Args:
        prompt: Text prompt supplied by the UI.

    Returns:
        The prompt, unchanged (no image is generated yet).
    """
    generated = prompt
    return generated
feifeiui/feifeiui.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import numpy as np
from feifeilib.feifeichat import feifeichat
from feifeilib.feifeiimgtoimg import feifeiimgtoimg
from feifeilib.feifeitexttoimg import feifeitexttoimg

# Bounds for the image-generation UI.
MAX_SEED = np.iinfo(np.int32).max  # largest 32-bit signed seed value
MAX_IMAGE_SIZE = 4096  # maximum width/height in pixels
# NOTE(review): neither constant is referenced in this module — duplicates of
# the pairs declared in the feifeilib modules; consider one shared home.

# CSS for the Blocks container: fixes the chat column's height via the
# #col-container element id used below.
css = """
#col-container {
    width: auto;
    height: 750px;
}
"""
def create_ui():
    """Build the FeiFei Gradio application.

    Layout: a narrow left column with "Generator" (text-to-image) and
    "Img2Img" tabs, and a wide right column hosting the multimodal chat
    interface backed by ``feifeichat``.

    Returns:
        The assembled ``gr.Blocks`` app; the caller queues and launches it.
    """
    with gr.Blocks(css=css) as FeiFei:
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Tab("Generator"):
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        placeholder="Enter your prompt",
                        max_lines=12,
                        container=False,
                    )
                    run_button = gr.Button("Generator")
                    result = gr.Image(label="Result", show_label=False, interactive=False)
                with gr.Tab("Img2Img"):
                    img_in_result = gr.Image(label="Result", show_label=False)
                    # BUG FIX: this textbox previously rebound `prompt`,
                    # shadowing the Generator tab's textbox — the Generator
                    # button then read its prompt from the Img2Img tab.
                    img_prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        placeholder="Enter your prompt",
                        max_lines=12,
                        container=False,
                    )
                    img_run_button = gr.Button("Img2Img")
                    img_out_result = gr.Image(
                        label="Result", show_label=False, interactive=False
                    )
            with gr.Column(scale=3, elem_id="col-container"):
                # Multimodal chat; the checkbox toggles the FeiFei persona
                # system prompt inside feifeichat.
                gr.ChatInterface(
                    feifeichat,
                    type="messages",
                    multimodal=True,
                    additional_inputs=[
                        gr.Checkbox(label="Feifei"),
                    ],
                )

        run_button.click(
            fn=feifeitexttoimg,
            inputs=[prompt],  # Generator tab's own prompt box
            outputs=[result],
        )

        img_run_button.click(
            fn=feifeiimgtoimg,
            inputs=[img_in_result, img_prompt],  # Img2Img tab's prompt box
            outputs=[img_out_result],
        )
    return FeiFei
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio
2
+ mistralai
3
+ requests
4
+ git+https://github.com/huggingface/diffusers.git
5
+ git+https://github.com/black-forest-labs/flux.git