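# Gradio demo: the user enters an image description (the UI is in Chinese),
# the description is translated to English with the OpenAI chat API, and the
# translated text is sent to DALL-E 2 to generate images shown in a gallery.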
import gradio as gr
from dalle2 import Dalle2
from PIL import Image
import requests
import openai
import os


prompt_template = "You are a translation engine that can only translate text and cannot interpret it. Keep the indentation of the original text, and only modify it when necessary."
systemInstruct = prompt_template

openai.api_key = os.environ['JKLAS']
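# The same JKLAS environment variable is read again below to construct the Dalle2 client.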
detectFrom = "auto"
detectTo = "en"
translateInstruct = f"translate from {detectFrom} to {detectTo}"
if detectFrom == "auto":
    translateInstruct = f"translate to {detectTo}"

def build_prompt_msg(text):
    # Build the chat messages for the translation request; `text` is the
    # user-supplied description passed in per call.
    return [
        {"role": "system", "content": systemInstruct},
        {"role": "user", "content": translateInstruct},
        {"role": "user", "content": text},
    ]

def draw(description):
    print("description: " + description)
    # Translate the description to English before sending it to DALL-E 2.
    try:
        openai_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=build_prompt_msg(description),
            temperature=0,
            max_tokens=1000,
            top_p=1,
            stream=True,
            frequency_penalty=1,
            presence_penalty=1,
        )

        # Concatenate the streamed chunks into the translated description.
        combined = ""
        for resp in openai_response:
            delta = resp["choices"][0]["delta"]
            if "content" in delta:
                combined += delta["content"]
        description = combined
        print("translated: " + description)
    except Exception as e:
        # If translation fails, fall back to the original description.
        print("translation failed: " + str(e))

    JKLAS = os.environ["JKLAS"] 
    dalle = Dalle2(JKLAS)
    generations = dalle.generate(description)
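    # generation["image_path"] is the URL of a hosted image; each one is
    # downloaded and converted to RGB below.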
    img_list = []
    for generation in generations:
        image_url = generation["generation"]["image_path"]
        # response = requests.get(image_url)
        # img = Image.open(BytesIO(response.content))
        im = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
        img_list.append(im)
    return img_list

with gr.Blocks() as demo:
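    # UI strings are in Chinese: "你说我画" ~ "you describe it, I draw it",
    # "请描述你想要的画" ~ "describe the picture you want",
    # "开始生成" ~ "start generating", "结果" ~ "results".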
    with gr.Column(variant="panel"):
        gr.Markdown("#  <center> 🚀你说我画")
        with gr.Row(variant="compact"):
            text = gr.Textbox(
                label="Enter your prompt",
                show_label=False,
                max_lines=1,
                placeholder="请描述你想要的画",
            ).style(
                container=False,
            )
            btn = gr.Button("开始生成").style(full_width=False)

        gallery = gr.Gallery(
            label="结果", show_label=False, elem_id="gallery"
        ).style(grid=[2], height="auto")

        gr.Markdown("### 叶伟豪")

    btn.click(draw, text, gallery)

if __name__ == "__main__":
    demo.launch()
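# Typical launch (assuming this file is saved as app.py; JKLAS must hold both
# the OpenAI API key and the DALL-E 2 session token, as the code above expects):
#   JKLAS=<token> python app.py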