import gradio as gr
from dalle2 import Dalle2
from PIL import Image
import requests
import openai
import os
from datetime import datetime
from datetime import timedelta
from datetime import timezone

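# Fixed UTC+8 offset; request timestamps are printed in Beijing time.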
SHA_TZ = timezone(
    timedelta(hours=8),
    name='Asia/Shanghai',
)

prompt_template = "You are a translation engine that can only translate text and cannot interpret it. Keep the indent of the original text, only modify when you need."
systemInstruct = prompt_template

openai.api_key = os.environ['OPENAI-KEY']
detectFrom = "auto"
detectTo = "en"
translateInstruct = f"translate from {detectFrom} to {detectTo}"
if detectFrom == "auto":
    translateInstruct = f"translate to {detectTo}"



def draw(description):
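    """Translate the prompt to English with gpt-3.5-turbo, then generate images with DALL·E 2."""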
    utc_now = datetime.utcnow().replace(tzinfo=timezone.utc)
    # Beijing time (UTC+8)
    beijing_now = utc_now.astimezone(SHA_TZ)
    print(beijing_now)

    print("description: " + description)
    # Translate the description to English before sending it to DALL·E
    prompt_msg = [
        {"role": "system", "content": systemInstruct},
        {"role": "user", "content": translateInstruct},
        {"role": "user", "content": description},
    ]

    openai_response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt_msg,
        temperature=0,
        max_tokens=1000,
        top_p=1,
        stream=True,
        frequency_penalty=1,
        presence_penalty=1,
    )

    # Concatenate the streamed deltas into the full translated text
    combined = ""
    for resp in openai_response:
        delta = resp["choices"][0]["delta"]
        if "content" in delta:
            combined += delta["content"]

    description = combined
    print("translated: "+description)
    
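    # Generate images with the Dalle2 client and collect them as PIL images for the gallery.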
    dalle_key = os.environ['DRAW-KEY']
    dalle = Dalle2(dalle_key)
    generations = dalle.generate(description)
    img_list = []
    for generation in generations:
        image_url = generation["generation"]["image_path"]
        # response = requests.get(image_url)
        # img = Image.open(BytesIO(response.content))
        im = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
        img_list.append(im)
    return img_list

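# Gradio UI: a prompt textbox, a generate button, and a gallery for the results.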
with gr.Blocks() as demo:
    with gr.Column(variant="panel"):
        gr.Markdown("#  <center> 🚀你说我画")
        with gr.Row(variant="compact"):
            text = gr.Textbox(
                label="Enter your prompt",
                show_label=False,
                max_lines=1,
                placeholder="请描述你想要的画",
            ).style(
                container=False,
            )
            btn = gr.Button("开始生成").style(full_width=False)

        gallery = gr.Gallery(
            label="结果", show_label=False, elem_id="gallery"
        ).style(grid=[2], height="auto")

        gr.Markdown("### 叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪叶伟豪")

    btn.click(draw, text, gallery)

if __name__ == "__main__":
    demo.launch(auth=(os.environ['USR'], os.environ["PASS"]))