#!/usr/bin/env python
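"""Gradio demo tab for scribble-conditioned image generation, with an OpenAI-powered 'Magic Prompt' rephraser."""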

import os

import gradio as gr
import openai

from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
                      MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
from utils import randomize_seed_fn


def magic_prompt(prompt):
    """Rephrase the user's prompt into a comma-separated product description via the OpenAI API."""
    # Read the API key from the environment instead of hard-coding a secret in the source.
    openai.api_key = os.getenv("OPENAI_API_KEY")
    if prompt == "" or prompt == "Please write a prompt first ✍️":
        return "Please write a prompt first ✍️"
    # Uses the legacy (pre-1.0) openai-python ChatCompletion interface.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a rephraser."},
            {"role": "user", "content": "You will be rephrasing the given details (details will be given in triple backticks) about an object/product. For example, if “gray metal moka pot with black holder on wooden surface” is the given details, it should be rephrased as “moka pot, gray metal body, black holder, on wooden surface”. Output should be in comma-separated form. Don’t add any comments or additional features. Just give 1 output at a time. The given details “””{prompt}”””".format(prompt=prompt)},
        ],
    )
    return str(completion.choices[0].message.content)


def create_demo(process):
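    """Build the Gradio Blocks UI for the scribble demo and wire up its event handlers."""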
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                image = gr.Image()
                prompt = gr.Textbox(label='Prompt')
                mag_prompt_btn = gr.Button("✨Magic Prompt")
                preprocessor_name = gr.Radio(
                        label='Preprocessor',
                        choices=['HED', 'PidiNet', 'None'],
                        type='value',
                        value='HED')
                run_button = gr.Button('Run')
                with gr.Accordion('Advanced options', open=False, visible=False):
                    num_samples = gr.Slider(label='Number of images',
                                            minimum=1,
                                            maximum=4,
                                            value=4,
                                            step=1)
                    image_resolution = gr.Slider(
                        label='Image resolution',
                        minimum=256,
                        maximum=MAX_IMAGE_RESOLUTION,
                        value=512,
                        step=256)
                    preprocess_resolution = gr.Slider(
                        label='Preprocess resolution',
                        minimum=128,
                        maximum=512,
                        value=512,
                        step=1)
                    num_steps = gr.Slider(label='Number of steps',
                                          minimum=1,
                                          maximum=100,
                                          value=20,
                                          step=1)
                    guidance_scale = gr.Slider(label='Guidance scale',
                                               minimum=0.1,
                                               maximum=30.0,
                                               value=9.0,
                                               step=0.1)
                    seed = gr.Slider(label='Seed',
                                     minimum=0,
                                     maximum=MAX_SEED,
                                     step=1,
                                     value=0)
                    randomize_seed = gr.Checkbox(label='Randomize seed',
                                                 value=True)
                    a_prompt = gr.Textbox(
                        label='Additional prompt',
                        value='best quality, extremely detailed')
                    n_prompt = gr.Textbox(
                        label='Negative prompt',
                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality',
                    )
            with gr.Column():
                result = gr.Gallery(label='Output',
                                    show_label=True,
                                    preview=True,
                                    columns=2,
                                    object_fit='scale-down')
        inputs = [
            image,
            prompt,
            a_prompt,
            n_prompt,
            num_samples,
            image_resolution,
            preprocess_resolution,
            num_steps,
            guidance_scale,
            seed,
            preprocessor_name,
        ]
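        # Submitting the prompt with Enter first (optionally) randomizes the seed,
        # then runs the image-generation pipeline on the collected inputs.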
        prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=process,
            inputs=inputs,
            outputs=result,
            api_name=False,
        )
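        # The Magic Prompt button rewrites the prompt textbox in place using the
        # OpenAI-based rephraser defined above.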
        mag_prompt_btn.click(
            fn=magic_prompt,
            inputs=prompt,
            outputs=prompt,
        )
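        # The Run button mirrors the prompt-submit flow, but exposes the call as
        # the 'scribble' API endpoint.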
        run_button.click(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=process,
            inputs=inputs,
            outputs=result,
            api_name='scribble',
        )
    return demo


if __name__ == '__main__':
    from model import Model
    model = Model(task_name='scribble')
    demo = create_demo(model.process_scribble)
    demo.queue().launch()