import gradio as gr
import requests
import time
import os
import re

from gradio_client import Client

hf_token = os.environ.get("HF_TOKEN")
client = Client("fffiloni/safety-checker-bot", hf_token=hf_token)

def safety_check(user_prompt):
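    # Ask the moderation Space whether the prompt is acceptable; it replies with free text
    # that contains "Yes" when the prompt should be blocked.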
    
    response = client.predict(
        "consistent-character space",    # str source space
        user_prompt,	# str  in 'User sent this' Textbox component
        api_name="/infer"
    )
    return response

from utils.gradio_helpers import parse_outputs, process_outputs

names = ['prompt', 'negative_prompt', 'subject', 'number_of_outputs', 'number_of_images_per_pose', 'randomise_poses', 'output_format', 'output_quality', 'seed']
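# The order of `names` must match the `inputs` list wired to submit_btn.click below;
# each name becomes a key in the Cog prediction payload.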

def predict(request: gr.Request, *args, progress=gr.Progress(track_tqdm=True)):
    print(f"Prompt received: {args[0]}")
    if args[0] is None or args[0] == '':
        raise gr.Error("You forgot to provide a prompt.")
    
    try:
        
        is_safe = safety_check(args[0])
        print(is_safe)
    
        # The moderation bot answers in free text; a standalone "Yes" means the prompt was flagged.
        if re.search(r'\bYes\b', is_safe):
            raise gr.Error("Do not ask for such things.")
        else:
    
            headers = {'Content-Type': 'application/json'}
    
            payload = {"input": {}}
        
        
            base_url = "http://0.0.0.0:7860"
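            # Any argument that is a local file path (e.g. the subject image) is rewritten as a
            # URL served by this Gradio app so the Cog container can fetch it.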
            for i, key in enumerate(names):
                value = args[i]
                if value and (os.path.exists(str(value))):
                    value = f"{base_url}/file=" + value
                if value is not None and value != "":
                    payload["input"][key] = value
    
            response = requests.post("http://0.0.0.0:5000/predictions", headers=headers, json=payload)
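            # Cog answers 201 when the prediction was accepted for asynchronous processing
            # (with URLs to poll), or 200 when the result is already in the response body.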
    
        
            if response.status_code == 201:
                follow_up_url = response.json()["urls"]["get"]
                response = requests.get(follow_up_url, headers=headers)
                while response.json()["status"] != "succeeded":
                    if response.json()["status"] == "failed":
                        raise gr.Error("The submission failed!")
                    response = requests.get(follow_up_url, headers=headers)
                    time.sleep(1)
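            # After polling, `response` holds the final GET reply, so the 200 branch below also
            # covers predictions that started asynchronously.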
            if response.status_code == 200:
                json_response = response.json()
                #If the output component is JSON return the entire output response 
                if outputs[0].get_config()["name"] == "json":
                    return json_response["output"]
                predict_outputs = parse_outputs(json_response["output"])
                processed_outputs = process_outputs(predict_outputs)        
                return tuple(processed_outputs) if len(processed_outputs) > 1 else processed_outputs[0]
            else:
                if response.status_code == 409:
                    raise gr.Error("Sorry, the Cog image is still processing. Try again in a bit.")
                raise gr.Error(f"The submission failed! Error: {response.status_code}")

    except gr.Error:
        # Let intentional Gradio errors (empty prompt, flagged prompt, failed run) pass through unchanged.
        raise
    except Exception as e:
        # Any other failure is surfaced as a generic Gradio error.
        raise gr.Error(f"An error occurred: {e}")

title = "Demo for consistent-character cog image by fofr"
description = "Create images of a given character in different poses • running cog image by fofr"

css="""
#col-container{
    margin: 0 auto;
    max-width: 1400px;
    text-align: left;
}
"""
with gr.Blocks(css=css) as app:
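    # Layout: prompt, subject image and advanced settings on the left, generated gallery on the right.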
    with gr.Column(elem_id="col-container"):
        gr.HTML(f"""
        <h2 style="text-align: center;">Consistent Character Workflow</h2>
        <p style="text-align: center;">{description}</p>
        """)

        with gr.Row():
            with gr.Column(scale=1):
                prompt = gr.Textbox(
                    label="Prompt", info='''Describe the subject. Include clothes and hairstyle for more consistency.''',
                    value="a person, darkblue suit, black tie, white pocket"
                )
        
                subject = gr.Image(
                    label="Subject", type="filepath"
                )

                submit_btn = gr.Button("Submit")

                with gr.Accordion(label="Advanced Settings", open=False):
                    
                    negative_prompt = gr.Textbox(
                        label="Negative Prompt", info='''Things you do not want to see in your image''',
                        value="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry"
                    )

                    with gr.Row():

                        number_of_outputs = gr.Slider(
                            label="Number Of Outputs", info='''The number of images to generate.''', value=2,
                            minimum=1, maximum=4, step=1,
                        )
                        
                        number_of_images_per_pose = gr.Slider(
                            label="Number Of Images Per Pose", info='''The number of images to generate for each pose.''', value=1,
                            minimum=1, maximum=4, step=1,
                        )

                    with gr.Row():
                        
                        randomise_poses = gr.Checkbox(
                            label="Randomise Poses", info='''Randomise the poses used.''', value=True
                        )
                        
                        output_format = gr.Dropdown(
                            choices=['webp', 'jpg', 'png'], label="Output Format", info='''Format of the output images''', value="webp"
                        )
                    
                    with gr.Row():
                        
                        output_quality = gr.Number(
                            label="Output Quality", info='''Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality.''', value=80
                        )
                        
                        seed = gr.Number(
                            label="Seed", info='''Set a seed for reproducibility. Random by default.''', value=None
                        )

            with gr.Column(scale=2):
                consistent_results = gr.Gallery(label="Consistent Results")

    inputs = [prompt, negative_prompt, subject, number_of_outputs, number_of_images_per_pose, randomise_poses, output_format, output_quality, seed]
    outputs = [consistent_results]
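    # `inputs` follows the same order as `names` above; `outputs` is also read inside predict()
    # to decide whether to return the raw JSON response.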

    submit_btn.click(
        fn = predict,
        inputs = inputs,
        outputs = outputs,
        show_api = False
    )

app.queue(max_size=12, api_open=False).launch(share=False, show_api=False, show_error=True)