import gradio as gr
from transformers import pipeline

# GPT-2 model fine-tuned to turn a few words into a full Stable Diffusion prompt.
pipe = pipeline('text-generation', model='Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator')
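# Text-to-image checkpoints selectable from the dropdown in the UI.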
models = [
    "Yntec/NovelAIRemix",
    "Joeythemonster/anything-midjourney-v-4-1",
    "prompthero/midjourney-v4-diffusion",
    "stablediffusionapi/dreamshaper-v6", #239
    "stablediffusionapi/disneypixar",
    "emilianJR/epiCRealism",
    "prompthero/openjourney",
    "stablediffusionapi/realistic-vision-v20-2047",
    "stablediffusionapi/wand-magic2",
    "dwancin/memoji", #07.11
    "stablediffusionapi/anime-model-v2",
    "AIARTCHAN/MIX-Pro-V4"
]
current_model = models[0]

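# Prompt-generation Space loaded via the legacy gr.Interface.load API; used by text_it1 below.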
text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

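# One inference wrapper per model, loaded from the Hugging Face Hub.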
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]

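# Forward a raw idea to the prompt-generator Space for expansion.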
def text_it1(inputs, text_gen1=text_gen1):
    go_t1 = text_gen1(inputs)
    return go_t1

def set_model(current_model):
    # The dropdown passes the selected index; relabel the output image with the model name.
    current_model = models[current_model]
    return gr.update(label=current_model)


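# Route the prompt to the selected model's inference wrapper and return the generated image.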
def send_it1(inputs, model_choice):
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    return output1
css=""""""


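# Build the UI: prompt box, model dropdown, output image, and prompt-extension helper.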
with gr.Blocks(css=css) as myface:
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="twitter:card" content="player"/>
    <meta name="twitter:site" content=""/>
    <meta name="twitter:player:width" content="100%"/>
    <meta name="twitter:player:height" content="600"/>    
  </head>
</html>
""")
    with gr.Row():
        with gr.Tab("Title"):
                gr.HTML("""    <title>A open-beta for precious people.</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
                                            """)

        with gr.Tab("Description"):
            gr.HTML("""<div style="text-align:center;">
                       </div>""")     
    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(lines=4)
            gr.HTML("""<style>           .gr-button {
            color: white !important;
            border-color: #000000 !important;
            background: #006699 !important;
            }</style>""")
            run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown; type="index" makes callbacks receive the list index.
            model_name1 = gr.Dropdown(label="Select Model", choices=models, type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column():
            output1 = gr.Image(label=current_model)
                
            
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Type a few words and click Extend Idea to expand them into a full prompt", lines=2)
            see_prompts=gr.Button("Extend Idea")

    def get_valid_prompt(text: str) -> str:
        # Truncate the generated text at the first sentence or line break, whichever comes first.
        dot_split = text.split('.')[0]
        n_split = text.split('\n')[0]
        return dot_split if len(dot_split) <= len(n_split) else n_split
    def short_prompt(inputs):
        return inputs
    
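    # Event wiring: the dropdown relabels the output, Generate Image runs the model, Extend Idea expands the prompt.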
    model_name1.change(set_model,inputs=model_name1,outputs=[output1])
    
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    
    
    see_prompts.click(lambda text: get_valid_prompt(pipe(text, max_length=77)[0]['generated_text']),
                      inputs=[input_text], outputs=[magic1])
    
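# Queue incoming requests and launch the app.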
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)