seawolf2357 committed on
Commit
0afef0f
1 Parent(s): 355176a

initial commit

Files changed (1)
  1. app.py +2 -81
app.py CHANGED
@@ -1,12 +1,5 @@
from googletrans import Translator
import gradio as gr
- import torch
- from torch import autocast
- from diffusers import StableDiffusionPipeline
- from datasets import load_dataset
- from PIL import Image
- import re
- import os

translator = Translator()

@@ -15,82 +8,10 @@ def translation(text):
    res=translation.text
    return res

+
inp_text = gr.inputs.Textbox(label='Input')
output = gr.outputs.Textbox(label='Output')
- gr.Interface(fn=translation, inputs=inp_text, outputs=output, title='Translation',theme='peach')
-
-
- auth_token = os.getenv("auth_token")
- model_id = "CompVis/stable-diffusion-v1-4"
- device = "cpu"
- pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token, revision="fp16", torch_dtype=torch.float16)
- pipe = pipe.to(device)
-
- def infer(prompt, samples, steps, scale, seed):
-     generator = torch.Generator(device=device).manual_seed(seed)
-     images_list = pipe(
-         [prompt] * samples,
-         num_inference_steps=steps,
-         guidance_scale=scale,
-         generator=generator,
-     )
-     images = []
-     safe_image = Image.open(r"unsafe.png")
-     for i, image in enumerate(images_list["sample"]):
-         if(images_list["nsfw_content_detected"][i]):
-             images.append(safe_image)
-         else:
-             images.append(image)
-     return images
-
-
-
- block = gr.Blocks()
-
- with block:
-     with gr.Group():
-         with gr.Box():
-             with gr.Row().style(mobile_collapse=False, equal_height=True):
-                 text = gr.Textbox(
-                     label="Enter your prompt",
-                     show_label=False,
-                     max_lines=1,
-                     placeholder="Enter your prompt",
-                 ).style(
-                     border=(True, False, True, True),
-                     rounded=(True, False, False, True),
-                     container=False,
-                 )
-                 btn = gr.Button("Generate image").style(
-                     margin=False,
-                     rounded=(False, True, True, False),
-                 )
-         gallery = gr.Gallery(
-             label="Generated images", show_label=False, elem_id="gallery"
-         ).style(grid=[2], height="auto")

-         advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
+ gr.Interface(fn=translation, inputs=inp_text, outputs=output, title='Translation',theme='peach').launch(enable_queue=True)

-         with gr.Row(elem_id="advanced-options"):
-             samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1)
-             steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1)
-             scale = gr.Slider(
-                 label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
-             )
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=2147483647,
-                 step=1,
-                 randomize=True,
-             )
-         text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
-         btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
-         advanced_button.click(
-             None,
-             [],
-             text,
-         )
-
- block.launch()

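For reference, a minimal sketch of app.py as it stands after this commit. The body of translation() lies between the two hunks and is not shown in the diff, so the translator.translate(...) call and its dest='en' target are assumptions based on the googletrans API; every other line is taken from the diff above.

from googletrans import Translator
import gradio as gr

translator = Translator()

def translation(text):
    # Assumed reconstruction of the hidden lines between the hunks:
    # googletrans returns an object whose .text attribute holds the translated string.
    # The local name 'translation' shadows the function, matching the context line below.
    translation = translator.translate(text, dest='en')
    res = translation.text
    return res


inp_text = gr.inputs.Textbox(label='Input')
output = gr.outputs.Textbox(label='Output')

gr.Interface(fn=translation, inputs=inp_text, outputs=output, title='Translation', theme='peach').launch(enable_queue=True)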