mrm8488 committed
Commit 106fb6c · 1 Parent(s): da9cad5

Update app.py

Files changed (1)
  1. app.py +2 -13
app.py CHANGED
@@ -3,9 +3,7 @@ import os
 import torch
 from torch import autocast
 from diffusers import StableDiffusionPipeline
-from datasets import load_dataset
 from PIL import Image
-import re
 from styles import css, header_html, footer_html
 from examples import examples
 from transformers import pipeline
@@ -20,10 +18,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 pipe = StableDiffusionPipeline.from_pretrained(
     model_id, use_auth_token=True, revision="fp16", torch_dtype=torch.float16)
 pipe = pipe.to(device)
-# When running locally, you won't have access to this, so you can remove this part
-word_list_dataset = load_dataset(
-    "stabilityai/word-list", data_files="list.txt", use_auth_token=os.environ.get('auth_token'))
-word_list = word_list_dataset["train"]['text']
 
 
 def transcribe(audio):
@@ -32,13 +26,8 @@ def transcribe(audio):
 
 
 def infer(audio, samples, steps, scale, seed):
-    prompt = transcribe(audio)
-    # When running locally you can also remove this filter
-    for filter in word_list:
-        if re.search(rf"\b{filter}\b", prompt):
-            raise gr.Error(
-                "Unsafe content found. Please try again with different prompts.")
 
+    prompt = transcribe(audio)
     generator = torch.Generator(device=device).manual_seed(seed)
 
     # If you are running locally with CPU, you can remove the `with autocast("cuda")`
@@ -131,4 +120,4 @@ with block:
     )
     gr.HTML(footer_html)
 
-block.queue(max_size=25).launch()
+block.queue(max_size=25).launch()
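For anyone running this Space locally who wants to keep the safety filter this commit removes, a minimal standalone sketch of the dropped check follows. The word list here is a placeholder (the Space loaded it from the gated stabilityai/word-list dataset, which requires an auth token), and ValueError stands in for the gr.Error the app raised:

import re

# Placeholder list; the Space loaded this from the gated
# "stabilityai/word-list" dataset via datasets.load_dataset.
word_list = ["example_blocked_word"]

def check_prompt(prompt):
    # Reject the prompt if any listed word appears as a whole word.
    for word in word_list:
        if re.search(rf"\b{word}\b", prompt):
            # The app raised gr.Error here to surface the message in the UI.
            raise ValueError(
                "Unsafe content found. Please try again with different prompts.")
    return prompt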