Shuaf98 committed on
Commit
7feea97
1 Parent(s): a4c54f2
Files changed (1) hide show
  1. app.py +7 -14
app.py CHANGED
@@ -5,26 +5,19 @@ from torch import autocast
5
  from diffusers import StableDiffusionPipeline
6
  from datasets import load_dataset
7
  from PIL import Image
8
- # import re
 
9
  import os
 
10
 
11
  model_id = "CompVis/stable-diffusion-v1-4"
12
- # device = "cuda"
13
 
14
  #If you are running this code locally, you need to either do a `huggingface-cli login` or paste your User Access Token from here https://huggingface.co/settings/tokens into the use_auth_token field below.
15
- # pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=os.environ.get("TOKEN"), revision="fp16", torch_dtype=torch.float16)
16
- pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token = os.environ.get("TOKEN"))
17
- # pipe = pipe.to(device)
18
- #When running locally, you won't have access to this, so you can remove this part
19
- # word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True)
20
- # word_list = word_list_dataset["train"]['text']
21
 
22
  def infer(prompt, samples, steps, scale, seed):
23
- #When running locally you can also remove this filter
24
- # for filter in word_list:
25
- # if re.search(rf"\b{filter}\b", prompt):
26
- # raise gr.Error("Unsafe content found. Please try again with different prompts.")
27
-
28
  generator = torch.Generator(device=device).manual_seed(seed)
29
 
30
  #If you are running locally with CPU, you can remove the `with autocast("cuda")`
@@ -301,4 +294,4 @@ Despite how impressive being able to turn text into image is, beware to the fact
301
  """
302
  )
303
 
304
- block.queue(max_size=10).launch()
5
  from diffusers import StableDiffusionPipeline
6
  from datasets import load_dataset
7
  from PIL import Image
8
+ import re
9
+
10
  import os
11
+ ACCESS_TOKEN = os.getenv('TOKEN')
12
 
13
  model_id = "CompVis/stable-diffusion-v1-4"
14
+ device = "cpu"
15
 
16
  #If you are running this code locally, you need to either do a `huggingface-cli login` or paste your User Access Token from here https://huggingface.co/settings/tokens into the use_auth_token field below.
17
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", use_auth_token=ACCESS_TOKEN)
18
+ pipe = pipe.to(device)
 
 
 
 
19
 
20
  def infer(prompt, samples, steps, scale, seed):
 
 
 
 
 
21
  generator = torch.Generator(device=device).manual_seed(seed)
22
 
23
  #If you are running locally with CPU, you can remove the `with autocast("cuda")`
294
  """
295
  )
296
 
297
+ block.queue(max_size=10).launch()