alfredplpl committed
Commit 4b94657
1 Parent(s): dc3aa6a

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED

@@ -12,7 +12,7 @@ device = "cuda"
 dtype = torch.float16
 
 repo = "stabilityai/stable-diffusion-3-medium"
-pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
+t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
 
 model_id = "microsoft/Phi-3-medium-4k-instruct"
 model = AutoModelForCausalLM.from_pretrained(
@@ -23,7 +23,7 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-pipe = pipeline(
+upsampler = pipeline(
     "text-generation",
     model=model,
     tokenizer=tokenizer,
@@ -54,12 +54,12 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     {"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
     {"role": "user", "content": prompt},
 ]
-output = pipe(messages, **generation_args)
+output = upsampler(messages, **generation_args)
 upsampled_prompt=output[0]['generated_text']
 
 print(upsampled_prompt)
 
-image = pipe(
+image = t2i(
     prompt = upsampled_prompt,
     negative_prompt = negative_prompt,
     guidance_scale = guidance_scale,
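
The commit is a rename that fixes a name collision: the previous revision bound both the Stable Diffusion 3 pipeline and the Phi-3 text-generation pipeline to `pipe`, so the second assignment shadowed the first and `image = pipe(...)` ended up calling the language model instead of the image model. Below is a minimal sketch of the corrected structure; `generation_args`, the `messages` few-shot context, and the default `guidance_scale` are abbreviated, illustrative stand-ins for what the full app.py defines, not values taken from the commit.

import os

import torch
from diffusers import StableDiffusion3Pipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

device = "cuda"

# Text-to-image pipeline: formerly `pipe`, renamed to `t2i` by this commit.
repo = "stabilityai/stable-diffusion-3-medium"
t2i = StableDiffusion3Pipeline.from_pretrained(
    repo,
    torch_dtype=torch.float16,
    revision="refs/pr/26",
    token=os.environ["TOKEN"],
).to(device)

# Prompt-upsampling LLM: formerly also `pipe`, renamed to `upsampler`.
model_id = "microsoft/Phi-3-medium-4k-instruct"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_id)
upsampler = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Illustrative stand-in for the generation_args dict defined elsewhere in app.py.
generation_args = {"max_new_tokens": 256, "return_full_text": False}

def infer(prompt, negative_prompt, guidance_scale=7.0):
    # Expand the user's short prompt into a detailed one with the LLM.
    # (app.py prepends few-shot example turns to this messages list.)
    messages = [{"role": "user", "content": prompt}]
    output = upsampler(messages, **generation_args)
    upsampled_prompt = output[0]["generated_text"]
    print(upsampled_prompt)

    # Render the expanded prompt with Stable Diffusion 3.
    image = t2i(
        prompt=upsampled_prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
    ).images[0]
    return image

With distinct names, each call site unambiguously targets the intended pipeline, which is exactly what the four changed lines in the diff accomplish.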