alfredplpl committed on
Commit
718bcd6
1 Parent(s): 42eccb2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -2
app.py CHANGED
@@ -6,6 +6,7 @@ import random
6
  import torch
7
  from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
8
  import spaces
 
9
 
10
  device = "cuda"
11
  dtype = torch.float16
@@ -13,6 +14,27 @@ dtype = torch.float16
13
  repo = "stabilityai/stable-diffusion-3-medium"
14
  pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1344
18
 
@@ -23,9 +45,19 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
23
  seed = random.randint(0, MAX_SEED)
24
 
25
  generator = torch.Generator().manual_seed(seed)
26
-
 
 
 
 
 
 
 
 
 
 
27
  image = pipe(
28
- prompt = prompt,
29
  negative_prompt = negative_prompt,
30
  guidance_scale = guidance_scale,
31
  num_inference_steps = num_inference_steps,
 
6
  import torch
7
  from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
8
  import spaces
9
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
10
 
11
  device = "cuda"
12
  dtype = torch.float16
 
14
  repo = "stabilityai/stable-diffusion-3-medium"
15
  pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
16
 
17
+ model = AutoModelForCausalLM.from_pretrained(
18
+ model_id,
19
+ device_map=device,
20
+ torch_dtype=torch.bfloat16,
21
+ trust_remote_code=True,
22
+ )
23
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
24
+
25
+ pipe = pipeline(
26
+ "text-generation",
27
+ model=model,
28
+ tokenizer=tokenizer,
29
+ )
30
+
31
+ generation_args = {
32
+ "max_new_tokens": 300,
33
+ "return_full_text": False,
34
+ "temperature": 0.7,
35
+ "do_sample": True,
36
+ }
37
+
38
  MAX_SEED = np.iinfo(np.int32).max
39
  MAX_IMAGE_SIZE = 1344
40
 
 
45
  seed = random.randint(0, MAX_SEED)
46
 
47
  generator = torch.Generator().manual_seed(seed)
48
+
49
+ messages = [
50
+ {"role": "user", "content": "クールなアニメ風の少女"},
51
+ {"role": "assistant", "content": "An anime style illustration of a cool-looking teenage girl with an edgy, confident expression. She has piercing eyes, a slight smirk, and colorful hair that flows in the wind. She wears a trendy punk-inspired outfit with a leather jacket, ripped jeans, and combat boots. The background has an urban nighttime feel with city lights and graffiti to match her rebellious vibe. The colors are vibrant with high contrast to give an impactful look. The overall style captures her undeniable coolness and fearless attitude."},
52
+ {"role": "user", "content": "美味しそうな肉"},
53
+ {"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
54
+ {"role": "user", "content": prompt},
55
+ ]
56
+ output = pipe(messages, **generation_args)
57
+ upsampled_prompt=output[0]['generated_text']
58
+
59
  image = pipe(
60
+ prompt = upsampled_prompt,
61
  negative_prompt = negative_prompt,
62
  guidance_scale = guidance_scale,
63
  num_inference_steps = num_inference_steps,