alfredplpl committed on
Commit
c0c6c3f
1 Parent(s): bc55d7e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -2
app.py CHANGED
@@ -14,13 +14,38 @@ dtype = torch.float16
14
  repo = "stabilityai/stable-diffusion-3-medium"
15
  t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  MAX_SEED = np.iinfo(np.int32).max
18
  MAX_IMAGE_SIZE = 1344
19
 
20
  @spaces.GPU
21
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
22
- upsampled_prompt="An anime style illustration of a cool-looking teenage girl with an edgy, confident expression. She has piercing eyes, a slight smirk, and colorful hair that flows in the wind. She wears a trendy punk-inspired outfit with a leather jacket, ripped jeans, and combat boots. The background has an urban nighttime feel with city lights and graffiti to match her rebellious vibe. The colors are vibrant with high contrast to give an impactful look. The overall style captures her undeniable coolness and fearless attitude."
23
- print(upsampled_prompt)
 
 
 
 
 
24
 
25
  if randomize_seed:
26
  seed = random.randint(0, MAX_SEED)
 
14
  repo = "stabilityai/stable-diffusion-3-medium"
15
  t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
16
 
17
+ model = AutoModelForCausalLM.from_pretrained(
18
+ "microsoft/Phi-3-mini-4k-instruct",
19
+ device_map="cuda",
20
+ torch_dtype=torch.bfloat16,
21
+ trust_remote_code=True,
22
+ )
23
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
24
+ upsampler = pipeline(
25
+ "text-generation",
26
+ model=model,
27
+ tokenizer=tokenizer,
28
+ )
29
+
30
+ generation_args = {
31
+ "max_new_tokens": 300,
32
+ "return_full_text": False,
33
+ "temperature": 0.0,
34
+ "do_sample": False,
35
+ }
36
+
37
  MAX_SEED = np.iinfo(np.int32).max
38
  MAX_IMAGE_SIZE = 1344
39
 
40
  @spaces.GPU
41
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
42
+ messages = [
43
+ {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
44
+ {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
45
+ {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"},
46
+ ]
47
+ output = pipe(messages, **generation_args)
48
+ upsampled_prompt=output[0]['generated_text']
49
 
50
  if randomize_seed:
51
  seed = random.randint(0, MAX_SEED)