vilarin committed
Commit 85b9ea4 · verified · 1 Parent(s): d0d629b

Update app.py

Files changed (1): app.py (+87 -147)

app.py CHANGED
@@ -1,160 +1,100 @@
-import spaces
-import os
-# import subprocess
-# import shlex
-# if os.getenv('SYSTEM') == 'spaces':
-#     git_repo = "https://github.com/huggingface/transformers.git"
-#     subprocess.call(shlex.split(f'pip install git+{git_repo}'))
-
-import time
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import spaces
 import gradio as gr
-from threading import Thread
-
-MODEL_LIST = ["openbmb/MiniCPM3-4B"]
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-MODEL = os.environ.get("MODEL_ID")
-
-TITLE = "<h1><center>MiniCPM3-4B</center></h1>"
+from diffusers import FluxInpaintPipeline
+import random
+import numpy as np
 
-PLACEHOLDER = """
-<center>
-<p>MiniCPM3-4B is the 3rd generation of MiniCPM series.</p>
-</center>
-"""
+MAX_SEED = np.iinfo(np.int32).max
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
+pipe = FluxInpaintPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to(DEVICE)
 
-CSS = """
-.duplicate-button {
-    margin: auto !important;
-    color: white !important;
-    background: black !important;
-    border-radius: 100vh !important;
-}
-h3 {
-    text-align: center;
-}
-"""
-
-device = "cuda"  # for GPU usage or "cpu" for CPU usage
-
-tokenizer = AutoTokenizer.from_pretrained(MODEL, trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(
-    MODEL,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-    trust_remote_code=True)
 
 @spaces.GPU()
-def stream_chat(
-    message: str,
-    history: list,
-    temperature: float = 0.7,
-    max_new_tokens: int = 1024,
-    top_p: float = 1.0,
-    top_k: int = 20,
-    penalty: float = 1.2,
-):
-    print(f'message: {message}')
-    print(f'history: {history}')
-
-    conversation = []
-    for prompt, answer in history:
-        conversation.extend([
-            {"role": "user", "content": prompt},
-            {"role": "assistant", "content": answer},
-        ])
-
-    conversation.append({"role": "user", "content": message})
-
-    input_text = tokenizer.apply_chat_template(conversation, tokenize=False)
-    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
-
-    generate_kwargs = dict(
-        input_ids=inputs,
-        max_new_tokens=max_new_tokens,
-        do_sample=False if temperature == 0 else True,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        streamer=streamer,
-        repetition_penalty=penalty,
-        eos_token_id=[2, 73440],
-    )
-
-    with torch.no_grad():
-        thread = Thread(target=model.generate, kwargs=generate_kwargs)
-        thread.start()
+def inpaintGen(
+        imgMask,
+        inpaint_prompt: str,
+        strength: float,
+        guidance: float,
+        num_steps: int,
+        seed: int,
+        randomize_seed: bool,
+        progress=gr.Progress(track_tqdm=True)):
+    # gr.ImageMask returns a dict with the uploaded image and the drawn mask layers
+    source_img = imgMask["background"]
+    mask_img = imgMask["layers"][0]
+
+    if not source_img:
+        raise gr.Error("Please upload an image.")
+
+    if not mask_img:
+        raise gr.Error("Please draw a mask on the image.")
 
-    buffer = ""
-    for new_text in streamer:
-        buffer += new_text
-        yield buffer
-
-
-chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
-
-with gr.Blocks(css=CSS, theme="Nymbo/Nymbo_Theme") as demo:
-    gr.HTML(TITLE)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
-    gr.ChatInterface(
-        fn=stream_chat,
-        chatbot=chatbot,
-        fill_height=True,
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-        additional_inputs=[
-            gr.Slider(
-                minimum=0,
-                maximum=1,
-                step=0.1,
-                value=0.7,
-                label="Temperature",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=128,
-                maximum=32768,
-                step=1,
-                value=1024,
-                label="Max new tokens",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=0.0,
-                maximum=1.0,
-                step=0.1,
-                value=1.0,
-                label="top_p",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=1,
-                maximum=20,
-                step=1,
-                value=20,
-                label="top_k",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=0.0,
-                maximum=2.0,
-                step=0.1,
-                value=1.2,
-                label="Repetition penalty",
-                render=False,
-            ),
+    width, height = source_img.size
+
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator(device=DEVICE).manual_seed(seed)
+
+    result = pipe(
+        prompt=inpaint_prompt,
+        image=source_img,
+        # the seed is applied through the generator above
+        mask_image=mask_img,
+        width=width,
+        height=height,
+        strength=strength,
+        num_inference_steps=num_steps,
+        generator=generator,
+        guidance_scale=guidance
+    ).images[0]
+
+    return result, seed
+
+
+with gr.Blocks(theme="ocean", title="Flux.1 dev inpaint") as demo:
+    gr.HTML("<h1><center>Flux.1 dev Inpaint</center></h1>")
+    gr.HTML("""
+        <p>
+            <center>
+                Redraws the masked region of your image according to the prompt.
+            </center>
+        </p>
+    """)
+    with gr.Row():
+        with gr.Column():
+            imgMask = gr.ImageMask(type="pil", label="Image", layers=False, height=800)
+            inpaint_prompt = gr.Textbox(label='Prompts ✏️', placeholder="A hat...")
+            with gr.Row():
+                Inpaint_sendBtn = gr.Button(value="Submit", variant='primary')
+                Inpaint_clearBtn = gr.ClearButton([imgMask, inpaint_prompt], value="Clear")
+        image_out = gr.Image(type="pil", label="Output", height=960)
+    with gr.Accordion("Advanced ⚙️", open=False):
+        strength = gr.Slider(label="Strength", minimum=0, maximum=1, value=1, step=0.1)
+        guidance = gr.Slider(label="Guidance scale", minimum=1, maximum=20, value=7.5, step=0.1)
+        num_steps = gr.Slider(label="Steps", minimum=1, maximum=20, value=20, step=1)
+        seed = gr.Number(label="Seed", value=42, precision=0)
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+    gr.on(
+        triggers=[
+            inpaint_prompt.submit,
+            Inpaint_sendBtn.click,
         ],
-        examples=[
-            ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
-            ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
-            ["Tell me a random fun fact about the Roman Empire."],
-            ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
+        fn=inpaintGen,
+        inputs=[
+            imgMask,
+            inpaint_prompt,
+            strength,
+            guidance,
+            num_steps,
+            seed,
+            randomize_seed
         ],
-        cache_examples=False,
+        outputs=[image_out, seed]
     )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.queue(api_open=False).launch(show_api=False, share=False)
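
For quick local testing, here is a minimal standalone sketch of the same inpainting call outside Gradio. The model id and call parameters are copied from the diff above; the file paths, prompt, seed, and the CUDA device are placeholder assumptions, not part of the commit.

# Standalone sketch of the FluxInpaintPipeline call from this commit.
# Assumptions: a CUDA GPU and user-supplied "source.png" / "mask.png"
# (white mask pixels mark the region to redraw).
import torch
from PIL import Image
from diffusers import FluxInpaintPipeline

pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev",  # model id as in the diff
    torch_dtype=torch.bfloat16,
).to("cuda")

source = Image.open("source.png").convert("RGB")  # placeholder path
mask = Image.open("mask.png").convert("L")        # placeholder path

# A manually seeded generator makes the run reproducible, matching how
# inpaintGen passes a generator rather than a raw seed to the pipeline.
generator = torch.Generator(device="cuda").manual_seed(42)

image = pipe(
    prompt="A hat",  # placeholder prompt
    image=source,
    mask_image=mask,
    width=source.width,
    height=source.height,
    strength=1.0,
    num_inference_steps=20,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("inpainted.png")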