ljh838 committed
Commit a0a7332
Parent: bdda10b

Upload 36 files

.gitattributes CHANGED
@@ -2,13 +2,11 @@
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
  *.npy filter=lfs diff=lfs merge=lfs -text
@@ -16,13 +14,12 @@
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
  *.pickle filter=lfs diff=lfs merge=lfs -text
  *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
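Net effect of this change: the *.ckpt, *.mlmodel, and *.safetensors patterns are dropped from Git LFS tracking, while *.pb stays tracked and simply moves below *.pkl.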
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Paint by example
+ emoji: 🔥
+ colorFrom: green
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.6
+ app_file: app.py
+ pinned: false
+ duplicated_from: akhaliq/paint-by-example
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
app.py ADDED
@@ -0,0 +1,144 @@
+ import gradio as gr
+
+ from io import BytesIO
+ import requests
+ import PIL
+ from PIL import Image
+ import numpy as np
+ import os
+ import uuid
+ import torch
+ from torch import autocast
+ import cv2
+ from matplotlib import pyplot as plt
+ from torchvision import transforms
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import torch_device
+ pipe = DiffusionPipeline.from_pretrained(
+     "Fantasy-Studio/Paint-by-Example",
+     torch_dtype=torch.float16,
+ )
+ pipe = pipe.to("cuda")
+
+ from share_btn import community_icon_html, loading_icon_html, share_js
+
+ def read_content(file_path: str) -> str:
+     """Read the content of the target file.
+     """
+     with open(file_path, 'r', encoding='utf-8') as f:
+         content = f.read()
+
+     return content
+
+ def predict(input_dict, reference, scale, seed, step):
+     width, height = input_dict["image"].size  # snap the short side to 512, keep both sides multiples of 8
+     if width < height:
+         factor = width / 512.0
+         width = 512
+         height = int((height / factor) / 8.0) * 8
+
+     else:
+         factor = height / 512.0
+         height = 512
+         width = int((width / factor) / 8.0) * 8
+     init_image = input_dict["image"].convert("RGB").resize((width, height))
+     mask = input_dict["mask"].convert("RGB").resize((width, height))
+     generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
+     output = pipe(
+         image=init_image,
+         mask_image=mask,
+         example_image=reference,
+         generator=generator,
+         guidance_scale=scale,
+         num_inference_steps=step,
+     ).images[0]
+     return output, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+
+
+ css = '''
+ .container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
+ #image_upload{min-height:400px}
+ #image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
+ #mask_radio .gr-form{background:transparent; border: none}
+ #word_mask{margin-top: .75em !important}
+ #word_mask textarea:disabled{opacity: 0.3}
+ .footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
+ .footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
+ .dark .footer {border-color: #303030}
+ .dark .footer>p {background: #0b0f19}
+ .acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
+ #image_upload .touch-none{display: flex}
+ @keyframes spin {
+     from {
+         transform: rotate(0deg);
+     }
+     to {
+         transform: rotate(360deg);
+     }
+ }
+ #share-btn-container {
+     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+ }
+ #share-btn {
+     all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+ }
+ #share-btn * {
+     all: unset;
+ }
+ #share-btn-container div:nth-child(-n+2){
+     width: auto !important;
+     min-height: 0px !important;
+ }
+ #share-btn-container .wrap {
+     display: none !important;
+ }
+ '''
+ example = {}
+ ref_dir = 'examples/reference'
+ image_dir = 'examples/image'
+ ref_list = [os.path.join(ref_dir, file) for file in os.listdir(ref_dir)]
+ ref_list.sort()
+ image_list = [os.path.join(image_dir, file) for file in os.listdir(image_dir)]
+ image_list.sort()
+
+
+ image_blocks = gr.Blocks(css=css)
+ with image_blocks as demo:
+     gr.HTML(read_content("header.html"))
+     with gr.Group():
+         with gr.Box():
+             with gr.Row():
+                 with gr.Column():
+                     image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Source Image")
+                     reference = gr.Image(source='upload', elem_id="image_upload", type="pil", label="Reference Image")
+
+                 with gr.Column():
+                     image_out = gr.Image(label="Output", elem_id="output-img").style(height=400)
+                     guidance = gr.Slider(label="Guidance scale", value=5, maximum=15, interactive=True)
+                     steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=75, step=1, interactive=True)
+
+                     seed = gr.Slider(0, 10000, label='Seed (0 = random)', value=0, step=1)
+
+                     with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
+                         btn = gr.Button("Paint!").style(
+                             margin=False,
+                             rounded=(False, True, True, False),
+                             full_width=True,
+                         )
+                     with gr.Group(elem_id="share-btn-container"):
+                         community_icon = gr.HTML(community_icon_html, visible=True)
+                         loading_icon = gr.HTML(loading_icon_html, visible=True)
+                         share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
+
+
+     with gr.Row():
+         with gr.Column():
+             gr.Examples(image_list, inputs=[image], label="Examples - Source Image", examples_per_page=12)
+         with gr.Column():
+             gr.Examples(ref_list, inputs=[reference], label="Examples - Reference Image", examples_per_page=12)
+
+     btn.click(fn=predict, inputs=[image, reference, guidance, seed, steps], outputs=[image_out, community_icon, loading_icon, share_button])
+     share_button.click(None, [], [], _js=share_js)
+
+
+ image_blocks.launch(server_name='0.0.0.0')
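For readers who want the model call without the UI, here is a minimal headless sketch of the same pipeline invocation that app.py wraps in Gradio. The file names and the fixed 512x512 resize are illustrative assumptions, not part of this commit:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Same checkpoint and call signature as app.py, minus the Gradio plumbing.
pipe = DiffusionPipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example", torch_dtype=torch.float16
).to("cuda")

# Hypothetical local files; any RGB images of matching size work.
init_image = Image.open("source.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("RGB").resize((512, 512))  # white = repaint
example = Image.open("reference.png").convert("RGB")

result = pipe(
    image=init_image,
    mask_image=mask_image,
    example_image=example,
    guidance_scale=5,
    num_inference_steps=50,
    generator=torch.Generator("cuda").manual_seed(42),
).images[0]
result.save("output.png")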
examples/image/0.png ADDED
examples/image/1.png ADDED
examples/image/2.png ADDED
examples/image/2022-11-06-20-59-08.png ADDED
examples/image/2022-11-06-21-00-00.png ADDED
examples/image/2022-11-06-21-09-58.png ADDED
examples/image/2022-11-06-21-12-29.png ADDED
examples/image/2022-11-06-21-15-44.png ADDED
examples/image/2022-11-06-21-17-49.png ADDED
examples/image/2022-11-10-14-59-25.png ADDED
examples/image/2022-11-11-20-35-20.png ADDED
examples/image/2022-11-16-20-29-20.png ADDED
examples/image/2022-11-16-20-35-06.png ADDED
examples/image/2022-11-16-20-45-09.png ADDED
examples/image/2022-11-16-21-14-51.png ADDED
examples/image/2022-11-16-21-18-42.png ADDED
examples/mask/example_1.png ADDED
examples/mask/example_2.png ADDED
examples/mask/example_3.png ADDED
examples/reference/0.jpg ADDED
examples/reference/1.jpg ADDED
examples/reference/animal-4137865_1920.jpg ADDED
examples/reference/blackbird-7543630_1920.jpg ADDED
examples/reference/car-1283947_1920.jpg ADDED
examples/reference/dog-5584135_1920.jpg ADDED
examples/reference/dog-7317820_1920.jpg ADDED
examples/reference/example_1.jpg ADDED
examples/reference/new-york-1590176_1920.jpg ADDED
examples/reference/teddy-1085164_1920.jpg ADDED
header.html ADDED
@@ -0,0 +1,18 @@
+ <div style="text-align: center; max-width: 650px; margin: 0 auto;">
+   <div style="
+     display: inline-flex;
+     gap: 0.8rem;
+     font-size: 1.75rem;
+     justify-content: center;
+     margin-bottom: 10px;
+   ">
+     <h1 style="font-weight: 900; align-items: center; margin-bottom: 7px; margin-top: 20px;">
+       Paint by Example 🎨
+     </h1>
+   </div>
+   <div>
+     <p style="align-items: center; margin-bottom: 7px;">
+       Paint by Example: upload a source image and draw a mask over the region you want to replace with an example image.
+     </p>
+   </div>
+ </div>
inpainting.py ADDED
@@ -0,0 +1,194 @@
+ import inspect
+ from typing import List, Optional, Union
+
+ import numpy as np
+ import torch
+
+ import PIL
+ from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, PNDMScheduler, UNet2DConditionModel
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
+ from tqdm.auto import tqdm
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+
+
+ def preprocess_image(image):
+     w, h = image.size
+     w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+     image = image.resize((w, h), resample=PIL.Image.LANCZOS)
+     image = np.array(image).astype(np.float32) / 255.0
+     image = image[None].transpose(0, 3, 1, 2)
+     image = torch.from_numpy(image)
+     return 2.0 * image - 1.0
+
+
+ def preprocess_mask(mask):
+     mask = mask.convert("L")
+     w, h = mask.size
+     w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+     mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)
+     mask = np.array(mask).astype(np.float32) / 255.0
+     mask = np.tile(mask, (4, 1, 1))
+     mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension (the transpose itself is a no-op)
+     mask = 1 - mask  # repaint white, keep black
+     mask = torch.from_numpy(mask)
+     return mask
+
+ class StableDiffusionInpaintingPipeline(DiffusionPipeline):
+     def __init__(
+         self,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         unet: UNet2DConditionModel,
+         scheduler: Union[DDIMScheduler, PNDMScheduler],
+         safety_checker: StableDiffusionSafetyChecker,
+         feature_extractor: CLIPFeatureExtractor,
+     ):
+         super().__init__()
+         scheduler = scheduler.set_format("pt")
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             safety_checker=safety_checker,
+             feature_extractor=feature_extractor,
+         )
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: Union[str, List[str]],
+         init_image: torch.FloatTensor,
+         mask_image: torch.FloatTensor,
+         strength: float = 0.8,
+         num_inference_steps: Optional[int] = 50,
+         guidance_scale: Optional[float] = 7.5,
+         eta: Optional[float] = 0.0,
+         generator: Optional[torch.Generator] = None,
+         output_type: Optional[str] = "pil",
+     ):
+
+         if isinstance(prompt, str):
+             batch_size = 1
+         elif isinstance(prompt, list):
+             batch_size = len(prompt)
+         else:
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if strength < 0 or strength > 1:
+             raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
+
+         # set timesteps
+         accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
+         extra_set_kwargs = {}
+         offset = 0
+         if accepts_offset:
+             offset = 1
+             extra_set_kwargs["offset"] = 1
+
+         self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
+
+         # preprocess image
+         init_image = preprocess_image(init_image).to(self.device)
+
+         # encode the init image into latents and scale the latents
+         init_latent_dist = self.vae.encode(init_image).latent_dist
+         init_latents = init_latent_dist.sample(generator=generator)
+         init_latents = 0.18215 * init_latents
+
+         # prepare init_latents noise to latents
+         init_latents = torch.cat([init_latents] * batch_size)
+         init_latents_orig = init_latents
+
+         # preprocess mask
+         mask = preprocess_mask(mask_image).to(self.device)
+         mask = torch.cat([mask] * batch_size)
+
+         # check sizes
+         if not mask.shape == init_latents.shape:
+             raise ValueError("The mask and init_image should be the same size!")
+
+         # get the original timestep using init_timestep
+         init_timestep = int(num_inference_steps * strength) + offset
+         init_timestep = min(init_timestep, num_inference_steps)
+         timesteps = self.scheduler.timesteps[-init_timestep]
+         timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device)
+
+         # add noise to latents using the timesteps
+         noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
+         init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)
+
+         # get prompt text embeddings
+         text_input = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=self.tokenizer.model_max_length,
+             truncation=True,
+             return_tensors="pt",
+         )
+         text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
+
+         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+         # corresponds to doing no classifier free guidance.
+         do_classifier_free_guidance = guidance_scale > 1.0
+         # get unconditional embeddings for classifier free guidance
+         if do_classifier_free_guidance:
+             max_length = text_input.input_ids.shape[-1]
+             uncond_input = self.tokenizer(
+                 [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+             )
+             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
+
+             # For classifier free guidance, we need to do two forward passes.
+             # Here we concatenate the unconditional and text embeddings into a single batch
+             # to avoid doing two forward passes
+             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be between [0, 1]
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         latents = init_latents
+         t_start = max(num_inference_steps - init_timestep + offset, 0)
+         for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
+             # expand the latents if we are doing classifier free guidance
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+
+             # predict the noise residual
+             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+             # perform guidance
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+             # compute the previous noisy sample x_t -> x_t-1
+             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]
+
+             # masking
+             init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
+             latents = (init_latents_proper * mask) + (latents * (1 - mask))
+
+         # scale and decode the image latents with vae
+         latents = 1 / 0.18215 * latents
+         image = self.vae.decode(latents).sample
+
+         image = (image / 2 + 0.5).clamp(0, 1)
+         image = image.cpu().permute(0, 2, 3, 1).numpy()
+
+         # run safety checker
+         safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device)
+         image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)
+
+         if output_type == "pil":
+             image = self.numpy_to_pil(image)
+
+         return {"sample": image, "nsfw_content_detected": has_nsfw_concept}
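A note on the masking step inside the denoising loop: it is what turns plain image-to-image sampling into inpainting. At every timestep the original image latents are re-noised to the current noise level t (reusing the same noise tensor sampled earlier) and pasted back wherever the mask says "keep", so only the masked region is ever repainted. Below is a self-contained sketch of that blend with stand-in tensors; the shapes follow preprocess_mask, which downsamples the mask by a factor of 8 and tiles it across the 4 latent channels:

import torch

# Stand-in shapes for a 512x512 image: latents live on a 64x64 grid with 4 channels.
batch, channels, h8, w8 = 1, 4, 64, 64
latents = torch.randn(batch, channels, h8, w8)              # current denoised latents at step t
init_latents_proper = torch.randn(batch, channels, h8, w8)  # original latents re-noised to step t
mask = torch.ones(batch, channels, h8, w8)                  # after the `1 - mask` flip: 1 = keep original
mask[..., 16:48, 16:48] = 0.0                               # 0 = region the model is allowed to repaint

# The blend from __call__: keep the original content where mask == 1,
# take the model's output where mask == 0.
latents = (init_latents_proper * mask) + (latents * (1 - mask))
assert latents.shape == (batch, channels, h8, w8)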
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ --extra-index-url https://download.pytorch.org/whl/cu116
+ torch
+ torchvision
+ git+https://github.com/huggingface/diffusers.git
+ transformers
+ ftfy
+ numpy
+ matplotlib
+ uuid
+ opencv-python
+ git+https://github.com/openai/CLIP.git
+ gradio
+ altair
+ accelerate
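Note: uuid ships with the Python standard library, so the uuid pin here is likely unnecessary; the PyPI package of that name is a legacy Python 2-era shim.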
share_btn.py ADDED
@@ -0,0 +1,11 @@
+ community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
+ </svg>"""
+
+ loading_icon_html = """<svg id="share-btn-loading-icon" class="animate-spin"
+ style="display: none; color: #ffffff;
+ "
+ xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
+
+ share_js = """"""
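Since share_js is left as an empty string, the share_button.click(None, [], [], _js=share_js) handler in app.py is effectively a no-op: the Share button renders but does nothing until the script is filled in.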