jan-hq committed on
Commit
68fc348
1 Parent(s): c6e08f7

Upload folder using huggingface_hub

app.py ADDED
@@ -0,0 +1,272 @@
+ import argparse
+ parser = argparse.ArgumentParser(description="WhisperVQ Application")
+ parser.add_argument('--log-path', type=str,
+                     default='whisper.log', help='The log file path')
+ parser.add_argument('--log-level', type=str, default='INFO',
+                     choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'TRACE'], help='The log level')
+ parser.add_argument('--port', type=int, default=3348,
+                     help='The port to run the WhisperVQ app on')
+ parser.add_argument('--package-dir', type=str, default="",
+                     help='The package-dir to be extended to sys.path')
+ args = parser.parse_args()
+ import sys
+ sys.path.insert(0, args.package_dir)
+ import tempfile
+ from typing import Tuple
+ from enum import Enum
+ import io
+ import logging
+ from custom_component import CustomRQBottleneckTransformer
+ from whisperspeech.vq_stoks import RQBottleneckTransformer
+ from huggingface_hub import hf_hub_download
+ import uvicorn
+ from transformers import WhisperModel, WhisperProcessor
+ from fastapi.responses import JSONResponse
+ from fastapi import FastAPI, File, UploadFile, HTTPException
+ from contextlib import asynccontextmanager
+ import torchaudio
+ import torch
+ import os
+ import time
+ import psutil
+ import threading
+
+
+ logging.basicConfig(level=args.log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+                     handlers=[
+                         logging.FileHandler(args.log_path),
+                         # logging.StreamHandler()
+                     ])
+ logger = logging.getLogger(__name__)
+
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # Use the first GPU
+
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ if not os.path.exists(os.path.dirname(os.path.realpath(__file__)) + "/whisper-vq-stoks-v3-7lang-fixed.model"):
+     hf_hub_download(
+         repo_id="jan-hq/WhisperVQ",
+         filename="whisper-vq-stoks-v3-7lang-fixed.model",
+         local_dir=".",
+     )
+ vq_model = CustomRQBottleneckTransformer.load_vq_only(
+     os.path.dirname(os.path.realpath(__file__)) +
+     "/whisper-vq-stoks-v3-7lang-fixed.model"
+ ).to(device)
+ vq_model.load_encoder(device)
+ vq_model.eval()
+
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     # on startup
+     yield
+     # on shutdown
+
+
+ # vq_model = torch.compile(vq_model)
+
+
+ class AudioFormat(str, Enum):
+     WAV = "wav"    # Supported by both backends
+     MP3 = "mp3"    # Supported by ffmpeg
+     FLAC = "flac"  # Supported by both
+     AAC = "aac"    # Supported by ffmpeg
+     OGG = "ogg"    # Supported by ffmpeg
+     OPUS = "opus"  # Supported by ffmpeg
+     PCM = "pcm"    # Raw PCM data
+
+
+ # Format to backend mapping
+ FORMAT_BACKENDS = {
+     AudioFormat.WAV: ["soundfile", "ffmpeg"],
+     AudioFormat.MP3: ["ffmpeg"],
+     AudioFormat.FLAC: ["soundfile", "ffmpeg"],
+     AudioFormat.AAC: ["ffmpeg"],
+     AudioFormat.OGG: ["ffmpeg"],
+     AudioFormat.OPUS: ["ffmpeg"],
+     AudioFormat.PCM: ["soundfile"]
+ }
+
+
+ class AudioProcessor:
+     def __init__(self):
+         self.available_backends = torchaudio.list_audio_backends()
+         logger.info(f"Available backends: {self.available_backends}")
+
+         # Verify ffmpeg support
+         self.has_ffmpeg = "ffmpeg" in self.available_backends
+         if not self.has_ffmpeg:
+             logger.warning(
+                 "FFMPEG backend not available. Some formats may not be supported")
+
+     def _get_best_backend(self, format: AudioFormat) -> str:
+         """Determine the best backend for the given format"""
+         supported_backends = FORMAT_BACKENDS[format]
+         for backend in supported_backends:
+             if backend in self.available_backends:
+                 return backend
+         raise ValueError(f"No available backend supports format {format}")
+
+     async def load_audio(
+         self,
+         file_obj: bytes,
+         format: AudioFormat,
+         target_sr: int = 16000
+     ) -> Tuple[torch.Tensor, int]:
+         """
+         Load audio from bytes object with format handling
+
+         Args:
+             file_obj: Audio file bytes
+             format: Audio format enum
+             target_sr: Target sample rate (default: 16000)
+
+         Returns:
+             Tuple[torch.Tensor, int]: Audio tensor and sample rate
+         """
+         try:
+             # Get appropriate backend
+             backend = self._get_best_backend(format)
+             torchaudio.set_audio_backend(backend)
+             logger.info(f"Using {backend} backend for {format} format")
+
+             if format == AudioFormat.PCM:
+                 # Handle raw PCM
+                 wav = torch.frombuffer(file_obj, dtype=torch.int16)
+                 wav = wav.float() / 32768.0  # Normalize to [-1, 1]
+                 wav = wav.unsqueeze(0)  # Add channel dimension
+                 sr = target_sr
+             else:
+                 # For formats that might need ffmpeg processing
+                 if os.name == "nt":  # for windows
+                     wav, sr = torchaudio.load(io.BytesIO(file_obj))
+                 else:
+                     with tempfile.NamedTemporaryFile(suffix=f".{format.value}") as temp_file:
+                         # Write bytes to temporary file
+                         temp_file.write(file_obj)
+                         temp_file.flush()
+
+                         # Load audio
+                         wav, sr = torchaudio.load(temp_file.name)
+
+             # Convert to mono if stereo
+             if wav.shape[0] > 1:
+                 wav = torch.mean(wav, dim=0, keepdim=True)
+
+             # Resample if needed
+             if sr != target_sr:
+                 wav = torchaudio.functional.resample(wav, sr, target_sr)
+                 sr = target_sr
+
+             return wav, sr
+
+         except Exception as e:
+             logger.error(f"Error loading audio: {e}")
+             raise HTTPException(
+                 status_code=400,
+                 detail=f"Error processing {format} audio: {str(e)}"
+             )
+
+     def get_format_info(self) -> dict:
+         """Get information about supported formats"""
+         supported_formats = {}
+         for format in AudioFormat:
+             try:
+                 backend = self._get_best_backend(format)
+                 supported_formats[format] = {
+                     "supported": True,
+                     "backend": backend
+                 }
+             except ValueError:
+                 supported_formats[format] = {
+                     "supported": False,
+                     "backend": None
+                 }
+         return supported_formats
+
+
+ audio_processor = AudioProcessor()
+
+ app = FastAPI(lifespan=lifespan)
+
+
+ @app.get("/supported_formats")
+ async def get_supported_formats():
+     """Endpoint to check supported formats"""
+     return audio_processor.get_format_info()
+
+
+ @app.post("/tokenize/{format}")
+ async def tokenize_audio(format: AudioFormat = "wav", file: UploadFile = File(...)):
+     try:
+         # Read file
+         file_obj = await file.read()
+
+         # Load and process audio
+         wav, sr = await audio_processor.load_audio(file_obj, format)
+
+         # Ensure we're using CUDA if available
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+         wav = wav.to(device)
+
+         # Generate tokens
+         with torch.no_grad():
+             codes = vq_model.encode_audio(wav)
+             codes = codes[0].cpu().tolist()
+
+         # Format result
+         result = ''.join(f'<|sound_{num:04d}|>' for num in codes)
+
+         return JSONResponse(content={
+             "model_name": "whisper-vq-stoks-v3-7lang-fixed.model",
+             "tokens": f'<|sound_start|>{result}<|sound_end|>',
+             "format": format,
+             "sample_rate": sr,
+             "backend_used": audio_processor._get_best_backend(format)
+         })
+
+     except Exception as e:
+         logger.error(f"Error processing request: {e}")
+         raise HTTPException(
+             status_code=500,
+             detail=f"Error processing request: {str(e)}"
+         )
+
+
+ def self_terminate():
+     time.sleep(1)
+     parent = psutil.Process(psutil.Process(os.getpid()).ppid())
+     parent.kill()
+
+
+ @app.post("/kill")
+ async def kill():
+     threading.Thread(target=self_terminate, daemon=True).start()
+     return {"success": True}
+
+ if __name__ == "__main__":
+     import uvicorn
+     from uvicorn.config import LOGGING_CONFIG
+
+     # Route uvicorn's default and access logs to the same file as the app log
+     LOGGING_CONFIG["handlers"]["default"] = {
+         "class": "logging.FileHandler",
+         "filename": args.log_path,
+         "formatter": "default"
+     }
+     LOGGING_CONFIG["handlers"]["access"] = {
+         "class": "logging.FileHandler",
+         "filename": args.log_path,
+         "formatter": "access"
+     }
+     LOGGING_CONFIG["loggers"]["uvicorn.error"]["level"] = args.log_level
+     LOGGING_CONFIG["loggers"]["uvicorn.access"]["level"] = args.log_level
+
+     # Print supported formats at startup
+     processor = AudioProcessor()
+     format_info = processor.get_format_info()
+     logger.info("Supported formats:")
+     for format, info in format_info.items():
+         logger.info(f"{format}: {info}")
+
+     uvicorn.run(app, host="0.0.0.0", port=args.port)
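
For reference, a minimal client sketch for the POST /tokenize/{format} endpoint above, assuming the server runs locally on the default port (3348); the requests dependency and the audio.wav path are illustrative, not part of this repo:

import requests

# Hypothetical client for app.py's POST /tokenize/{format} endpoint.
# The upload field must be named "file"; "audio.wav" is a placeholder path.
with open("audio.wav", "rb") as f:
    resp = requests.post(
        "http://localhost:3348/tokenize/wav",
        files={"file": ("audio.wav", f, "audio/wav")},
    )
resp.raise_for_status()
payload = resp.json()
print(payload["tokens"])        # <|sound_start|><|sound_0042|>...<|sound_end|>
print(payload["backend_used"])  # "soundfile" or "ffmpeg"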
custom_component.py ADDED
@@ -0,0 +1,172 @@
+ import torch
+ import torch.nn as nn
+ import whisper
+ from whisper.model import AudioEncoder, ModelDimensions
+ from typing import Dict, Optional
+ from whisperspeech.vq_stoks import RQBottleneckTransformer, Tunables
+ from huggingface_hub import hf_hub_download
+ import torch.nn.functional as F
+ import os
+ from typing import List, Optional, Union
+ import io
+ import urllib.request
+ from tqdm import tqdm
+ import torchaudio
+ _HF_MODELS = {
+     "medium": "https://huggingface.co/jan-hq/WhisperVQ/resolve/main/medium_encoder_only.pt",
+ }
+ def available_models() -> List[str]:
+     """Returns the names of available models"""
+     return list(_HF_MODELS.keys())
+ def _download(url: str, root: str, in_memory: bool) -> Union[bytes, str]:
+     os.makedirs(root, exist_ok=True)
+
+     expected_sha256 = url.split("/")[-2]  # kept from the upstream whisper helper; not verified here
+     download_target = os.path.join(root, os.path.basename(url))
+
+     if os.path.exists(download_target) and not os.path.isfile(download_target):
+         raise RuntimeError(f"{download_target} exists and is not a regular file")
+
+     if os.path.isfile(download_target):
+         with open(download_target, "rb") as f:
+             model_bytes = f.read()
+         return model_bytes if in_memory else download_target
+
+     with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+         with tqdm(
+             total=int(source.info().get("Content-Length")),
+             ncols=80,
+             unit="iB",
+             unit_scale=True,
+             unit_divisor=1024,
+         ) as loop:
+             while True:
+                 buffer = source.read(8192)
+                 if not buffer:
+                     break
+
+                 output.write(buffer)
+                 loop.update(len(buffer))
+
+     model_bytes = open(download_target, "rb").read()
+     return model_bytes if in_memory else download_target
+ class CustomWhisperEncoder(nn.Module):
+     """
+     Lightweight wrapper that only loads the AudioEncoder part of Whisper
+     """
+     def __init__(self, name: str, device: str = None, download_root: str = None, in_memory: bool = False):
+         super().__init__()
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+         if download_root is None:
+             default = os.path.join(os.path.expanduser("~"), ".cache")
+             download_root = os.path.dirname(os.path.realpath(__file__))  # os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
+
+         if name in _HF_MODELS:
+             checkpoint_file = _download(_HF_MODELS[name], download_root, in_memory)
+         elif os.path.isfile(name):
+             checkpoint_file = open(name, "rb").read() if in_memory else name
+         else:
+             raise RuntimeError(
+                 f"Model {name} not found; available models = {available_models()}"
+             )
+
+         # Load weights
+         with (
+             io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb")
+         ) as fp:
+             checkpoint = torch.load(fp, map_location=device)
+         del checkpoint_file
+         dims = ModelDimensions(**checkpoint["dims"])
+         self.encoder = AudioEncoder(
+             dims.n_mels,
+             dims.n_audio_ctx,
+             dims.n_audio_state,
+             dims.n_audio_head,
+             dims.n_audio_layer,
+         )
+
+         self.encoder.load_state_dict(checkpoint["model_state_dict"])
+
+         if device:
+             self.to(device)
+
+         self.eval()
+
+     def forward(self, mel: torch.Tensor):
+         return self.encoder(mel)
+
+ class CustomRQBottleneckTransformer(RQBottleneckTransformer):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+     @classmethod
+     def load_vq_only(cls, ref="collabora/spear-tts-pytorch:whisper-vq-stoks-medium-en+pl.model",
+                      repo_id=None, filename=None, local_filename=None):
+         if repo_id is None and filename is None and local_filename is None:
+             if ":" in ref:
+                 repo_id, filename = ref.split(":", 1)
+             else:
+                 local_filename = ref
+         if not local_filename:
+             local_filename = hf_hub_download(repo_id=repo_id, filename=filename)
+
+         # Load the spec
+         spec = torch.load(local_filename)
+
+         # Create instance with minimal required components
+         instance = cls(**spec['config'], tunables=Tunables(**Tunables.upgrade(spec.get('tunables', {}))))
+
+         # Load only necessary state dict entries
+         required_components = {
+             'rq', 'mlp', 'mlp_ln'
+         }
+         filtered_state_dict = {
+             k: v for k, v in spec['state_dict'].items()
+             if any(k.startswith(comp) for comp in required_components)
+         }
+
+         instance.load_state_dict(filtered_state_dict, strict=False)
+         instance.eval()
+         return instance
+
+     def load_encoder(self, device=None):
+         if self.whmodel is not None: return
+         device = device or self.device
+         # Use our custom encoder-only model
+         if self.whmodel is None:
+             encoder = CustomWhisperEncoder(self.whisper_model_name, device=device)
+             self.whmodel = [encoder]
+         multilingual = not self.whisper_model_name.endswith('.en')
+         self.tokenizer = whisper.tokenizer.get_tokenizer(multilingual)
+
+     def optimized_encode_mel(self, mel):
+         assert len(mel.shape) == 3, "invalid mel spectrogram shape, expect (batch,chn,time)"
+         self.load_encoder()
+         n = mel.shape[-1]
+         if n > whisper.audio.N_FRAMES:
+             padding = 0
+             padded = mel[:, :, :whisper.audio.N_FRAMES]
+         else:
+             padding = -n % whisper.audio.N_FRAMES
+             padded = F.pad(mel, (0, padding), value=-1.5)
+         embs = self.whmodel[0].encoder(padded)  # .to(self.whmodel[0].device))#[:,:n//2]
+         stoks = self.quantize(embs)
+         if self.tunables.mask_embs:
+             return stoks[:, :n // 2 // self.downsample]
+         else:
+             return stoks
+     # override
+     def encode_audio(self, audio):
+         if isinstance(audio, str):
+             x, sr = torchaudio.load(audio)
+             x = torchaudio.transforms.Resample(sr, 16000)(x)[0]
+             audio = x.unsqueeze(0)
+         return self.optimized_encode_mel(self.log_mel_spectrogram(audio).to(self.device))
+
+ if __name__ == "__main__":
+     # Load the model
+     vqmodel = CustomRQBottleneckTransformer.load_vq_only(
+         "whisper-vq-stoks-v3-7lang-fixed.model"
+     ).to("cuda")
+     vqmodel.load_encoder('cuda')
+     vqmodel.eval()
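
For reference, a sketch of using CustomRQBottleneckTransformer outside the server, mirroring how app.py loads the model and formats tokens; sample.wav is a placeholder input:

import torch
from custom_component import CustomRQBottleneckTransformer

# Assumes whisper-vq-stoks-v3-7lang-fixed.model sits next to this script
# (see download_model.py below); "sample.wav" is a placeholder path.
device = "cuda" if torch.cuda.is_available() else "cpu"
vqmodel = CustomRQBottleneckTransformer.load_vq_only(
    "whisper-vq-stoks-v3-7lang-fixed.model"
).to(device)
vqmodel.load_encoder(device)
vqmodel.eval()

with torch.no_grad():
    codes = vqmodel.encode_audio("sample.wav")[0].cpu().tolist()
tokens = "<|sound_start|>" + "".join(f"<|sound_{c:04d}|>" for c in codes) + "<|sound_end|>"
print(tokens)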
download_model.py ADDED
@@ -0,0 +1,51 @@
+ import urllib.request
+ from tqdm import tqdm
+ from huggingface_hub import hf_hub_download
+ import os
+
+ encoder_url = "https://huggingface.co/jan-hq/WhisperVQ/resolve/main/medium_encoder_only.pt"
+
+
+ def _download(url: str, root: str, in_memory: bool):
+     os.makedirs(root, exist_ok=True)
+
+     expected_sha256 = url.split("/")[-2]  # kept from the upstream whisper helper; not verified here
+     download_target = os.path.join(root, os.path.basename(url))
+
+     if os.path.exists(download_target) and not os.path.isfile(download_target):
+         raise RuntimeError(
+             f"{download_target} exists and is not a regular file")
+
+     if os.path.isfile(download_target):
+         with open(download_target, "rb") as f:
+             model_bytes = f.read()
+         return model_bytes if in_memory else download_target
+
+     with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+         with tqdm(
+             total=int(source.info().get("Content-Length")),
+             ncols=80,
+             unit="iB",
+             unit_scale=True,
+             unit_divisor=1024,
+         ) as loop:
+             while True:
+                 buffer = source.read(8192)
+                 if not buffer:
+                     break
+
+                 output.write(buffer)
+                 loop.update(len(buffer))
+
+     model_bytes = open(download_target, "rb").read()
+     return model_bytes if in_memory else download_target
+
+
+ if not os.path.exists(os.path.dirname(os.path.realpath(__file__)) + "/whisper-vq-stoks-v3-7lang-fixed.model"):
+     hf_hub_download(
+         repo_id="jan-hq/WhisperVQ",
+         filename="whisper-vq-stoks-v3-7lang-fixed.model",
+         local_dir=".",
+     )
+
+ _download(encoder_url, os.path.dirname(os.path.realpath(__file__)), False)
medium_encoder_only.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c713c13a07f8980dfd81a11600424755a1ede2f4904803e7af042a7a94ab168d
+ size 614547576
model.yml ADDED
File without changes
requirements.cuda.txt ADDED
@@ -0,0 +1,24 @@
+ openai-whisper==20231117
+ huggingface_hub
+ IPython
+ pyarrow
+ matplotlib
+ librosa
+ soundfile
+ datasets
+ encodec
+ boto3
+ fire
+ vector_quantize_pytorch
+ webdataset
+ whisperspeech
+ --extra-index-url https://download.pytorch.org/whl/cu121
+ torch==2.2.0
+ torchaudio==2.2.0
+ numpy==1.26.4
+ fastapi
+ uvicorn
+
+ python-multipart
+ transformers
+ psutil
requirements.txt ADDED
@@ -0,0 +1,23 @@
+ openai-whisper==20231117
+ huggingface_hub
+ IPython
+ pyarrow
+ matplotlib
+ librosa
+ soundfile
+ datasets
+ encodec
+ boto3
+ fire
+ vector_quantize_pytorch
+ webdataset
+ whisperspeech
+ torch==2.2.0
+ torchaudio==2.2.0
+ numpy==1.26.4
+ fastapi
+ uvicorn
+
+ python-multipart
+ transformers
+ psutil
whisper-vq-stoks-v3-7lang-fixed.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e23368136f07ba474dd50fd728f1d216f4542550c456e8065855969b1df730
+ size 90921877