dpredrag radames HF staff committed on
Commit
e4724c0
0 Parent(s):

Duplicate from huggingface-projects/diffusers-gallery-bot

Browse files

Co-authored-by: Radamés Ajna <radames@users.noreply.huggingface.co>

Files changed (10) hide show
  1. .gitattributes +34 -0
  2. .gitignore +22 -0
  3. Dockerfile +25 -0
  4. README.md +12 -0
  5. app.py +330 -0
  6. classifier.py +70 -0
  7. db.py +36 -0
  8. packages.txt +1 -0
  9. requirements.txt +10 -0
  10. schema.sql +13 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .DS_Store
2
+ node_modules
3
+ /build
4
+ /.svelte-kit
5
+ /package
6
+ .env
7
+ .env.*
8
+ !.env.example
9
+
10
+ # Ignore files for PNPM, NPM and YARN
11
+ pnpm-lock.yaml
12
+ package-lock.json
13
+ yarn.lock
14
+ venv/
15
+ __pycache__/
16
+ flagged/
17
+ data
18
+ data.db
19
+ data.json
20
+ rooms_data.db
21
+ sd-multiplayer-data/
22
+ diffusers-gallery-data/
Dockerfile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

WORKDIR /code

# Install Python dependencies first so this layer is cached across code changes.
COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Git LFS — the dataset repo cloned at runtime (diffusers-gallery-data) uses LFS
RUN apt-get update && apt-get install -y git-lfs
RUN git lfs install


# User — run as a non-root uid-1000 user (Hugging Face Spaces convention)
RUN useradd -m -u 1000 user
USER user
ENV HOME /home/user
ENV PATH $HOME/.local/bin:$PATH
WORKDIR $HOME
RUN mkdir app
WORKDIR $HOME/app

COPY . $HOME/app

# Serve the FastAPI app on 7860, the port declared in README.md (app_port)
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Diffusers Gallery Bot
3
+ emoji: 🤖
4
+ colorFrom: red
5
+ colorTo: indigo
6
+ sdk: docker
7
+ app_port: 7860
8
+ pinned: false
9
+ duplicated_from: huggingface-projects/diffusers-gallery-bot
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ import os
3
+ import re
4
+ import aiohttp
5
+ import requests
6
+ import json
7
+ import subprocess
8
+ import asyncio
9
+ from io import BytesIO
10
+ import uuid
11
+
12
+ from math import ceil
13
+ from tqdm import tqdm
14
+ from pathlib import Path
15
+ from huggingface_hub import Repository
16
+ from PIL import Image, ImageOps
17
+ from fastapi import FastAPI, BackgroundTasks
18
+ from fastapi.responses import HTMLResponse
19
+
20
+ from fastapi_utils.tasks import repeat_every
21
+ from fastapi.middleware.cors import CORSMiddleware
22
+ import boto3
23
+ from datetime import datetime
24
+ from db import Database
25
+
26
# S3 credentials for the thumbnail bucket. The MY_ prefix avoids colliding with
# reserved AWS_* environment variable names.
AWS_ACCESS_KEY_ID = os.getenv('MY_AWS_ACCESS_KEY_ID')
AWS_SECRET_KEY = os.getenv('MY_AWS_SECRET_KEY')
AWS_S3_BUCKET_NAME = os.getenv('MY_AWS_S3_BUCKET_NAME')


# NOTE(review): read here but not referenced elsewhere in this file; presumably
# huggingface_hub picks the token up from the environment — confirm.
HF_TOKEN = os.environ.get("HF_TOKEN")

# NOTE(review): appears unused in this file — confirm before removing.
S3_DATA_FOLDER = Path("sd-multiplayer-data")

# Local clone of the dataset repository holding models.db / models.json.
DB_FOLDER = Path("diffusers-gallery-data")

# Gradio Space exposing the aesthetic/style/NSFW classifier API.
CLASSIFIER_URL = "https://radames-aesthetic-style-nsfw-classifier.hf.space/run/inference"
# CloudFront distribution serving the uploaded gallery thumbnails.
ASSETS_URL = "https://d26smi9133w0oo.cloudfront.net/diffusers-gallery/"


s3 = boto3.client(service_name='s3',
                  aws_access_key_id=AWS_ACCESS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_KEY)


# Clone (or reuse) the data repository at import time and bring it up to date.
repo = Repository(
    local_dir=DB_FOLDER,
    repo_type="dataset",
    clone_from="huggingface-projects/diffusers-gallery-data",
    use_auth_token=True,
)
repo.git_pull()

# SQLite wrapper over DB_FOLDER/models.db (created from schema.sql on first run).
database = Database(DB_FOLDER)
55
+
56
+
57
async def upload_resize_image_url(session, image_url):
    """Download ``image_url``, crop/resize it to a 400x400 JPEG thumbnail and
    upload it to S3 under ``diffusers-gallery/<uuid>.jpg``.

    Returns the generated file name on success, or ``None`` when the download
    fails, the response is not image-like, or the upload raises.
    """
    print(f"Uploading image {image_url}")
    try:
        async with session.get(image_url) as response:
            # Only touch headers after confirming a 200 (mirrors short-circuit).
            if response.status == 200:
                content_type = response.headers['content-type']
                looks_like_image = (content_type.startswith('image')
                                    or content_type.startswith('application'))
                if looks_like_image:
                    raw = await response.read()
                    thumbnail = Image.open(BytesIO(raw)).convert('RGB')
                    # Center-crop + resize to a fixed square.
                    thumbnail = ImageOps.fit(thumbnail, (400, 400), Image.LANCZOS)
                    buffer = BytesIO()
                    thumbnail.save(buffer, format="JPEG")
                    buffer.seek(0)
                    fname = f'{uuid.uuid4()}.jpg'
                    s3.upload_fileobj(
                        Fileobj=buffer,
                        Bucket=AWS_S3_BUCKET_NAME,
                        Key="diffusers-gallery/" + fname,
                        ExtraArgs={"ContentType": "image/jpeg",
                                   "CacheControl": "max-age=31536000"})
                    return fname
    except Exception as e:
        # Best-effort: log and signal failure with None so callers can skip.
        print(f"Error uploading image {image_url}: {e}")
        return None
75
+
76
+
77
def fetch_models(page=0):
    """Fetch one page of text-to-image models from the Hub's models-json endpoint.

    Returns a dict with the page's public models plus the listing's pagination
    metadata (numItemsPerPage, numTotalItems, pageIndex).
    """
    listing_url = (
        f'https://huggingface.co/models-json?pipeline_tag=text-to-image&p={page}')
    data = requests.get(listing_url).json()
    public_models = [m for m in data['models'] if not m['private']]
    return {
        "models": public_models,
        "numItemsPerPage": data['numItemsPerPage'],
        "numTotalItems": data['numTotalItems'],
        "pageIndex": data['pageIndex'],
    }
87
+
88
+
89
def fetch_model_card(model_id):
    """Return the raw README.md markdown of a Hub model.

    No status check is performed: on a missing card the error body text is
    returned as-is and simply yields no image matches downstream.
    """
    card_url = f'https://huggingface.co/{model_id}/raw/main/README.md'
    return requests.get(card_url).text
93
+
94
+
95
async def find_image_in_model_card(text):
    """Scan model-card markdown for image URLs and upload resized copies of up
    to the first three to S3.

    Returns a list of uploaded file names (individual entries may be ``None``
    when that upload failed) or ``[]`` when no image URL is found.
    """
    # BUG FIX: the old pattern r'https?://\S+(?:png|jpg|jpeg|webp)' (a) did not
    # require a '.' before the extension, so any URL merely *ending* in "png"
    # matched, and (b) used a greedy \S+ that could fuse two URLs on one line
    # (e.g. 'a.png)b.jpg') into a single bogus match. Lazy \S+? plus a literal
    # escaped dot fixes both.
    image_regex = re.compile(r'https?://\S+?\.(?:png|jpg|jpeg|webp)')
    urls = re.findall(image_regex, text)
    if not urls:
        return []

    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.ensure_future(upload_resize_image_url(
            session, image_url)) for image_url in urls[0:3]]
        return await asyncio.gather(*tasks)
105
+
106
+
107
def run_classifier(images):
    """Classify the first usable image via the classifier Space.

    ``images`` is a list of S3 file names; ``None`` entries (failed uploads)
    are dropped. Returns ``{label: score}`` with scores rounded to 3 decimals,
    or ``{}`` when there is no image to classify.
    """
    usable = [name for name in images if name is not None]
    if not usable:
        return {}
    # classifying only the first image
    payload = {"data": [
        {"urls": [ASSETS_URL + usable[0]]},  # json urls: list of images urls
        False,  # enable/disable gallery image output
        None,   # single image input
        None,   # files input
    ]}
    response = requests.post(CLASSIFIER_URL, json=payload).json()

    # data response is array data:[[{img0}, {img1}, {img2}...], Label, Gallery]
    first_image_classes = response['data'][0][0]
    return {entry['label']: round(entry['score'], 3)
            for entry in first_image_classes}
128
+
129
+
130
async def get_all_new_models():
    """Page through the whole Hub listing and return every public
    text-to-image model (page 0 is fetched once for the page count and again
    inside the loop, matching the listing's own pagination metadata)."""
    first_page = fetch_models(0)
    total_items = first_page['numTotalItems']
    items_per_page = first_page['numItemsPerPage']
    num_pages = ceil(total_items / items_per_page)

    print(
        f"Total items: {total_items} - Items per page: {items_per_page}")
    print(f"Found {num_pages} pages")

    # fetch all models
    collected = []
    for page in tqdm(range(num_pages)):
        print(f"Fetching page {page} of {num_pages}")
        collected.extend(fetch_models(page)['models'])
    return collected
145
+
146
+
147
async def sync_data():
    """One full sync pass.

    1. Pull the data repo and fetch the complete Hub model listing.
    2. Insert rows for models not yet in the DB (with thumbnails + classifier
       scores).
    3. Retry image extraction/classification for rows that still have no images.
    4. Refresh likes/downloads for every model.
    5. Commit (amend) and force-push the updated DB back to the dataset repo.
    """
    print("Fetching models")
    repo.git_pull()
    all_models = await get_all_new_models()
    print(f"Found {len(all_models)} models")
    # save list of all models for ids
    with open(DB_FOLDER / "models.json", "w") as f:
        json.dump(all_models, f)

    new_models_ids = [model['id'] for model in all_models]

    # get existing models
    with database.get_db() as db:
        cursor = db.cursor()
        cursor.execute("SELECT id FROM models")
        existing_models = [row['id'] for row in cursor.fetchall()]
    models_ids_to_add = list(set(new_models_ids) - set(existing_models))
    # find all models id to add from new_models
    models = [model for model in all_models if model['id'] in models_ids_to_add]

    print(f"Found {len(models)} new models")
    for model in tqdm(models):
        model_id = model['id']
        likes = model['likes']
        downloads = model['downloads']
        model_card = fetch_model_card(model_id)
        images = await find_image_in_model_card(model_card)

        classifier = run_classifier(images)
        print(images, classifier)
        # insert model row with image and classifier data
        with database.get_db() as db:
            cursor = db.cursor()
            cursor.execute("INSERT INTO models(id, data, likes, downloads) VALUES (?, ?, ?, ?)",
                           [model_id,
                            json.dumps({
                                **model,
                                "images": images,
                                "class": classifier
                            }),
                            likes,
                            downloads
                            ])
            db.commit()
    print("Try to update images again")
    with database.get_db() as db:
        cursor = db.cursor()
        cursor.execute(
            "SELECT * from models WHERE json_array_length(data, '$.images') < 1;")
        models_no_images = list(cursor.fetchall())
        for model in tqdm(models_no_images):
            model_id = model['id']
            model_data = json.loads(model['data'])
            print("Updating model", model_id)
            model_card = fetch_model_card(model_id)
            images = await find_image_in_model_card(model_card)
            classifier = run_classifier(images)

            # BUG FIX: the fetched images/classifier results were previously
            # discarded and model_data was written back unchanged. Merge them
            # into the JSON payload (same shape as the INSERT above) before
            # persisting.
            model_data["images"] = images
            model_data["class"] = classifier

            # update model row with image and classifier data
            with database.get_db() as db:
                cursor = db.cursor()
                cursor.execute("UPDATE models SET data = ? WHERE id = ?",
                               [json.dumps(model_data), model_id])
                db.commit()

    print("Update likes and downloads")
    for model in tqdm(all_models):
        model_id = model['id']
        likes = model['likes']
        downloads = model['downloads']
        with database.get_db() as db:
            cursor = db.cursor()
            cursor.execute("UPDATE models SET likes = ?, downloads = ? WHERE id = ?",
                           [likes, downloads, model_id])
            db.commit()

    print("Updating DB repository")
    time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE: shell=True is acceptable only because cmd is a fixed, trusted
    # string built from datetime output — never interpolate external input here.
    cmd = f"git add . && git commit --amend -m 'update at {time}' && git push --force"
    print(cmd)
    subprocess.Popen(cmd, cwd=DB_FOLDER, shell=True)
230
+
231
+
232
app = FastAPI()
# Wide-open CORS so the gallery front-end (hosted on another origin) can call
# the JSON API directly from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
240
+
241
+
242
+ # @ app.get("/sync")
243
+ # async def sync(background_tasks: BackgroundTasks):
244
+ # await sync_data()
245
+ # return "Synced data to huggingface datasets"
246
+
247
+
248
+ MAX_PAGE_SIZE = 30
249
+
250
+
251
class Sort(str, Enum):
    # str mixin so FastAPI validates/serializes the enum as a plain query value.
    trending = "trending"  # likes discounted by model age (see get_page)
    recent = "recent"      # most recently modified first
    likes = "likes"        # raw like count
255
+
256
+
257
class Style(str, Enum):
    # str mixin so FastAPI validates/serializes the enum as a plain query value.
    all = "all"
    anime = "anime"
    s3D = "3d"  # "3d" is not a valid Python identifier, hence the s3D member name
    realistic = "realistic"
    nsfw = "nsfw"
263
+
264
+
265
@app.get("/api/models")
def get_page(page: int = 1, sort: Sort = Sort.trending, style: Style = Style.all):
    """Paginated gallery listing.

    Filters models (likes > 3) by style, orders by the chosen sort, and
    returns MAX_PAGE_SIZE rows per page plus the total page count. A model is
    flagged NSFW when its explicit or suggestive classifier score exceeds 0.3.
    """
    page = max(page, 1)

    # SQL fragments are selected from fixed tables keyed by the validated
    # enums — no user-controlled text ever reaches the query string.
    sort_clauses = {
        Sort.trending: "likes / MYPOWER((JULIANDAY('now') - JULIANDAY(datetime(json_extract(data, '$.lastModified')))) + 2, 2) DESC",
        Sort.recent: "datetime(json_extract(data, '$.lastModified')) DESC",
        Sort.likes: "likes DESC",
    }
    style_clauses = {
        Style.all: "isNFSW = false",
        Style.anime: "json_extract(data, '$.class.anime') > 0.1 AND isNFSW = false",
        Style.s3D: "json_extract(data, '$.class.3d') > 0.1 AND isNFSW = false",
        Style.realistic: "json_extract(data, '$.class.real_life') > 0.1 AND isNFSW = false",
        Style.nsfw: "isNFSW = true",
    }
    sort_query = sort_clauses[sort]
    style_query = style_clauses[style]

    with database.get_db() as db:
        cursor = db.cursor()
        cursor.execute(f"""
        SELECT *, COUNT(*) OVER() AS total, isNFSW
        FROM (
            SELECT * ,
            json_extract(data, '$.class.explicit') > 0.3 OR json_extract(data, '$.class.suggestive') > 0.3 AS isNFSW
            FROM models
        )
        WHERE likes > 3 AND {style_query}
        ORDER BY {sort_query}
        LIMIT {MAX_PAGE_SIZE} OFFSET {(page - 1) * MAX_PAGE_SIZE}
        """)
        rows = cursor.fetchall()

    total = rows[0]['total'] if rows else 0
    total_pages = (total + MAX_PAGE_SIZE - 1) // MAX_PAGE_SIZE

    models_data = []
    for row in rows:
        payload = json.loads(row['data'])
        # the likes/downloads columns are refreshed more often than the JSON blob
        payload['downloads'] = row['downloads']
        payload['likes'] = row['likes']
        payload['isNFSW'] = bool(row['isNFSW'])
        models_data.append(payload)

    return {
        "models": models_data,
        "totalPages": total_pages
    }
315
+
316
+
317
@app.get("/")
def read_root():
    """Minimal landing page pointing visitors at the real gallery Space."""
    # return html page from string
    return HTMLResponse("""
    <p>Just a bot to sync data from diffusers gallery please go to
    <a href="https://huggingface.co/spaces/huggingface-projects/diffusers-gallery">https://huggingface.co/spaces/huggingface-projects/diffusers-gallery</a>
    </p>""")
324
+
325
+
326
@app.on_event("startup")
@repeat_every(seconds=60 * 60 * 6, wait_first=False)  # every 6h, first run at startup
async def repeat_sync():
    """Scheduled background job: run a full sync pass every six hours."""
    await sync_data()
    return "Synced data to huggingface datasets"
classifier.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import requests
4
+ import json
5
+ import subprocess
6
+ from io import BytesIO
7
+ import uuid
8
+
9
+ from math import ceil
10
+ from tqdm import tqdm
11
+ from pathlib import Path
12
+
13
+ from db import Database
14
+
15
# Local clone of the dataset repository holding models.db (same layout as app.py).
DB_FOLDER = Path("diffusers-gallery-data")

database = Database(DB_FOLDER)


# Gradio Space exposing the aesthetic/style/NSFW classifier API.
CLASSIFIER_URL = "https://radames-aesthetic-style-nsfw-classifier.hf.space/run/inference"
# CloudFront distribution serving the uploaded gallery thumbnails.
ASSETS_URL = "https://d26smi9133w0oo.cloudfront.net/diffusers-gallery/"
22
+
23
+
24
+ def main():
25
+
26
+ with database.get_db() as db:
27
+ cursor = db.cursor()
28
+ cursor.execute("""
29
+ SELECT *
30
+ FROM models
31
+ """)
32
+ results = list(cursor.fetchall())
33
+
34
+ for row in tqdm(results):
35
+ row_id = row['id']
36
+ # keep json data on row_data
37
+ row_data = json.loads(row['data'])
38
+ print("updating row", row_id)
39
+ images = row_data['images']
40
+
41
+ # filter nones
42
+ images = [i for i in images if i is not None]
43
+ if len(images) > 0:
44
+ # classifying only the first image
45
+ images_urls = [ASSETS_URL + images[0]]
46
+ response = requests.post(CLASSIFIER_URL, json={"data": [
47
+ {"urls": images_urls}, # json urls: list of images urls
48
+ False, # enable/disable gallery image output
49
+ None, # single image input
50
+ None, # files input
51
+ ]}).json()
52
+
53
+ # data response is array data:[[{img0}, {img1}, {img2}...], Label, Gallery],
54
+ class_data = response['data'][0][0]
55
+ class_data_parsed = {row['label']: round(
56
+ row['score'], 3) for row in class_data}
57
+
58
+ # update row data with classificator data
59
+ row_data['class'] = class_data_parsed
60
+ else:
61
+ row_data['class'] = {}
62
+ with database.get_db() as db:
63
+ cursor = db.cursor()
64
+ cursor.execute("UPDATE models SET data = ? WHERE id = ?",
65
+ [json.dumps(row_data), row_id])
66
+ db.commit()
67
+
68
+
69
+ if __name__ == "__main__":
70
+ main()
db.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ from pathlib import Path
3
+ import math
4
+
5
def power(x, y):
    """Exponentiation helper registered with SQLite as the MYPOWER function.

    Delegates to math.pow, so the result is always a float.
    """
    result = math.pow(x, y)
    return result
7
+
8
+
9
+
10
class Database:
    """Thin wrapper around the SQLite file ``<db_path>/models.db``.

    On first use the database is created from schema.sql; ``get_db()`` hands
    out connections with Row access and the MYPOWER SQL function registered.
    NOTE(review): schema.sql is resolved relative to the current working
    directory, not db_path — confirm callers always run from the app root.
    """

    def __init__(self, db_path=None):
        if db_path is None:
            raise ValueError("db_path must be provided")
        self.db_path = db_path
        self.db_file = self.db_path / "models.db"
        if not self.db_file.exists():
            print("Creating database")
            print("DB_FILE", self.db_file)
            connection = sqlite3.connect(self.db_file)
            with open(Path("schema.sql"), "r") as schema:
                connection.executescript(schema.read())
                connection.commit()
                connection.close()

    def get_db(self):
        """Open a fresh connection: Row factory + MYPOWER(x, y) registered."""
        connection = sqlite3.connect(self.db_file, check_same_thread=False)
        connection.create_function("MYPOWER", 2, power)
        connection.row_factory = sqlite3.Row
        return connection

    def __enter__(self):
        self.db = self.get_db()
        return self.db

    def __exit__(self, exc_type, exc_value, traceback):
        self.db.close()
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ sqlite3
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ huggingface-hub
2
+ fastapi-utils
3
+ uvicorn
4
+ tqdm
5
+ fastapi
6
+ requests
7
+ # asyncio  (removed: asyncio is part of the Python 3 standard library; the PyPI "asyncio" package is an obsolete 3.3-era backport and must not be installed)
8
+ aiohttp
9
+ Pillow
10
+ boto3
schema.sql ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
-- Schema for models.db (single-table gallery cache).
-- Disable FK enforcement while (re)creating the schema.
PRAGMA foreign_keys = OFF;

BEGIN TRANSACTION;

-- One row per Hub model shown in the gallery.
CREATE TABLE models (
    id TEXT PRIMARY KEY NOT NULL,                          -- Hub model id, e.g. "user/model"
    data json,                                             -- full listing payload + "images" + "class" scores
    likes INTEGER DEFAULT 0 NOT NULL,                      -- refreshed on every sync pass
    downloads INTEGER DEFAULT 0 NOT NULL,                  -- refreshed on every sync pass
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);

COMMIT;