from enum import Enum
import os
import re
import aiohttp
import requests
import json
import subprocess
import asyncio
from io import BytesIO
import uuid

from math import ceil
from tqdm import tqdm
from pathlib import Path
from huggingface_hub import Repository
from PIL import Image, ImageOps
from fastapi import FastAPI, BackgroundTasks
from fastapi_utils.tasks import repeat_every
from fastapi.middleware.cors import CORSMiddleware
import boto3

from db import Database

AWS_ACCESS_KEY_ID = os.getenv('MY_AWS_ACCESS_KEY_ID')
AWS_SECRET_KEY = os.getenv('MY_AWS_SECRET_KEY')
AWS_S3_BUCKET_NAME = os.getenv('MY_AWS_S3_BUCKET_NAME')


HF_TOKEN = os.environ.get("HF_TOKEN")

S3_DATA_FOLDER = Path("sd-multiplayer-data")

DB_FOLDER = Path("diffusers-gallery-data")

CLASSIFIER_URL = "https://radames-aesthetic-style-nsfw-classifier.hf.space/run/inference"
ASSETS_URL = "https://d26smi9133w0oo.cloudfront.net/diffusers-gallery/"


s3 = boto3.client(service_name='s3',
                  aws_access_key_id=AWS_ACCESS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_KEY)


repo = Repository(
    local_dir=DB_FOLDER,
    repo_type="dataset",
    clone_from="huggingface-projects/diffusers-gallery-data",
    use_auth_token=True,
)
repo.git_pull()

database = Database(DB_FOLDER)


async def upload_resize_image_url(session, image_url):
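    """Download an image, crop it to a 400x400 JPEG thumbnail and upload it to S3.

    Returns the generated file name, or None when the download fails or is not an image.
    """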
    print(f"Uploading image {image_url}")
    async with session.get(image_url) as response:
        if response.status == 200 and response.headers.get('content-type', '').startswith('image'):
            image = Image.open(BytesIO(await response.read())).convert('RGB')
            # crop/scale to a 400x400 thumbnail, preserving aspect ratio and center-cropping
            image = ImageOps.fit(image, (400, 400), Image.LANCZOS)
            image_bytes = BytesIO()
            image.save(image_bytes, format="JPEG")
            image_bytes.seek(0)
            fname = f'{uuid.uuid4()}.jpg'
            s3.upload_fileobj(Fileobj=image_bytes, Bucket=AWS_S3_BUCKET_NAME, Key="diffusers-gallery/" + fname,
                              ExtraArgs={"ContentType": "image/jpeg", "CacheControl": "max-age=31536000"})
            return fname
    return None


def fetch_models(page=0):
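    """Fetch one page of public text-to-image models from the Hub's models-json endpoint."""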
    response = requests.get(
        f'https://huggingface.co/models-json?pipeline_tag=text-to-image&p={page}')
    data = response.json()
    return {
        "models": [model for model in data['models'] if not model['private']],
        "numItemsPerPage": data['numItemsPerPage'],
        "numTotalItems": data['numTotalItems'],
        "pageIndex": data['pageIndex']
    }


def fetch_model_card(model_id):
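    """Fetch the raw README.md of a model."""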
    response = requests.get(
        f'https://huggingface.co/{model_id}/raw/main/README.md')
    return response.text


async def find_image_in_model_card(text):
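    """Extract image URLs from a model card and upload up to the first three as thumbnails."""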
    # match URLs that end with a known image extension
    image_regex = re.compile(r'https?://\S+\.(?:png|jpg|jpeg|webp)')
    urls = re.findall(image_regex, text)
    if not urls:
        return []

    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.ensure_future(upload_resize_image_url(
            session, image_url)) for image_url in urls[0:3]]
        return await asyncio.gather(*tasks)


def run_classifier(images):
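    """Run the aesthetic/style/NSFW classifier Space on the first uploaded image.

    Returns a mapping of label -> rounded score, or an empty dict when there are no images.
    """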
    images = [i for i in images if i is not None]
    if len(images) > 0:
        # classify only the first image
        images_urls = [ASSETS_URL + images[0]]
        response = requests.post(CLASSIFIER_URL, json={"data": [
            {"urls": images_urls},  # list of image URLs to classify
            False,  # enable/disable gallery image output
            None,  # single image input
            None,  # files input
        ]}).json()

        # response data is [[{img0}, {img1}, ...], Label, Gallery]; take the predictions for the first image
        class_data = response['data'][0][0]
        print(class_data)
        class_data_parsed = {row['label']: round(
            row['score'], 3) for row in class_data}

        # label -> rounded score mapping used to enrich the model row
        return class_data_parsed
    else:
        return {}


async def get_all_new_models():
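    """Fetch every public text-to-image model on the Hub by walking all result pages."""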
    initial = fetch_models(0)
    num_pages = ceil(initial['numTotalItems'] / initial['numItemsPerPage'])

    print(
        f"Total items: {initial['numTotalItems']} - Items per page: {initial['numItemsPerPage']}")
    print(f"Found {num_pages} pages")

    # fetch all models
    new_models = []
    for page in tqdm(range(0, num_pages)):
        print(f"Fetching page {page} of {num_pages}")
        page_models = fetch_models(page)
        new_models += page_models['models']
    return new_models


async def sync_data():
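    """Fetch all models, diff them against the local database, enrich the new ones
    with README images and classifier scores, then push the dataset repo."""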
    print("Fetching models")
    new_models = await get_all_new_models()
    print(f"Found {len(new_models)} models")
    # persist the full model list; its ids are diffed against the database below
    with open(DB_FOLDER / "models.json", "w") as f:
        json.dump(new_models, f)
    # with open(DB_FOLDER / "models.json", "r") as f:
    #     new_models = json.load(f)

    new_models_ids = [model['id'] for model in new_models]

    # get existing models
    with database.get_db() as db:
        cursor = db.cursor()
        cursor.execute("SELECT id FROM models")
        existing_models = [row['id'] for row in cursor.fetchall()]
    models_ids_to_add = list(set(new_models_ids) - set(existing_models))
    # keep the full entries from new_models for the ids not yet in the database
    models = [model for model in new_models if model['id'] in models_ids_to_add]

    print(f"Found {len(models)} new models")
    for model in tqdm(models):
        model_id = model['id']
        model_card = fetch_model_card(model_id)
        images = await find_image_in_model_card(model_card)
        classifier = run_classifier(images)
        # insert the model row together with its images and classifier scores
        with database.get_db() as db:
            cursor = db.cursor()
            cursor.execute("INSERT INTO models(id, data) VALUES (?, ?)",
                           [model_id, json.dumps({
                               **model,
                               "images": images,
                               "class": classifier
                           })])
            db.commit()

    if len(models) > 0:
        print("Updating repository")
        subprocess.Popen(
            "git add . && git commit --amend -m 'update' && git push --force", cwd=DB_FOLDER, shell=True)
    else:
        print("No new models found")

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# @ app.get("/sync")
# async def sync(background_tasks: BackgroundTasks):
#     await sync_data()
#     return "Synced data to huggingface datasets"


MAX_PAGE_SIZE = 30


class Sort(str, Enum):
    trending = "trending"
    recent = "recent"
    likes = "likes"


@ app.get("/api/models")
def get_page(page: int = 1, sort: Sort = Sort.trending):
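    """Paginated model listing, limited to models with more than 4 likes,
    sorted by trending score, last-modified date, or number of likes."""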
    page = page if page > 0 else 1
    if sort == Sort.trending:
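        # trending score: likes / (days since last modification + 2)^2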
        sort_query = "json_extract(data, '$.likes') / POW((JULIANDAY('now') - JULIANDAY(datetime(json_extract(data, '$.lastModified')))) + 2, 2) DESC"
    elif sort == Sort.recent:
        sort_query = "datetime(json_extract(data, '$.lastModified')) DESC"
    elif sort == Sort.likes:
        sort_query = "json_extract(data, '$.likes') DESC"

    with database.get_db() as db:
        cursor = db.cursor()
        cursor.execute(f"""
            SELECT *, COUNT(*) OVER() AS total
            FROM models
            WHERE json_extract(data, '$.likes') > 4
            ORDER BY {sort_query}
            LIMIT {MAX_PAGE_SIZE} OFFSET {(page - 1) * MAX_PAGE_SIZE}
        """)
        results = cursor.fetchall()
        total = results[0]['total'] if results else 0
        total_pages = (total + MAX_PAGE_SIZE - 1) // MAX_PAGE_SIZE

    return {
        "models": [json.loads(result['data']) for result in results],
        "totalPages": total_pages
    }


@app.get("/")
def read_root():
    return "Just a bot to sync data from diffusers gallery"


# @app.on_event("startup")
# @repeat_every(seconds=60 * 60 * 24, wait_first=True)
# async def repeat_sync():
#     await sync_data()
#     return "Synced data to huggingface datasets"