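"""FastAPI service for searching Hugging Face datasets by column name.

Keeps dataset metadata in a local SQLite database, refreshes it on a cron
schedule with APScheduler, and uploads the database file to the
librarian-bots/column-db dataset repo on the Hugging Face Hub after each
refresh.
"""
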
import asyncio
import json
import logging
import os
import sqlite3
from contextlib import asynccontextmanager
from typing import List

import numpy as np
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from cashews import NOT_NONE, cache
from fastapi import FastAPI, HTTPException, Query
from huggingface_hub import login, upload_file
from pandas import Timestamp
from pydantic import BaseModel
from starlette.responses import RedirectResponse

from data_loader import refresh_data

login(token=os.getenv("HF_TOKEN"))

# Cron "hour" field for the refresh schedule; the default "*/6" fires every six hours.
UPDATE_SCHEDULE = {"hour": os.getenv("UPDATE_INTERVAL_HOURS", "*/6")}

# In-memory cache backend, capped in size with periodic expiry checks (per the query params).
cache.setup("mem://?check_interval=10&size=10000")
logger = logging.getLogger(__name__)


def get_db_connection():
    conn = sqlite3.connect("datasets.db")
    conn.row_factory = sqlite3.Row
    # WAL journaling lets readers proceed while a write is in progress;
    # synchronous=NORMAL is the usual durability/speed trade-off under WAL.
    conn.execute("PRAGMA journal_mode = WAL")
    conn.execute("PRAGMA synchronous = NORMAL")
    return conn


def setup_database():
    conn = get_db_connection()
    c = conn.cursor()
    c.execute(
        """CREATE TABLE IF NOT EXISTS datasets
                 (hub_id TEXT PRIMARY KEY, 
                  likes INTEGER,
                  downloads INTEGER,
                  tags JSON,
                  created_at INTEGER,
                  last_modified INTEGER,
                  license JSON,
                  language JSON,
                  config_name TEXT,
                  column_names JSON,
                  features JSON)"""
    )
    c.execute(
        """
    CREATE INDEX IF NOT EXISTS idx_column_names 
    ON datasets(column_names)
    """
    )
    c.execute(
        """
    CREATE INDEX IF NOT EXISTS idx_downloads_likes
    ON datasets(downloads DESC, likes DESC)
    """
    )
    conn.commit()
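    # Refresh the query planner's statistics for the freshly created indexes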
    c.execute("ANALYZE")
    conn.close()


def serialize_numpy(obj):
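    """`json.dumps` fallback serializer for NumPy types and pandas Timestamps."""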
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, Timestamp):
        return int(obj.timestamp())
    logger.error(f"Object of type {type(obj)} is not JSON serializable")
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")


def background_refresh_data():
    logger.info("Starting background data refresh")
    try:
        return refresh_data()
    except Exception as e:
        logger.error(f"Error in background data refresh: {str(e)}")
        return None


async def update_database():
    logger.info("Starting scheduled data refresh")

    # Run the blocking refresh in a worker thread so the event loop stays
    # responsive. A running thread cannot be interrupted, so on cancellation
    # we log and return without waiting for it to finish.
    try:
        datasets = await asyncio.get_event_loop().run_in_executor(
            None, background_refresh_data
        )
    except asyncio.CancelledError:
        logger.info("Data refresh cancelled")
        return

    if datasets is None:
        logger.error("Data refresh failed, skipping database update")
        return

    conn = get_db_connection()
    try:
        c = conn.cursor()
        c.executemany(
            """
            INSERT OR REPLACE INTO datasets 
            (hub_id, likes, downloads, tags, created_at, last_modified, license, language, config_name, column_names, features) 
            VALUES (?, ?, ?, json(?), ?, ?, json(?), json(?), ?, json(?), json(?))
            """,
            [
                (
                    data["hub_id"],
                    data.get("likes", 0),
                    data.get("downloads", 0),
                    json.dumps(data.get("tags", []), default=serialize_numpy),
                    int(data["created_at"].timestamp())
                    if isinstance(data.get("created_at"), Timestamp)
                    else data.get("created_at", 0),
                    int(data["last_modified"].timestamp())
                    if isinstance(data.get("last_modified"), Timestamp)
                    else data.get("last_modified", 0),
                    json.dumps(data.get("license", []), default=serialize_numpy),
                    json.dumps(data.get("language", []), default=serialize_numpy),
                    data.get("config_name", ""),
                    json.dumps(data.get("column_names", []), default=serialize_numpy),
                    json.dumps(data.get("features", []), default=serialize_numpy),
                )
                for data in datasets
            ],
        )
        conn.commit()
        logger.info("Scheduled data refresh completed")
    except Exception as e:
        logger.error(f"Error during database update: {str(e)}")
        conn.rollback()
    finally:
        conn.close()

    # Upload the database file to Hugging Face Hub
    try:
        upload_file(
            path_or_fileobj="datasets.db",
            path_in_repo="datasets.db",
            repo_id="librarian-bots/column-db",
            repo_type="dataset",
        )
        logger.info("Database file uploaded to Hugging Face Hub successfully")
    except Exception as e:
        logger.error(f"Error uploading database file to Hugging Face Hub: {str(e)}")


@asynccontextmanager
async def lifespan(app: FastAPI):
    setup_database()
    logger.info("Performing initial data refresh")
    await update_database()

    # Set up the scheduler
    scheduler = AsyncIOScheduler()
    # Schedule the update_database function using the UPDATE_SCHEDULE configuration
    scheduler.add_job(update_database, CronTrigger(**UPDATE_SCHEDULE))
    scheduler.start()

    yield

    scheduler.shutdown()


app = FastAPI(lifespan=lifespan)


@app.get("/", include_in_schema=False)
def root():
    return RedirectResponse(url="/docs")


class SearchResponse(BaseModel):
    total: int
    page: int
    page_size: int
    results: List[dict]


@app.get("/search", response_model=SearchResponse)
@cache(ttl="1h", condition=NOT_NONE)  # below the route decorator so FastAPI registers the cached wrapper
async def search_datasets(
    columns: List[str] = Query(...),
    match_all: bool = Query(False),
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=1000),
):
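    """Search for datasets whose column_names contain the requested columns.

    Example (hypothetical columns):
        GET /search?columns=text&columns=label&match_all=true&page_size=5
    """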
    offset = (page - 1) * page_size
    conn = get_db_connection()
    c = conn.cursor()

    try:
        if match_all:
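            # Require every requested column: count how many of them appear in
            # the dataset's column_names array and demand it equal len(columns).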
            query = """
            SELECT *, (
                SELECT COUNT(*)
                FROM json_each(column_names)
                WHERE json_each.value IN ({})
            ) as match_count
            FROM datasets
            WHERE match_count = ?
            ORDER BY downloads DESC, likes DESC
            LIMIT ? OFFSET ?
            """.format(",".join("?" * len(columns)))
            c.execute(query, (*columns, len(columns), page_size, offset))
        else:
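            # Require at least one of the requested columns to be present.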
            query = """
            SELECT * FROM datasets
            WHERE EXISTS (
                SELECT 1
                FROM json_each(column_names)
                WHERE json_each.value IN ({})
            )
            ORDER BY downloads DESC, likes DESC
            LIMIT ? OFFSET ?
            """.format(",".join("?" * len(columns)))
            c.execute(query, (*columns, page_size, offset))

        results = [dict(row) for row in c.fetchall()]

        # Get total count
        if match_all:
            count_query = """
            SELECT COUNT(*) as total FROM datasets
            WHERE (
                SELECT COUNT(*)
                FROM json_each(column_names)
                WHERE json_each.value IN ({})
            ) = ?
            """.format(",".join("?" * len(columns)))
            c.execute(count_query, (*columns, len(columns)))
        else:
            count_query = """
            SELECT COUNT(*) as total FROM datasets
            WHERE EXISTS (
                SELECT 1
                FROM json_each(column_names)
                WHERE json_each.value IN ({})
            )
            """.format(",".join("?" * len(columns)))
            c.execute(count_query, columns)

        total = c.fetchone()["total"]

        for result in results:
            result["tags"] = json.loads(result["tags"])
            result["license"] = json.loads(result["license"])
            result["language"] = json.loads(result["language"])
            result["column_names"] = json.loads(result["column_names"])
            result["features"] = json.loads(result["features"])

        return SearchResponse(
            total=total, page=page, page_size=page_size, results=results
        )

    except sqlite3.Error as e:
        logger.error(f"Database error: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Database error: {str(e)}") from e
    finally:
        conn.close()


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)