import hashlib
import json
import os
import sys
from io import BytesIO
from typing import IO

import requests
from tqdm import tqdm
from zkl_promptui import confirm_clear_dir_path

# Make the repository root importable so `scripts.config` resolves when this
# file is run directly as a script (root is three directory levels up).
root_dir_path = os.path.join(os.path.dirname(__file__), "../../..")
sys.path.append(root_dir_path)

from scripts.config import datasets_dir_path

# All dump URLs built below are relative to the official Wikimedia mirror.
base_url = "https://dumps.wikimedia.org"


def make_dump_base_url(lang, date):
    """Return the directory URL of the (lang, date) dump on the mirror."""
    wiki_name = f"{lang}wiki"
    return "/".join((base_url, wiki_name, date)) + "/"


def make_dump_info_file_url(lang, date):
    """Return the URL of the machine-readable dump status file."""
    dump_root = make_dump_base_url(lang, date)
    return f"{dump_root}dumpstatus.json"


def download(url: str, io: IO, *,
    progress: tqdm | None = None,
    total_size: int | None = None,
    expected_md5: str | None = None,
    expected_sha1: str | None = None,
    chunk_size: int = 16 * 1024,
):
    """Stream the resource at *url* into the writable binary stream *io*.

    Args:
        url: URL to fetch (downloaded with ``requests`` in streaming mode).
        io: Binary sink receiving the body chunk by chunk.
        progress: Existing tqdm bar to update; when None a new bar is
            created here and closed before returning.
        total_size: Total for the self-created bar; when None, falls back
            to the ``Content-Length`` response header if present.
        expected_md5: Hex digest to verify the payload against, if given.
        expected_sha1: Hex digest to verify the payload against, if given.
        chunk_size: Bytes requested per read from the response stream.

    Raises:
        requests.HTTPError: on a non-2xx response.
        ValueError: when a supplied checksum does not match the payload.
    """
    response = requests.get(url, stream=True)
    # Fail fast on HTTP errors instead of hashing/saving an error page.
    response.raise_for_status()

    owns_progress = progress is None
    if owns_progress:
        if total_size is None:
            # Content-Length may be absent (e.g. chunked transfer encoding);
            # the original int(None) crashed here. tqdm accepts total=None.
            content_length = response.headers.get('content-length')
            total_size = int(content_length) if content_length is not None else None
        progress = tqdm(
            desc="Downloading",
            total=total_size,
            unit='iB',
            unit_scale=True,
            unit_divisor=1024)

    hash_md5 = hashlib.md5() if expected_md5 is not None else None
    hash_sha1 = hashlib.sha1() if expected_sha1 is not None else None

    try:
        for data in response.iter_content(chunk_size):
            if hash_md5 is not None:
                hash_md5.update(data)
            if hash_sha1 is not None:
                hash_sha1.update(data)
            io.write(data)
            progress.update(len(data))
    finally:
        # Release the connection; close the bar only if we created it here.
        response.close()
        if owns_progress:
            progress.close()

    # Raise (not assert) so verification survives `python -O`.
    if hash_md5 is not None and hash_md5.hexdigest() != expected_md5:
        raise ValueError(f"MD5 mismatch for {url}")
    if hash_sha1 is not None and hash_sha1.hexdigest() != expected_sha1:
        raise ValueError(f"SHA1 mismatch for {url}")


def cli_download_wikipedia_dump(lang: str, date: str, dir_path: os.PathLike | str):
    """Download every file of a Wikipedia articles-multistream dump.

    Fetches ``dumpstatus.json`` for the (*lang*, *date*) dump, checks that
    the ``articlesmultistreamdump`` job has finished, then downloads each
    listed file into *dir_path* with MD5/SHA1 verification. Each file is
    written under a ``.downloading`` temporary name and renamed on success,
    so a partial download is never left under its final name.

    Raises:
        RuntimeError: if the dump job is not at status "done".
    """
    confirm_clear_dir_path(dir_path)
    # First run: the target directory may not exist yet.
    os.makedirs(dir_path, exist_ok=True)

    dump_info_io = BytesIO()
    download(make_dump_info_file_url(lang, date), dump_info_io)
    dump_info = json.loads(dump_info_io.getvalue())

    # Look the job entry up once instead of repeating the nested indexing.
    job_info = dump_info['jobs']['articlesmultistreamdump']

    dump_status = job_info['status']
    if dump_status != 'done':
        raise RuntimeError(f"Specified dump {lang=}, {date=} is at status={dump_status}, expected status=done")

    dump_files_info = job_info["files"]
    dump_files_total_size = sum(info['size'] for info in dump_files_info.values())
    dump_files_total_num = len(dump_files_info)
    # One shared bar spanning the total byte count of all files.
    progress = tqdm(
        desc="Downloading",
        total=dump_files_total_size,
        unit='iB',
        unit_scale=True,
        unit_divisor=1024)
    try:
        for i, (name, info) in enumerate(dump_files_info.items()):
            file_path = os.path.join(dir_path, name)
            file_path_downloading = file_path + ".downloading"

            progress.desc = f"Downloading {i + 1}/{dump_files_total_num} files"
            with open(file_path_downloading, "wb") as fp:
                download(
                    base_url + info['url'], fp,
                    progress=progress,
                    expected_md5=info['md5'],
                    expected_sha1=info['sha1'])

            # Publish only after checksum verification succeeded.
            os.rename(file_path_downloading, file_path)
    finally:
        progress.close()


# main

if __name__ == '__main__':
    # Hard-coded target: Chinese Wikipedia, dump of 2023-11-01.
    target_lang = "zh"
    target_date = "20231101"
    target_dir = os.path.join(
        datasets_dir_path, "wikipedia", target_lang, target_date)

    cli_download_wikipedia_dump(target_lang, target_date, target_dir)
