"""
SE SE ALL
Download all "se se" images from a page that match specific criteria.
"""

import re
from datetime import datetime
from requests import Session
import time
from typing import List, Optional, Dict, Iterable, Tuple
from dataclasses import dataclass, field
from lxml import etree
from tqdm import tqdm
import os
from pathlib import Path
from enum import Enum
import jsonpickle as jpkl
from crc32c import crc32c

import rock_hammer as rh


DEFAULT_UA = r"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0"
FOLDER = Path("D://Pictures/SESE")
CACHE_FOLDER = FOLDER / "# cache"
DATE_FORMAT = r"%Y-%m-%d %H:%M:%S"


def create_default_session() -> Session:
    rs = Session()
    rs.headers["User-Agent"] = DEFAULT_UA
    return rs


@dataclass
class ImageTask:
    """A single image download job produced by page analysis."""

    link: str  # direct URL of the image file
    ext: str  # file extension including the dot (this script always uses ".webp")
    header: dict[str, str]  # extra HTTP headers sent with the request (e.g. Referer)


@dataclass
class PageResult:
    """Analysis result for one album page: its URL, title and all image tasks."""

    link: str  # URL of the album's first page
    title: str  # page title, used to name the download subfolder
    img_links: List[ImageTask]  # one task per pagination page of the album


def download_all_img(page: PageResult, s: Session, *, overwrite=False):
    """Download every image task of *page* into FOLDER/<sanitized title>/.

    Files are named ``{index}_{crc32c(link)}{ext}``. Existing files are kept
    unless *overwrite* is true. A 404 response is logged and skipped; any
    other non-200 status raises ``rh.Ops``.
    """
    # Create a per-page subfolder based on the sanitized page title.
    folder_name = rh.file_name_regulate(page.title).strip()
    to_folder = FOLDER / folder_name
    to_folder.mkdir(exist_ok=True)

    rh.sys_print("-> start downloading image...")
    for i, task in enumerate(tqdm(page.img_links), start=1):
        # Stable name: position on the page + checksum of the source URL.
        link_crc = crc32c(task.link.encode("utf-8"))
        file_path = to_folder / f"{i}_{link_crc}{task.ext}"

        if file_path.exists() and not overwrite:
            continue

        r = s.get(task.link, headers=task.header)
        if r.status_code != 200:
            print()
            rh.sys_print(f"!> request: {task.link}")
            rh.sys_print(f"!> header:")
            print(task.header)
            rh.sys_print(f"!>status code: {r.status_code}")
            print("==================")
            print(r.text)
            print("==================")

            if r.status_code == 404:
                rh.sys_print(f"!> skip")
                # BUG FIX: the original fell through here and wrote the
                # 404 error body to disk as if it were the image.
                continue
            raise rh.Ops("Something error downloading img.")
        # TODO: network and io exceptions
        file_path.write_bytes(r.content)
        time.sleep(0.5)  # be polite to the server


def build_page_link_list(a_link: str, page_count) -> List[str]:
    """Return the paginated sibling links ``prefix-1.html`` .. ``prefix-N.html``.

    If *a_link* already carries a ``-<n>.html`` page suffix (a dash within the
    last few characters), everything from that dash on is stripped to obtain
    the prefix; otherwise only the ``.html`` extension is removed.
    """
    last_dash = a_link.rfind("-")
    has_page_suffix = last_dash != -1 and last_dash >= len(a_link) - 10
    prefix = a_link[:last_dash] if has_page_suffix else a_link.removesuffix(".html")
    return [f"{prefix}-{n}.html" for n in range(1, page_count + 1)]


def analyze_se_page(link: str, s: Session) -> PageResult:
    """Fetch an album page, walk all its pagination pages, and collect image tasks.

    The finished result is cached as JSON under CACHE_FOLDER (keyed by the
    sanitized title); individual pagination pages are cached as HTML so a
    re-run does not refetch them.

    Raises rh.Ops when the first page cannot be fetched.
    """
    rh.sys_print("-> getting page...")
    r = s.get(link)
    if not r.ok:
        raise rh.Ops(f"Something error fetching page -> {link} with code {r.status_code}")
    r.encoding = "utf-8"
    ph = etree.HTML(r.text)  # type: ignore

    title = ph.cssselect("h1.title")[0].text
    rh.sys_print(f"#> title: {title}.")

    al = ph.cssselect("div#pages > a")  # pagination links at the bottom of the page
    # The second-to-last pagination anchor holds the total page count.
    page_count = int(al[-2].text)
    rh.sys_print(f"-> image count: {page_count}")

    valid_name = rh.file_name_regulate(title).strip()
    CACHE_FOLDER.mkdir(parents=True, exist_ok=True)  # make sure cache dir exists before writing
    cache_path = CACHE_FOLDER / (valid_name + ".json")
    if cache_path.exists():
        rh.sys_print("-> found task cache, load it")
        return jpkl.loads(cache_path.read_text())  # type: ignore

    img_page_list = build_page_link_list(link, page_count)

    rh.sys_print("-> getting image pages...")

    task_list = []
    # Visit every pagination page and extract its (single) image link.
    for page_link in tqdm(img_page_list):
        if page_link == link:
            img_page = ph  # the first page was already fetched and parsed above
        else:
            link_crc = crc32c(page_link.encode("utf-8"))
            page_cache_path = CACHE_FOLDER / (valid_name + f" % {link_crc}.html")

            if page_cache_path.exists():
                img_page = etree.HTML(page_cache_path.read_text())  # type: ignore
            else:
                r = s.get(page_link)
                if not r.ok:
                    # BUG FIX: report the page actually requested, not the album link.
                    rh.sys_print(f"?> Something error fetching image page: {page_link} with code {r.status_code}")
                    rh.sys_print(f"?> Skip")
                    continue
                img_page = etree.HTML(r.text)  # type: ignore
                if img_page is None:
                    rh.sys_print(f"!> error on link: {page_link}")
                    print()
                    print(r.text)
                    # BUG FIX: the original fell through and crashed on
                    # img_page.cssselect(...) with img_page == None.
                    continue
                page_cache_path.write_text(r.text)

        img = img_page.cssselect("div.pictures > img")
        if len(img) == 0:
            # BUG FIX: report the failing pagination page, not the album link.
            rh.sys_print(f"?> Error fetching image: {page_link}, no image")
            rh.sys_print(f"?> Skip")
            continue
        elif len(img) > 1:
            rh.sys_print(f"?> More than one image selected: {page_link}")
            rh.sys_print(f"?> Choose [0]")

        img_link = img[0].attrib["src"]
        task_list.append(ImageTask(img_link, ".webp", {"Referer": page_link}))

        time.sleep(0.5)  # be polite to the server

    pr = PageResult(link, title, task_list)
    cache_path.write_text(jpkl.dumps(pr))  # type: ignore
    return pr


def get_artist_links(link: str, s: Session):
    """Collect every album link from an artist page (across all pagination pages).

    The resulting list is cached as JSON under CACHE_FOLDER, keyed by the
    crc32c of *link*.

    Raises rh.Ops when a page cannot be fetched or has an unexpected layout.
    """
    link_crc = crc32c(link.encode("utf-8"))
    CACHE_FOLDER.mkdir(parents=True, exist_ok=True)  # make sure cache dir exists before writing
    cache_path = CACHE_FOLDER / f"{link_crc}.json"
    if cache_path.exists():
        rh.sys_print("-> load cache...")
        return jpkl.loads(cache_path.read_text())

    rh.sys_print("-> getting page...")
    r = s.get(link)
    # BUG FIX: plain `assert` is stripped under `python -O`; raise like the
    # rest of the file does instead.
    if not r.ok:
        raise rh.Ops(f"Something error fetching page -> {link} with code {r.status_code}")
    ph = etree.HTML(r.text)  # type: ignore
    name = ph.cssselect("div.album_info h1")[0].text
    rh.sys_print(f"#> Name: {name}")

    al = ph.cssselect("div.pages a")
    # BUG FIX: the original asserted len(al) >= 1 but indexes al[-2], which
    # needs at least two anchors.
    if len(al) < 2:
        raise rh.Ops(f"Unexpected pagination layout on {link}")
    page_count = int(al[-2].text)
    page_list = build_page_link_list(link, page_count)

    # Hrefs on the listing are relative; prepend the scheme+host part of *link*.
    host = link[: link.find(".com") + 4]
    link_list = []
    for p in tqdm(page_list):
        if p == link:
            page = ph  # first page already fetched and parsed above
        else:
            r = s.get(p)
            if not r.ok:
                raise rh.Ops(f"Something error fetching page -> {p} with code {r.status_code}")
            page = etree.HTML(r.text)  # type: ignore

        al = page.cssselect("div.pic-list ul li a")
        if not al:
            raise rh.Ops(f"No album links found on {p}")
        link_list.extend(f'{host}{a.attrib["href"]}' for a in al)
    cache_path.write_text(jpkl.dumps(link_list))
    return link_list


def clear_cache(*, exclude_json=True):
    """Delete cached HTML pages from CACHE_FOLDER.

    With ``exclude_json=False`` the JSON task/link caches are removed as well.
    """
    suffixes = (".html",) if exclude_json else (".html", ".json")

    # Robustness: iterdir() raises FileNotFoundError if the folder was never
    # created (e.g. nothing was downloaded yet).
    if not CACHE_FOLDER.exists():
        return

    for f in CACHE_FOLDER.iterdir():
        if f.name.endswith(suffixes):
            f.unlink()  # pathlib idiom instead of os.remove


def search_text_in_list(target: Optional[str], l: List[str]) -> Optional[Iterable[str]]:
    """Return a lazy iterator over items of *l* that contain *target* as a substring.

    Returns None when *target* is None (no search requested).
    BUG FIX: the return annotation previously claimed ``Iterable[str]`` even
    though the None branch returns None.
    """
    if target is None:
        return None

    return filter(lambda s: target in s, l)


def download_artist(session, link: str, skip_to=None, exclude_link=None):
    """Download every album linked from an artist overview page.

    session      -- requests Session used for all HTTP traffic
    link         -- artist overview page URL
    skip_to      -- optional substring; albums up to and including the first
                    matching link are skipped
    exclude_link -- optional collection of album links to skip entirely
    """
    ll = get_artist_links(link, session)
    print(f"Album Count: {len(ll)}")
    print()

    if skip_to:
        rr = list(search_text_in_list(skip_to, ll))
        if not rr:
            print("Skip To Nothing")
            input()  # pause so the operator notices before downloading everything
        else:
            if len(rr) > 1:
                print("Skip To First")
            # Resume right after the first matching album.
            ll = ll[ll.index(rr[0]) + 1 :]

        print(f"Download Count: {len(ll)}")

    for album_link in ll:
        if exclude_link is not None and album_link in exclude_link:
            continue

        # BUG FIX: the original used the module-level global `s` instead of
        # the `session` parameter (and shadowed the `link` parameter with the
        # loop variable).
        pr = analyze_se_page(album_link, session)
        download_all_img(pr, session)
        print()

    clear_cache()


def download_album(session, link):
    """Analyze a single album page and download all its images.

    BUG FIX: the original ignored the `session` parameter and used the
    module-level global `s` instead.
    """
    pr = analyze_se_page(link, session)
    download_all_img(pr, session)


if __name__ == "__main__":
    # Shared session for the whole run.
    # NOTE(review): some functions reference the global `s` directly instead
    # of their `session` parameter, so this name should not be changed.
    s = create_default_session()
    # download_album(s, );
    # Album links to skip entirely (e.g. already downloaded or unwanted).
    exclude = [
        "https://meijuntu.com/beauty/espasia_korea_ehc_054_purm_45p.html",
    ]
    download_artist(s, "https://meijuntu.com/model/zia.html", exclude_link=exclude)
