import time
import requests
import pandas as pd
from .soup_utils import get_url_list, get_page_max_num, get_work_info
from .spider_def import delay, headers


def get_img_page_url_list(date: str, mode: str, page_num: int) -> list[str]:
    """Fetch the work-page links listed on one page of a vilipix ranking.

    :param date: Date string such as '20230913'.
    :param mode: One of 'weekly', 'daily', 'monthly'.
    :param page_num: 1-based page index to fetch.
    :return: List of work-page paths (e.g. '/illust/111588052'); an empty
        list when ``page_num`` is beyond the last available page.
    :raises ValueError: If ``mode`` is not a supported ranking type.
    :raises Exception: If the HTTP request does not return status 200.
    """
    # Validate before sleeping so bad input fails fast instead of
    # burning the crawl delay first.
    if mode not in ('weekly', 'daily', 'monthly'):
        raise ValueError(
            f"mode must be 'weekly', 'daily' or 'monthly', got {mode!r}")
    time.sleep(delay)  # polite crawl delay between requests
    url = f'https://www.vilipix.com/ranking?date={date}&mode={mode}&p={page_num}'
    print(f'[*] get url list from {url}')
    # timeout keeps a stalled connection from hanging the whole crawl
    res = requests.get(url, headers=headers, timeout=30)
    if res.status_code != 200:
        raise Exception(f'request failed with status {res.status_code}: {url}')

    # Compare against the real page count so the caller's pagination loop
    # can terminate when it walks past the last page.
    max_page_num = get_page_max_num(res.text)
    if page_num > max_page_num:
        return []

    return get_url_list(res.text, '.pix-card > .title > a')


def get_img_page_info(page_url: str) -> dict:
    """Visit a work's page and collect its information.

    :param page_url: Work-page path such as '/illust/111588052'.
    :return: Dict of work info; see ``get_work_info`` for the exact keys.
    :raises Exception: If the HTTP request does not return status 200.
    """
    print(f'[*] looking {page_url}')
    time.sleep(delay)  # polite crawl delay between requests

    url = 'https://www.vilipix.com' + page_url
    # timeout keeps a stalled connection from hanging the whole crawl
    res = requests.get(url, headers=headers, timeout=30)
    if res.status_code != 200:
        raise Exception(f'request failed with status {res.status_code}: {url}')

    return get_work_info(res.text, page_url)


def get_all_img_info(date: str, mode: str, page_start: int = 1) -> list[dict]:
    """Collect info for every image in the given ranking on the given date.

    Walks the ranking pages starting at ``page_start`` and stops at the
    first page that yields no links.

    :param date: Date string such as 20230913.
    :param mode: 'weekly', 'daily' or 'monthly' — the ranking to read.
    :param page_start: First page to fetch; defaults to 1.
    :return: List of per-image info dicts.
    """
    collected: list[dict] = []
    page = page_start
    while True:
        page_urls = get_img_page_url_list(date, mode, page)
        if not page_urls:
            # Past the last page — pagination is done.
            break
        collected.extend(get_img_page_info(u) for u in page_urls)
        page += 1
    return collected


if __name__ == '__main__':
    # Manual smoke test: dump the monthly ranking (starting at page 5)
    # for 2023-09-12 into a CSV file.
    records = get_all_img_info('20230912', 'monthly', 5)
    frame = pd.DataFrame(records)
    print(frame)
    frame.to_csv('./test_info.csv')
