import argparse
import datetime
import random
import time
import typing

import requests
from bs4 import BeautifulSoup

from tabulate import tabulate
from datetime import datetime, timedelta

# Column indices into each scraped table row `d`:
# d[1]: nickname; d[2]: price; d[4]: level; d[5]: time at which the item's
# public-listing period ends (i.e. when it becomes purchasable).
name_idx = 1
price_idx = 2
level_idx = 4
can_by_time_idx = 5  # NOTE(review): likely a typo for "can_buy_time_idx"; kept as-is


def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
      argparse.Namespace: the parsed arguments (quality, start_page,
      end_page, all, gap, time_limit).
    """
    parser = argparse.ArgumentParser(description="MOE")
    # 'p'/'g'/'j' select the store "ruler" id and keyword set downstream.
    parser.add_argument("--quality", type=str, default='p')
    parser.add_argument("--start-page", type=int, default=1)
    parser.add_argument("--end-page", type=int, default=100)
    # Keep every row instead of keyword-filtering by nickname.
    parser.add_argument("--all", action='store_true')
    # Base delay (seconds) between page fetches; jittered in get_horse().
    parser.add_argument("--gap", type=float, default=5.0)
    # "--time-limit" added as an alias so the flag matches the hyphenated
    # style of the other options; "--time_limit" is kept for compatibility
    # and still determines the dest (args.time_limit).
    parser.add_argument("--time_limit", "--time-limit", type=int, default=24)

    return parser.parse_args()


def get_raw_html_origin(url: str, verbose: bool = True) -> typing.Optional[str]:
    """Fetch the raw HTML document at *url*.

    :param url: the URL to fetch
    :param verbose: whether to print error details when the request fails
    :return: the HTML text, or None on any request/HTTP error
    """
    browser_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
    }
    try:
        response = requests.get(url=url, headers=browser_headers, timeout=5)
        response.raise_for_status()
        return response.text
    except Exception as err:
        # Best-effort fetch: any failure (timeout, HTTP error, ...) yields None.
        if verbose:
            print(err.args)
        return None


def get_horse_data(quality, pg=1, all=False, time_limit=24):
    """Scrape one page of the horse store and return the filtered rows.

    :param quality: 'p', 'g' or 'j' — selects the store "ruler" id and the
        nickname keyword set used for filtering
    :param pg: 1-based page number to fetch
    :param all: when True, keep every row instead of keyword-filtering by
        nickname (shadows the builtin, but renaming would break keyword
        callers — kept for backward compatibility)
    :param time_limit: drop rows whose listing period ended more than this
        many hours ago
    :return: list of rows with a header row first, or None when the page
        could not be fetched or contains no data rows
    """
    # Nickname keywords that mark an interesting listing.
    if quality == 'j':
        keys = ['追', '8282', '8284', '8482', '8484', '82 82', '82 84', '84 82', '84 84', '82s82c', '82s84c', '84s82c', '84s84c', '觉醒']
    else:
        keys = [
            '金',
            '冲',
            '的卢',
            '草', 'cao', '艹', '屮',
            '神威',
            '名种',
            '力大无穷',
        ]
    # Nickname keywords that disqualify a listing (plain coat colours).
    not_keys = [
        '黑', '白', '栗', '褐', '杂'
    ]

    # Store "ruler" ids per quality tier ('g' and 'j' share one).
    ruler_dict = {
        'p': '216173881625411584',
        'g': '288231475663339520',
        'j': '288231475663339520'
    }

    url = f'https://bili.bi/horse-store-data/?Ruler={ruler_dict[quality]}&PageNo={pg}'
    text = get_raw_html_origin(url)
    if text is None:
        print(f'error get page {pg}')
        return None
    soup = BeautifulSoup(text, 'lxml')
    table = soup.find_all('table')[0]

    # Extract the text of every cell, header row included.
    data = []
    for row in table.find_all('tr'):
        cols = row.find_all(['td', 'th'])
        data.append([ele.text.strip() for ele in cols])
    if len(data) == 1:
        # Header only — this page has no listings.
        return None

    # One consistent timestamp for the whole page (hoisted out of the loop).
    current_time = datetime.now()
    processed_data = [['昵称', '金额', '等级', '时间差']]
    for d in data[1:]:
        # End of the item's public-listing period, e.g. "2024-01-02 03:04:05".
        given_time = datetime.strptime(d[can_by_time_idx], "%Y-%m-%d %H:%M:%S")
        minutes_difference = int((given_time - current_time).total_seconds() // 60)
        if minutes_difference < 0:
            # Listing period already over: skip rows older than `time_limit`
            # hours, otherwise show how many hours ago it ended.
            if given_time < current_time - timedelta(hours=time_limit):
                continue
            left_hours = int((current_time - given_time).total_seconds() // (60 * 60))
            d[can_by_time_idx] = f'{left_hours} h'
        else:
            d[can_by_time_idx] = f'需等{minutes_difference} min'
        d[level_idx] = int(d[level_idx])

        if not all:
            name = d[name_idx]
            # Reject any blacklisted nickname, then require at least one
            # wanted keyword (substring membership, was `.find() != -1`).
            if any(key in name for key in not_keys):
                continue
            if not any(key in name for key in keys):
                continue
        processed_data.append([d[name_idx], d[price_idx], d[level_idx], d[can_by_time_idx]])

    return processed_data


def get_horse(args):
    """Crawl pages [start_page, end_page) of the store and print listings.

    Prints each page's matching rows as it goes, stops early when a page
    fails to load or is empty, then prints everything collected sorted by
    level. A randomized delay around ``args.gap`` seconds separates fetches.
    """
    collected = []
    base_gap = args.gap
    for page in range(args.start_page, args.end_page):
        print('*' * 30 + f'page={page}' + '*' * 30)
        page_rows = get_horse_data(args.quality, pg=page, all=args.all, time_limit=args.time_limit)
        if page_rows is None:
            print('*' * 30 + f'page={page}[end]' + '*' * 30)
            break
        if len(page_rows) != 1:
            print(tabulate(page_rows, headers="firstrow", tablefmt="grid"))
            collected.extend(page_rows[1:])

        # Jittered politeness delay: uniform in [0.5*gap, 1.5*gap] seconds.
        time.sleep(random.uniform(base_gap * 0.5, base_gap * 1.5))
    collected.sort(key=lambda row: row[2])
    print('\n\n' + '*' * 30 + f'final' + '*' * 30)
    print(tabulate(collected, headers=['昵称', '金额', '等级', '时间差'], tablefmt="grid"))


if __name__ == '__main__':
    # Dependencies: pip install beautifulsoup4 lxml tabulate
    # Example run: python moe.py --quality p --start-page 1 --end-page 30 --all
    get_horse(get_arguments())
