import os
import threading

import requests
import time
import re
from tqdm import tqdm
from bs4 import BeautifulSoup


def create_dir(dir_path):
    """Ensure directory *dir_path* exists, printing which case occurred."""
    if os.path.exists(dir_path):
        print(f'[*] 文件夹 {dir_path} 已存在')
        return
    os.makedirs(dir_path)
    print(f'[*] 文件夹 {dir_path} 创建成功')


def get_hero_id(detail_url):
    """Extract the numeric hero id from a detail-page URL.

    Args:
        detail_url: URL ending in '/<id>.shtml', e.g. '.../herodetail/105.shtml'.

    Returns:
        The hero id as an int.

    Raises:
        ValueError: if the URL does not end in '/<digits>.shtml'.
    """
    match = re.search(r"/(\d+)\.shtml$", detail_url)
    if match is None:
        # Fail loudly with a clear message instead of the original code's
        # opaque AttributeError on match.group(1).
        raise ValueError(f'无法从 {detail_url} 中解析英雄 id')
    return int(match.group(1))


def get_hero_skin_num(img_name):
    """Number of skins encoded in a '|'-separated data-imgname string."""
    return len(img_name.split('|'))


def get_hero_skin_list(img_name):
    """Return every run of CJK characters (plus '·') found in *img_name*."""
    cjk_run = re.compile(r'[\u4e00-\u9fa5·]+')
    return cjk_run.findall(img_name)


def get_hero_skin_url(id_num, index):
    """Build the big-skin image URL for hero *id_num*, skin slot *index* (1-based)."""
    base = 'https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/'
    return f'{base}{id_num}/{id_num}-bigskin-{index}.jpg'


def download_file_with_bar(url, path):
    """Download *url* to *path* with a tqdm progress bar; skip if *path* exists.

    Uses the module-level ``headers`` for the request. Best-effort: on a
    non-200 response it prints a message and returns without writing.
    """
    if os.path.exists(path):
        print(f'[*] 文件 {path} 已存在!')
        return
    block_size = 1024
    # 'with' closes the streamed connection deterministically (the original
    # leaked it); timeout bounds connect/read waits instead of hanging forever.
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        if response.status_code != 200:
            # Don't save an HTML error page under a .jpg name.
            print(f'[*] 下载失败 {url}: {response.status_code}')
            return
        total_size = int(response.headers.get('content-length', 0))
        with open(path, 'wb') as f, tqdm(
                desc=path,
                total=total_size,
                unit='iB',
                unit_scale=True,
                unit_divisor=1024,
        ) as bar:
            for data in response.iter_content(block_size):
                bar.update(len(data))
                f.write(data)


def download_file(url, path):
    """Download *url* to *path* without a progress bar; skip if *path* exists.

    Uses the module-level ``headers`` for the request. Best-effort: on a
    non-200 response it prints a message and returns without writing.
    """
    if os.path.exists(path):
        print(f'[*] 文件 {path} 已存在!')
        return
    # 'with' closes the streamed connection deterministically (the original
    # leaked it); timeout bounds connect/read waits instead of hanging forever.
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        if response.status_code != 200:
            # Don't save an HTML error page under a .jpg name.
            print(f'[*] 下载失败 {url}: {response.status_code}')
            return
        print(f'[*] 文件 {path} 下载中...')
        block_size = 1024
        with open(path, 'wb') as f:
            for data in response.iter_content(block_size):
                f.write(data)


def download_single_thread(dld_list):
    """Download each (url, save-path) pair sequentially, pausing 2s between files."""
    for task in dld_list:
        skin_url, save_name = task
        download_file_with_bar(skin_url, save_name)
        time.sleep(2)


def download_multi_thread(dld_list):
    """Download every (url, save-path) pair concurrently, one thread per file.

    Blocks until all downloads have finished.
    """
    workers = [
        threading.Thread(target=download_file, args=task)
        for task in dld_list
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


# ---- script body: scrape the hero roster, then fetch every skin image ----
save_path = 'images/'
# Browser-like User-Agent sent with every request; presumably needed so the
# site serves real pages instead of rejecting the default requests UA —
# TODO confirm.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 '
                  'Safari/537.36 '
}

print('[*] 获取英雄信息列表')
# herolist.json is a JSON array; each entry's 'ename' is the numeric hero id
# used in the detail-page URL.
res = requests.get('https://pvp.qq.com/web201605/js/herolist.json', headers=headers)
hero_info_list = res.json()

hero_detail_urls = []
for hero_info in hero_info_list:
    hero_detail_urls.append('https://pvp.qq.com/web201605/herodetail/' + str(hero_info['ename']) + '.shtml')
print('[*] 英雄数：', len(hero_detail_urls))

create_dir(save_path)
for hero_detail_idx, hero_detail_url in enumerate(hero_detail_urls):
    # The id is parsed back out of the URL built above (see get_hero_id).
    hero_id = get_hero_id(hero_detail_url)

    hero_detail_res = requests.get(hero_detail_url, headers=headers)
    print('[*] 进度：', hero_detail_idx + 1, '/', len(hero_detail_urls))
    print('[*] 开始访问：', hero_detail_url)
    print('[*] 状态码：', hero_detail_res.status_code)
    # NOTE(review): `break` aborts the whole run on the first non-200 page;
    # `continue` would skip just this hero — confirm which is intended.
    if hero_detail_res.status_code != 200:
        break
    # Detail pages are GBK-encoded; set the encoding before reading .text.
    hero_detail_res.encoding = 'gbk'

    hero_detail_soup = BeautifulSoup(hero_detail_res.text, 'lxml')
    # Skin names are packed into this element's 'data-imgname' attribute,
    # '|'-separated (entries may carry an '&<n>' suffix).
    img_elements = hero_detail_soup.select('.pic-pf-list.pic-pf-list3')
    img_name_str = img_elements[0].get('data-imgname')
    # Hero display name, used as the per-hero folder name.
    hero_name_elements = hero_detail_soup.select('.cover > .cover-name')
    hero_name = hero_name_elements[0].text
    hero_dir = hero_name + '/'

    # NOTE(review): skin_num is computed but never used below;
    # len(hero_skin_list) serves the same purpose.
    skin_num = get_hero_skin_num(img_name_str)

    # Chinese skin names extracted from the attribute string.
    hero_skin_list = get_hero_skin_list(img_name_str)
    print(f'[*] 英雄名：{hero_name}')
    print(f'[*] 皮肤列表：{hero_skin_list}')

    create_dir(save_path + hero_dir)

    # Build (url, save-path) pairs; skin slots on the CDN are 1-based.
    download_list = []
    for idx, skin_name in enumerate(hero_skin_list):
        download_url = get_hero_skin_url(hero_id, idx + 1)
        save_name = save_path + hero_dir + skin_name + '.jpg'
        download_list.append((download_url, save_name))

    # One thread per skin; download_multi_thread joins them all before returning.
    download_multi_thread(download_list)

    # Be polite to the server between heroes.
    time.sleep(1)
