import queue
from datetime import datetime, timedelta
import time
from threading import Thread
import requests
import json
import redis

redis_client = redis.StrictRedis()

# Load the JSON config that lists the categories (title / classify / cate_id)
# to crawl. The path is relative to the working directory — NOTE(review):
# confirm the script is always launched from the directory that makes
# '../json/...' resolve correctly.
try:
    # Explicit encoding: the config contains Chinese text, so relying on the
    # platform default encoding would break on non-UTF-8 locales (e.g. Windows).
    with open('../json/bilibili_bulk_API.json', 'r', encoding='utf-8') as f:
        json_data = json.load(f)  # parse straight from the file object
except FileNotFoundError:
    print("JSON 配置文件未找到！")
    exit(1)
except json.JSONDecodeError:
    print("JSON 文件格式错误！")
    exit(1)

# API configuration. Both are intentionally left blank here; fill in the
# endpoint URL and request headers before running.
url = ''
header = {
}  # request headers


def all_bulk_info(task_queue, params):
    """Worker: drain page numbers from the shared queue, fetch each result
    page from the ranking API, and push every video record into Redis.

    Args:
        task_queue: queue.Queue of int page numbers, shared by all workers.
        params: dict with keys 'one' (top-level category), 'classify'
            (sub-category), 'time_to' / 'time_from' ('%Y%m%d' strings) and
            'cate_id' (API category id).

    Side effects:
        RPUSHes one JSON string per video onto the Redis list 'all_bulk_info'.
    """
    one = params['one']
    classify = params['classify']
    time_to = params['time_to']
    time_from = params['time_from']
    cate_id = params['cate_id']

    while True:
        # get_nowait() + Empty instead of "qsize() > 0 then get()": with
        # several workers the qsize() check can pass while another thread
        # takes the last item, leaving a blocking get() stuck forever.
        try:
            page = task_queue.get_nowait()
        except queue.Empty:
            break

        # Request parameters for this page (kept separate from the record
        # dict below — the original reused the name 'data' for both).
        query = {
            'main_ver': 'v3',
            'search_type': 'video',
            'view_type': 'hot_rank',
            'copy_right': -1,
            'new_web_tag': 1,
            'order': 'click',
            'cate_id': cate_id,
            'page': page,
            'pagesize': 30,
            'time_from': time_from,
            'time_to': time_to
        }
        data_json = None
        try:
            for _ in range(10):  # retry up to 10 times on non-200 responses
                # timeout so a hung connection cannot stall the worker forever
                response = requests.get(url, params=query, headers=header, timeout=10)
                if response.status_code == 200:
                    data_json = response.json()
                    break
            else:
                print(f"请求失败，页面 {page} 获取失败，跳过此页。")
                continue
        except Exception as e:
            print(f"请求出错: {e}")
            task_queue.put(page)  # re-queue so the page gets another attempt
            continue

        if not data_json:
            continue

        try:
            result = data_json['data']['result']
        except KeyError:
            print(f"返回的数据格式错误：{data_json}")
            continue
        if not result:
            continue

        for content in result:
            print(content)
            try:
                record = {
                    'one_classify': one,
                    'second_classify': classify,
                    'pubdate': content['pubdate'],
                    'duration': content['duration'],
                    'rank_score': content['rank_score'],
                    'author': content['author'],
                    'title': content['title'],
                    'arcurl': content['arcurl']
                }
                print(record)
                redis_client.rpush('all_bulk_info', json.dumps(record, ensure_ascii=False))
            except Exception as e:
                print(f"处理数据时出错: {e}")


def info_init():
    """Crawl every category in the loaded config.

    For each category, walk backwards through time in 93-day windows: probe
    page 1 of the window to learn the page count, then fan the pages out to
    8 worker threads (all_bulk_info). A category is abandoned after 10
    consecutive empty windows or a persistent request failure.
    """
    for result in json_data:
        time_to = datetime.now()  # start from "now" and walk backwards
        one, classify, cate_id = result['title'], result['classify'], result['cate_id']
        result_zero_times = 0  # consecutive windows with zero results
        while True:
            time_from = time_to - timedelta(days=93)  # 93-day window
            print(f"请求时间范围：{time_from.strftime('%Y%m%d')} 至 {time_to.strftime('%Y%m%d')}")

            probe = {
                'main_ver': 'v3',
                'search_type': 'video',
                'view_type': 'hot_rank',
                'copy_right': -1,
                'new_web_tag': 1,
                'order': 'click',
                'cate_id': cate_id,
                'page': 1,
                'pagesize': 30,
                'time_from': time_from.strftime('%Y%m%d'),
                'time_to': time_to.strftime('%Y%m%d')
            }

            data_json = None
            try:
                for _ in range(10):  # retry up to 10 times on non-200 responses
                    # timeout so a hung connection cannot stall the crawl
                    response = requests.get(url, params=probe, headers=header, timeout=10)
                    if response.status_code == 200:
                        data_json = response.json()
                        break
                else:
                    # BUG FIX: the original `continue` retried the same time
                    # window forever on persistent failure; give up on this
                    # category instead (same as the exception path below).
                    print(f"请求失败：{probe}，跳过当前时间段数据。")
                    break
            except Exception as e:
                print(f"请求数据时出错: {e}")
                break

            if data_json:
                # Guard against malformed payloads — the original indexed
                # ['data']['numPages'] directly and a KeyError killed the run.
                payload = data_json.get('data') or {}
                num_pages = payload.get('numPages', 0)    # total page count
                num_results = payload.get('numResults', 0)  # total hit count

                if not num_results:
                    result_zero_times += 1
                    print(
                        f"未找到结果: {time_from.strftime('%Y%m%d')} 至 {time_to.strftime('%Y%m%d')}，尝试次数: {result_zero_times}/10")
                else:
                    print(f"找到结果: {num_results}，页数: {num_pages}")
                    result_zero_times = 0

                # 10 empty windows in a row: assume the category is exhausted.
                if result_zero_times >= 10:
                    print(f"连续 10 次没有结果，退出分类: {classify}")
                    break

                params = {
                    'one': one,
                    'classify': classify,
                    'time_to': time_to.strftime('%Y%m%d'),
                    'time_from': time_from.strftime('%Y%m%d'),
                    'cate_id': cate_id
                }
                task_queue = queue.Queue()
                # BUG FIX: the API is 1-based (the probe above uses page=1),
                # but the original enqueued range(num_pages) = 0..num_pages-1,
                # requesting a bogus page 0 and never fetching the last page.
                for page in range(1, num_pages + 1):
                    task_queue.put(page)

                # Fan out to 8 workers and wait for them to drain the queue.
                thread_list = []
                for _ in range(8):
                    t = Thread(target=all_bulk_info, args=(task_queue, params))
                    t.start()
                    thread_list.append(t)

                for thread in thread_list:
                    thread.join()

                time_to = time_from  # slide the window one step further back


if __name__ == '__main__':
    info_init()  # entry point: start the crawl
