import asyncio
import ssl
import certifi
from asyncio import Semaphore
import httpx, aiohttp
import json
from bs4 import BeautifulSoup
from huangG.hg import HG
from huangG.hg_config import HG_Config as config

# Shared crawler client instance used by every coroutine in this module.
hg = HG()

async def crawler_hg(today_lid, max_concurrent=100, output_path="data_hg3.json"):
    """Concurrently fetch today's match details via aiohttp and dump them to JSON.

    Args:
        today_lid: iterable of match/league ids to fetch.
        max_concurrent: maximum number of simultaneous requests,
            enforced through a Semaphore passed to the fetcher.
        output_path: destination JSON file (previously hard-coded).
    """
    semaphore = Semaphore(max_concurrent)  # cap concurrent requests
    # ssl=False skips certificate verification for the target host.
    conn = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(connector=conn) as session:
        tasks = [hg.get_football_today_detail_aio(session, semaphore, lid) for lid in today_lid]
        results = await asyncio.gather(*tasks)  # run all fetches concurrently

    # Convert Game_Info objects (and their nested odds) to plain dicts in one
    # pass, skipping ids whose fetch returned None.
    serializable = []
    for game_info in results:
        if game_info is None:
            continue
        game_info.game_odds = [odds.to_dict() for odds in game_info.game_odds]
        serializable.append(game_info.to_dict())

    with open(output_path, "w", encoding="utf-8") as file:
        json.dump(serializable, file, indent=4, ensure_ascii=False)
    # Fix: the old message named "games.json" while data actually went to
    # data_hg3.json — report the real destination.
    print(f"数据已成功写入 {output_path} 文件")
    
    
async def httpx_hg(today_lid, max_concurrent=25, output_path="data_hg3.json"):
    """Concurrently fetch today's match details via httpx and dump them to JSON.

    Args:
        today_lid: iterable of match/league ids to fetch.
        max_concurrent: maximum number of simultaneous requests,
            enforced through a Semaphore passed to the fetcher.
        output_path: destination JSON file (previously hard-coded).
    """
    semaphore = Semaphore(max_concurrent)  # cap concurrent requests
    async with httpx.AsyncClient(headers=config.headers) as client:
        tasks = [hg.get_football_today_detail_httpx(client, semaphore, lid) for lid in today_lid]
        results = await asyncio.gather(*tasks)  # run all fetches concurrently

    # Convert Game_Info objects (and their nested odds) to plain dicts in one
    # pass, skipping ids whose fetch returned None.
    serializable = []
    for game_info in results:
        if game_info is None:
            continue
        game_info.game_odds = [odds.to_dict() for odds in game_info.game_odds]
        serializable.append(game_info.to_dict())

    with open(output_path, "w", encoding="utf-8") as file:
        json.dump(serializable, file, indent=4, ensure_ascii=False)
    # Fix: the old message named "games.json" while data actually went to
    # data_hg3.json — report the real destination.
    print(f"数据已成功写入 {output_path} 文件")

# 运行异步主函数
# Script entry point: log in, gather today's match ids, then crawl them.
if __name__ == "__main__":
    uid = hg.get_login_uid(None)
    print(f" >>> uid: {uid}")
    if uid is not None:
        print("===================================================================")
        # Ids of every football match scheduled for today.
        today_lid = hg.query_football_today_lid()
        # Crawl with the httpx client; crawler_hg is the aiohttp alternative.
        asyncio.run(httpx_hg(today_lid))
        print(f" >>> today_lid: {len(today_lid)}")



