# 爬取对战双方、比分，最后返回json
import ast
import re
import subprocess
import sys
import threading
import time  # used to measure end-to-end crawl duration
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError as FuturesTimeoutError
from functools import lru_cache
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

import requests
from playwright.sync_api import sync_playwright


# Process-wide requests.Session (created lazily by get_global_session) plus
# the lock that serializes its creation across worker threads.
global_session = None
session_lock = threading.Lock()

def get_global_session():
    """Return the shared requests.Session, building it on first use.

    Creation is guarded by ``session_lock`` so concurrent threads never
    construct two sessions. The session carries default headers that
    mimic a desktop Chrome browser visiting vip.titan007.com.
    """
    global global_session
    with session_lock:
        if global_session is not None:
            return global_session
        session = requests.Session()
        # Default headers applied to every request made through this session.
        session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Referer': 'https://vip.titan007.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2'
        })
        global_session = session
        return global_session

def ensure_playwright_installed():
    """Probe whether the Playwright Chromium browser is usable.

    Launches (and immediately closes) a headless Chromium; returns True
    on success, False after printing the error otherwise.
    """
    try:
        with sync_playwright() as pw:
            pw.chromium.launch(headless=True).close()
    except Exception as exc:
        print(f"Playwright 未正确安装: {exc}")
        return False
    return True

def install_playwright():
    """Install the Playwright Chromium browser via the current interpreter.

    Returns True when ``python -m playwright install chromium`` exits
    cleanly, False (after printing the error) otherwise.
    """
    cmd = [sys.executable, "-m", "playwright", "install", "chromium"]
    try:
        print("正在安装 Playwright 浏览器...")
        subprocess.check_call(cmd)
        print("Playwright 浏览器安装完成")
    except Exception as exc:
        print(f"Playwright 安装失败: {exc}")
        return False
    return True

def pare_url(url, pageNo=1):
    """Return *url* with its ``p`` query parameter set to *pageNo*.

    Every other component (scheme, host, path, remaining query
    parameters, fragment) is preserved; ``p`` is appended when absent.
    """
    parts = urlparse(url)
    params = parse_qs(parts.query)
    params['p'] = [str(pageNo)]
    return urlunparse((
        parts.scheme,
        parts.netloc,
        parts.path,
        parts.params,
        urlencode(params, doseq=True),
        parts.fragment,
    ))

@lru_cache(maxsize=128)
def extract_js_data_with_regex(content):
    """Extract the ``goalPageInfo`` and ``goalPageData`` JavaScript arrays
    from a goalData.aspx page body.

    The arrays are JS literals that are close enough to Python syntax to
    parse with ``ast.literal_eval`` after mapping ``null``/``true``/``false``
    to their Python equivalents. Note: ``literal_eval`` accepts
    single-quoted strings natively, so no quote rewriting is performed
    (a blanket ' -> " swap would corrupt names containing apostrophes).

    Args:
        content: the (already decoded, newline-flattened) page text.
    Returns:
        (goalPageInfo, goalPageData); either element is None when the
        corresponding variable is missing or cannot be parsed.

    Cached by LRU on the full page text to skip re-parsing identical
    responses; the cache holds whole pages, so the maxsize bound matters.
    """
    try:
        # Match the goalPageInfo array assignment.
        goal_page_info_pattern = r'var\s+goalPageInfo\s*=\s*(\[.*?\])\s*;'
        goal_page_info_match = re.search(goal_page_info_pattern, content, re.DOTALL)

        # Match the goalPageData array assignment.
        goal_page_data_pattern = r'var\s+goalPageData\s*=\s*(\[[\s\S]*?\])\s*;'
        goal_page_data_match = re.search(goal_page_data_pattern, content, re.DOTALL)

        goalPageInfo = None
        goalPageData = None

        if goal_page_info_match:
            # Drop JS escape backslashes the Python parser may choke on.
            info_str = goal_page_info_match.group(1).replace('\\', '')
            try:
                goalPageInfo = ast.literal_eval(info_str)
            except Exception as e:
                print(f"解析goalPageInfo失败: {e}")

        if goal_page_data_match:
            data_str = goal_page_data_match.group(1).replace('\\', '').replace('\t', '')
            # Map JS keywords to Python ones only as whole words, so
            # substrings inside team names (e.g. "nullarbor") stay intact.
            data_str = re.sub(r'\bnull\b', 'None', data_str)
            data_str = re.sub(r'\btrue\b', 'True', data_str)
            data_str = re.sub(r'\bfalse\b', 'False', data_str)
            try:
                goalPageData = ast.literal_eval(data_str)
            except Exception as e:
                print(f"解析goalPageData失败: {e}")

        return goalPageInfo, goalPageData

    except Exception as e:
        print(f"正则表达式提取过程中出错: {e}")
        return None, None

def _decode_gbk(response):
    """Decode a titan007 response body and flatten newlines.

    The site serves GBK-encoded pages; ``errors='ignore'`` drops any
    undecodable bytes instead of raising (so no fallback decode is
    needed), and newlines are flattened so the regex extraction can
    treat the page as a single line.
    """
    content = response.content.decode('GBK', errors='ignore')
    return content.replace('\r', '').replace('\n', ' ')


def _parse_match_item(item):
    """Convert one raw goalPageData row into a match dict, or None.

    Field positions (0=id, 3=league, 4=kickoff time, 10/11=home/away
    team, 12-15=full/first-half scores, 16=result) follow the site's JS
    payload layout — assumed from the original parser; confirm against
    live data if the site changes.
    """
    try:
        mid = item[0] if len(item) > 0 else None
        # League names arrive as "name^extra"; keep only the name.
        league = item[3].split('^')[0].strip() if len(item) > 3 else "未知联赛"

        midtime = "未知时间"
        if len(item) > 4 and item[4]:
            try:
                # e.g. "2025/10/08 19:30" -> "2025-10-08"
                date_part = item[4].split(' ')[0]
                midtime = date_part.replace('/', '-')
            except Exception:
                midtime = "未知时间"

        home_team = item[10].split('^')[0].strip() if len(item) > 10 else "未知主队"
        away_team = item[11].split('^')[0].strip() if len(item) > 11 else "未知客队"

        # Scores must be ints in the payload; anything else becomes "0".
        end_home = str(item[12]) if len(item) > 12 and isinstance(item[12], int) else "0"
        end_away = str(item[13]) if len(item) > 13 and isinstance(item[13], int) else "0"
        first_home = str(item[14]) if len(item) > 14 and isinstance(item[14], int) else "0"
        first_away = str(item[15]) if len(item) > 15 and isinstance(item[15], int) else "0"

        result = item[16] if len(item) > 16 else "未定"

        return {
            "mid": str(mid),
            "parties": f"{home_team} VS {away_team}",
            "end_half": f"{end_home}:{end_away}",
            "first_half": f"({first_home}:{first_away})",
            "result": result,
            "midtime": midtime,
            "league": league,
        }
    except (IndexError, TypeError):
        # Malformed row: skip it rather than abort the whole page.
        return None


def process_page_data(target_url):
    """Fetch one goalData page and return its matches as a list of dicts.

    Uses the shared session for connection reuse. Returns [] on any
    request failure, non-200 status, or unparseable page.
    """
    try:
        session = get_global_session()
        response = session.get(target_url, timeout=6)

        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return []

        content = _decode_gbk(response)
        goalPageInfo, goalPageData = extract_js_data_with_regex(content)
        if goalPageInfo is None or goalPageData is None:
            return []

        matches = []
        for item in goalPageData:
            match_data = _parse_match_item(item)
            if match_data is not None:
                matches.append(match_data)
        return matches

    except Exception as e:
        print(f"处理请求时出错: {e}")
        return []

def get_goal_data_urls(t, sid, cid):
    """Capture the goalData.aspx request URLs for one fixture.

    Loads the goalCount page in headless Chromium and records every
    network response whose URL contains "goalData.aspx" — these are the
    AJAX endpoints carrying the actual match data.

    Args:
        t: page type parameter of goalCount.aspx
        sid: fixture sid parameter
        cid: fixture cid parameter
    Returns:
        List of captured URLs; empty when the pre-flight request fails,
        returns an empty body, or no matching response is observed.
    """
    goal_data_urls = []
    
    def handle_response(response):
        # Network hook: keep any response served by the goal-data endpoint.
        if "goalData.aspx" in response.url:
            goal_data_urls.append(response.url)
    
    with sync_playwright() as p:
        # Launch flags trade sandboxing for startup speed (headless scrape only).
        browser = p.chromium.launch(
            headless=True,
            args=[
                '--disable-gpu',
                '--disable-dev-shm-usage',
                '--disable-setuid-sandbox',
                '--no-first-run',
                '--no-sandbox',
                '--no-zygote',
                '--single-process'
            ]
        )
        page = browser.new_page()
        
        page.on("response", handle_response)
        
        url = f'https://vip.titan007.com/count/goalCount.aspx?t={t}&sid={sid}&cid={cid}&l=0'
        print(f"正在访问: {url}")
        
        try:
            # Pre-flight with the shared requests session: cheaper than a
            # full browser load when the server has nothing for these params.
            session = get_global_session()
            response = session.get(url, timeout=8)
            
            if response.text.strip() == "":
                print("响应内容为空，请检查请求是否成功。")
                browser.close()
                return []
        except Exception as e:
            print(f"请求失败: {e}")
            browser.close()
            return []
        
        try:
            # domcontentloaded + a short settle wait is enough: the goalData
            # XHR fires early, so waiting for the full "load" event wastes time.
            page.goto(url, wait_until="domcontentloaded", timeout=15000)
            page.wait_for_timeout(500)  # brief settle time for the XHR to land
        except Exception as e:
            print(f"页面加载超时或出错: {e}")
        finally:
            browser.close()
    
    return goal_data_urls

def crawl_match_data(t, sid, cid):
    """
    Crawl the historical match data for one fixture.

    Args:
        t:   page type parameter passed to goalCount.aspx
        sid: fixture sid parameter
        cid: fixture cid parameter
    Returns:
        A list of match dicts (see process_page_data); empty on failure.
    Raises:
        Exception: when the Playwright browser cannot be (re)installed.
    """
    start_time = time.time()  # for the end-to-end duration report
    print(f"开始执行 crawl_match_data 方法，参数: t={t}, sid={sid}, cid={cid}")

    # Make sure a usable headless Chromium exists before crawling.
    if not ensure_playwright_installed():
        if not install_playwright():
            raise Exception("无法安装 Playwright 浏览器")
        if not ensure_playwright_installed():
            raise Exception("Playwright 浏览器安装后仍然无法使用")

    all_matches = []

    goal_data_urls = get_goal_data_urls(t, sid, cid)
    if not goal_data_urls:
        print("未捕获到目标请求URL，请检查网络或参数设置")
        return []

    print(f"共捕获到 {len(goal_data_urls)} 个目标请求URL")

    # Fan out one task per page across a thread pool (I/O-bound work).
    with ThreadPoolExecutor(max_workers=15) as executor:
        future_to_url = {}

        for target_url in goal_data_urls:
            # Fetch page 1 once to read goalPageInfo[0] (the page count),
            # then submit one task per page.
            try:
                session = get_global_session()
                response = session.get(target_url, timeout=6)

                if response.status_code == 200:
                    # GBK with errors='ignore' never raises, so no fallback
                    # decode is needed; flatten newlines for the regex pass.
                    content = response.content.decode('GBK', errors='ignore')
                    content = content.replace('\r', '').replace('\n', ' ')

                    goalPageInfo, goalPageData = extract_js_data_with_regex(content)

                    # Default to a single page when the count is missing.
                    total_pages = 1
                    if goalPageInfo and isinstance(goalPageInfo[0], (int, float)):
                        total_pages = int(goalPageInfo[0])

                    print(f"URL {target_url} 总共 {total_pages} 页")

                    for page_num in range(1, total_pages + 1):
                        page_url = pare_url(target_url, page_num)
                        future_to_url[executor.submit(process_page_data, page_url)] = page_url
                else:
                    print(f"请求失败，状态码: {response.status_code}")
                    # Fall back to fetching at least the first page.
                    first_page_url = pare_url(target_url, 1)
                    future_to_url[executor.submit(process_page_data, first_page_url)] = first_page_url

            except Exception as e:
                print(f"获取URL {target_url} 页数信息时出错: {e}")
                # On error, still try the first two pages.
                for page_num in (1, 2):
                    page_url = pare_url(target_url, page_num)
                    future_to_url[executor.submit(process_page_data, page_url)] = page_url

        # Collect results. A timeout here must NOT discard the matches
        # already gathered — the original let as_completed's TimeoutError
        # propagate and lose everything.
        try:
            for future in as_completed(future_to_url, timeout=20):
                try:
                    matches = future.result(timeout=10)
                    if matches:
                        all_matches.extend(matches)
                except Exception as e:
                    print(f"处理URL {future_to_url[future]} 时出错: {e}")
        except FuturesTimeoutError:
            print("部分页面在超时时间内未完成，返回已获取的数据")

    execution_time = time.time() - start_time
    print(f"crawl_match_data 方法执行完成，总耗时: {execution_time:.2f} 秒")
    print(f"总共获取到 {len(all_matches)} 场比赛数据")

    return all_matches

# 使用示例
if __name__ == "__main__":
    try:
        start_time = time.time()
        # 调用函数爬取数据
        matches_data = crawl_match_data(t = '1',sid='2726368', cid='3')
        print(f"最终获取到 {len(matches_data)} 场比赛数据，总耗时: {time.time() - start_time:.2f} 秒")
        
        if matches_data:
            print(f"\n总共获取到 {len(matches_data)} 场比赛数据")
            # 打印所有比赛数据
            # print(matches_data[0])
            # for i, match in enumerate(matches_data, 1):
            #     print(match)
            # 写入到json文件
            import json
            with open('matches_data.json', 'w', encoding='utf-8') as f:
                json.dump(matches_data, f, ensure_ascii=False, indent=4)
        else:
            print("未获取到任何比赛数据")
    except Exception as e:
        print(f"程序运行出错: {e}")