import os
import requests
import pandas as pd
import time
import json
from datetime import datetime

def get_total_ssq_count():
    """Ask the CWL draw-notice API how many SSQ draws exist in total.

    Issues a single one-record query and reads the "total" field of the
    JSON response.

    Returns:
        The total draw count reported by the API, or 0 when the request
        fails, the HTTP status is not 200, or the API state is non-zero.
    """
    # Browser-like headers copied from a real session; the endpoint tends
    # to reject plain programmatic requests.
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "en-US,en;q=0.5",
        "Connection": "keep-alive",
        "Cookie": "HMF_CI=0a1ae282ec0116372d93f5f85c031b0a3dd9f0132240139eb3658000c776e76285ed76d9599e262b46532c0655ace5923db527bcc87f22e57f1d8c6d42200c3d9b; 21_vq=12",
        "Host": "www.cwl.gov.cn",
        "Priority": "u=0",
        "Referer": "https://www.cwl.gov.cn/ygkj/wqkjgg/ssq/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
        "X-Requested-With": "XMLHttpRequest"
    }
    # Request a single record: we only care about the "total" counter.
    query = {
        "name": "ssq",
        "issueCount": "",
        "issueStart": "",
        "issueEnd": "",
        "dayStart": "",
        "dayEnd": "",
        "pageNo": "1",
        "pageSize": "1",
        "week": "",
        "systemType": "PC"
    }
    url = "https://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/findDrawNotice"

    try:
        response = requests.get(url, headers=headers, params=query, timeout=15)
        if response.status_code != 200:
            return 0
        payload = response.json()
        if payload.get("state") != 0:
            return 0
        total = payload.get("total", 0)
        print(f"双色球总期数: {total}")
        return total
    except Exception as exc:
        print(f"获取总期数失败: {str(exc)}")
        return 0

def get_existing_periods(data_dir='data', main_filename='ssq_data_20250912_075157.csv'):
    """Collect the draw numbers ("期号") already stored in the local main CSV.

    Args:
        data_dir: directory holding the CSV files.
        main_filename: name of the main data file inside *data_dir*.

    Returns:
        A set of draw-number strings; empty when the directory, the file,
        or the expected column is missing, or when the file cannot be read.
    """
    found = set()

    # No data directory at all: nothing local exists yet.
    if not os.path.exists(data_dir):
        return found

    main_path = os.path.join(data_dir, main_filename)
    if os.path.exists(main_path):
        try:
            frame = pd.read_csv(main_path)
            if '期号' in frame.columns:
                # Normalise to strings so they compare against API codes.
                found.update(frame['期号'].astype(str).tolist())
        except Exception as err:
            print(f"读取主文件 {main_filename} 时出错: {str(err)}")

    print(f"本地已存在 {len(found)} 期数据")
    return found

def get_missing_ssq_data(existing_periods, total_count):
    """Fetch SSQ draw records that are not yet present locally.

    Pages through the CWL draw-notice API, collecting every record whose
    draw number ("code") is not in *existing_periods*.  Fetching stops on
    the first empty page, the first page that yields no new records, any
    non-200 HTTP status, or an API-level error state.

    Args:
        existing_periods: set of draw-number strings already stored locally.
        total_count: total draw count reported by the API.  Currently unused
            inside this function; kept for interface compatibility.

    Returns:
        A list of row dicts (one per new draw, keyed by the CSV column
        names) — possibly empty — or None when an unexpected exception
        aborts the fetch entirely.
    """
    # Browser-like headers copied from a real session; the endpoint tends
    # to reject plain programmatic requests.
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "en-US,en;q=0.5",
        "Connection": "keep-alive",
        "Cookie": "HMF_CI=0a1ae282ec0116372d93f5f85c031b0a3dd9f0132240139eb3658000c776e76285ed76d9599e262b46532c0655ace5923db527bcc87f22e57f1d8c6d42200c3d9b; 21_vq=12",
        "Host": "www.cwl.gov.cn",
        "Priority": "u=0",
        "Referer": "https://www.cwl.gov.cn/ygkj/wqkjgg/ssq/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
        "X-Requested-With": "XMLHttpRequest"
    }
    
    try:
        base_url = "https://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/findDrawNotice"
        all_data = []
        page = 1
        has_more = True
        page_size = 50  # records requested per API page
        
        while has_more:
            params = {
                "name": "ssq",
                "issueCount": "",
                "issueStart": "",
                "issueEnd": "",
                "dayStart": "",
                "dayEnd": "",
                "pageNo": str(page),
                "pageSize": str(page_size),
                "week": "",
                "systemType": "PC"
            }
            
            print(f"正在获取第 {page} 页数据...")
            
            response = requests.get(base_url, headers=headers, params=params, timeout=15)
            
            if response.status_code != 200:
                print(f"请求失败，状态码: {response.status_code}")
                break
            
            try:
                result = response.json()
                if result.get("state") == 0 and "result" in result:
                    data_list = result["result"]
                    
                    # An empty page means we paged past the last draw.
                    if not data_list:
                        has_more = False
                        break
                    
                    new_data_count = 0
                    for item in data_list:
                        period = item.get("code", "")
                        
                        # Skip draws that are already stored locally.
                        if period in existing_periods:
                            continue
                        
                        # Flatten the API record into the CSV row schema.
                        # prizegrades is guarded throughout because it may be
                        # missing, empty, or shorter than two entries.
                        data_item = {
                            '期号': period,
                            '开奖日期': item.get("date", ""),
                            '红球': " ".join(item.get("red", "").split(",")) if item.get("red") else "",
                            '蓝球': item.get("blue", ""),
                            '一等奖注数': item.get("prizegrades", [{}])[0].get("typenum", "0") if item.get("prizegrades") else "0",
                            '一等奖金额(元)': item.get("prizegrades", [{}])[0].get("typemoney", "0") if item.get("prizegrades") else "0",
                            '二等奖注数': item.get("prizegrades", [{}])[1].get("typenum", "0") if item.get("prizegrades") and len(item.get("prizegrades", [])) > 1 else "0",
                            '二等奖金额(元)': item.get("prizegrades", [{}])[1].get("typemoney", "0") if item.get("prizegrades") and len(item.get("prizegrades", [])) > 1 else "0",
                            '销售额(元)': item.get("sales", ""),
                            '奖池金额(元)': item.get("poolmoney", "")
                        }
                        all_data.append(data_item)
                        new_data_count += 1
                    
                    print(f"第 {page} 页获取到 {len(data_list)} 条数据，其中 {new_data_count} 条是新数据")
                    
                    # NOTE(review): stopping at the first page with zero new
                    # records assumes the API returns draws newest-first, so
                    # everything older is already stored — confirm with the API.
                    if new_data_count == 0:
                        print("没有更多新数据，停止获取")
                        has_more = False
                        break
                    
                    page += 1
                    time.sleep(1)  # throttle between pages to be polite to the server
                    
                else:
                    print(f"API返回错误: {result.get('message', '未知错误')}")
                    has_more = False
                    
            except json.JSONDecodeError as e:
                print(f"JSON解析错误: {e}")
                has_more = False
                
        return all_data
        
    except Exception as e:
        # Unexpected failure: discard partial results and signal with None.
        print(f"获取数据时出错: {str(e)}")
        return None

def insert_data_after_header(data, main_filename='ssq_data_20250912_075157.csv'):
    """Prepend newly fetched draw records to the main CSV file.

    New rows are written directly after the header — i.e. before all
    existing rows — keeping the file ordered newest-first.

    Args:
        data: list of row dicts (keys are the CSV column names).
        main_filename: name of the main CSV inside the 'data' directory.

    Returns:
        True when the file was created or updated, False when *data* is
        empty or every write attempt failed.
    """
    if not data:
        return False

    os.makedirs('data', exist_ok=True)
    main_filepath = os.path.join('data', main_filename)

    new_df = pd.DataFrame(data)

    if not os.path.exists(main_filepath):
        # First run: the new rows become the whole file.
        new_df.to_csv(main_filepath, index=False, encoding='utf-8-sig')
        print(f"创建新文件 {main_filepath}，共 {len(new_df)} 条记录")
        return True

    try:
        existing_df = pd.read_csv(main_filepath)

        # Align column order with the existing file before concatenating.
        new_df = new_df[existing_df.columns]

        # New rows first, then all existing rows (pandas data rows never
        # include the header, so no slicing is needed).
        combined_df = pd.concat([new_df, existing_df], ignore_index=True)
        combined_df.to_csv(main_filepath, index=False, encoding='utf-8-sig')
        print(f"数据已插入到 {main_filepath} 的第二行，共 {len(combined_df)} 条记录")
    except Exception as e:
        print(f"插入数据时出错: {str(e)}")
        # Fallback: append the new rows to the end of the file.  This must
        # not reference existing_df/combined_df — they are unbound when
        # pd.read_csv itself raised.
        try:
            new_df.to_csv(main_filepath, mode='a', header=False, index=False, encoding='utf-8-sig')
            print(f"数据已追加到 {main_filepath}，新增 {len(new_df)} 条记录")
        except Exception as e2:
            print(f"追加数据也失败: {str(e2)}")
            return False

    return True

def save_backup_file(data):
    """Write *data* to a timestamped backup CSV under the 'data' directory.

    Args:
        data: list of row dicts (keys are the CSV column names).

    Returns:
        True when a backup file was written, False when *data* is empty.
    """
    if not data:
        return False

    # Ensure the output directory exists — unlike the main-file path, a
    # backup may be requested before anything else has created 'data/'.
    os.makedirs('data', exist_ok=True)

    timestamp = time.strftime("%Y%m%d_%H%M%S")
    filename = f"ssq_data_{timestamp}.csv"
    filepath = os.path.join('data', filename)

    df = pd.DataFrame(data)
    df.to_csv(filepath, index=False, encoding='utf-8-sig')
    print(f"备份数据已保存到 {filepath}")
    return True

def main():
    """Check local SSQ data completeness and fetch/store any missing draws."""
    print("开始检查双色球数据完整性...")

    # Total number of draws according to the remote API; fall back to a
    # generous upper bound so fetching still proceeds on failure.
    total_count = get_total_ssq_count()
    if total_count == 0:
        print("无法获取总期数，将尝试获取所有数据")
        total_count = 3000

    existing_periods = get_existing_periods()
    print(f"总期数: {total_count}, 本地已有: {len(existing_periods)}")

    if len(existing_periods) >= total_count:
        print("本地数据已经是最新的，无需更新")
        return

    missing_data = get_missing_ssq_data(existing_periods, total_count)
    if not missing_data:
        print("未能获取缺失数据")
        return

    print(f"成功获取 {len(missing_data)} 条缺失数据")

    # Newest draws first, matching the order kept in the main CSV.
    missing_data.sort(key=lambda row: row['开奖日期'], reverse=True)

    # Prepend the new rows to the main file (directly after the header).
    saved = insert_data_after_header(missing_data)

    print("\n新获取的数据预览（按时间倒序）:")
    for idx, row in enumerate(missing_data[:5], start=1):
        print(f"{idx}. 期号: {row['期号']}, 日期: {row['开奖日期']}, 红球: {row['红球']}, 蓝球: {row['蓝球']}")

    if saved:
        print(f"\n数据更新完成！新增 {len(missing_data)} 条记录，已插入到主文件的第二行")
    else:
        print("\n数据保存过程中出现错误")

# Run the completeness check only when executed as a script.
if __name__ == "__main__":
    main()