import os
import requests
import pandas as pd
from bs4 import BeautifulSoup
import urllib3
import sys
import logging

# Suppress InsecureRequestWarning — requests below are made with verify=False.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# ---------------- Configuration ----------------
# Map of game key -> display name and working directory of the training script.
name_path = {
    "plw": {
        "name": "排列5",
        "path": "./"  # directory the training script runs in
    }
}
# File name for the scraped history CSV (written next to this script).
data_file_name = "plw_history.csv"

# Logging: timestamped INFO-level messages to stdout.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)

def get_url(name):
    """
    Build the base URL and query-path template for a game's history endpoint.

    :param name: game key (e.g. 'plw')
    :return: tuple ``(base_url, path_template)``; the template contains a
             ``{}`` placeholder for the start issue, with the end issue
             appended after ``end=`` by the caller
    """
    base_url = "https://datachart.500.com/{}/history/".format(name)
    query_template = "inc/history.php?start={}&end="
    return base_url, query_template

def get_current_number(name):
    """
    Fetch the latest draw (issue) number for a game.

    Requests ``{base}/inc/history.php?limit=29`` and reads the value of the
    hidden ``<input id="end">`` element, which the site fills with the most
    recent issue number.

    :param name: game key (e.g. 'plw')
    :return: latest issue number as a string
    :raises SystemExit: on any network or parsing failure (logged first)
    """
    url, _ = get_url(name)
    full_url = f"{url}inc/history.php?limit=29"
    logging.info(f"Fetching URL: {full_url}")
    try:
        response = requests.get(full_url, verify=False, timeout=20)
        if response.status_code != 200:
            logging.error(f"Failed to fetch data. Status code: {response.status_code}")
            sys.exit(1)
        # The site serves GB-encoded pages; set the encoding before decoding.
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        # NOTE: the debug line that logged the whole parsed document at INFO
        # level (mislabelled "Fetching URL") has been removed.
        current_num_input = soup.find("input", id="end")
        if not current_num_input:
            logging.error("Could not find the 'end' input element on the page.")
            sys.exit(1)
        current_num = current_num_input.get("value", "").strip()
        if not current_num:
            logging.error("The 'end' input element does not have a 'value' attribute.")
            sys.exit(1)
        logging.info(f"最新一期期号：{current_num}")
        return current_num
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching current number: {e}")
        sys.exit(1)
    except Exception as e:
        logging.error(f"Unexpected error: {e}")
        sys.exit(1)

def _parse_draw_row(tr):
    """
    Parse one history-table ``<tr>`` into a record dict, or return None.

    Expects at least two non-empty cells: the issue number, then a cell with
    five space-separated single digits. Any malformed row is logged and
    skipped by returning None.
    """
    tds = tr.find_all("td")
    # Keep only cells with actual text (drops spacer/comment cells).
    valid_tds = [td for td in tds if td.get_text().strip()]
    if len(valid_tds) < 2:
        logging.warning(f"Skipping incomplete row: {tr}")
        return None

    # First non-empty cell is the issue number; must be all digits.
    issue_text = valid_tds[0].get_text().strip()
    if not issue_text.isdigit():
        logging.warning(f"Invalid issue number in row: {tr}")
        return None

    # Second non-empty cell holds the five drawn digits, space separated.
    numbers = valid_tds[1].get_text().strip().split()
    if len(numbers) != 5:
        logging.warning(f"Unexpected number count in row: {tr}")
        return None
    valid_numbers = [n for n in numbers if n.isdigit() and 0 <= int(n) <= 9]
    if len(valid_numbers) != 5:
        logging.warning(f"Invalid numbers format in row: {tr}")
        return None

    return {"draw_issue": issue_text, "draw_numbers": ",".join(valid_numbers)}


def spider(name, start, end):
    """
    Scrape history data for issues ``start`` through ``end``.

    :param name: game key (e.g. 'plw')
    :param start: first issue number
    :param end: last issue number
    :return: DataFrame with columns ``draw_issue`` (numeric, ascending) and
             ``draw_numbers`` (comma-joined digits)
    :raises SystemExit: on fetch failure or when no valid rows are found
    """
    url, path = get_url(name)
    full_url = f"{url}{path.format(start)}{end}"
    logging.info(f"Fetching URL: {full_url}")
    try:
        response = requests.get(full_url, verify=False, timeout=10)
        if response.status_code != 200:
            logging.error(f"Failed to fetch data. Status code: {response.status_code}")
            sys.exit(1)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")

        # Locate result rows. BUGFIX: find_all() takes a tag *name*, not a
        # CSS selector — the original find_all("tr.t_tr1") never matched
        # anything, so the code only worked via the last fallback. Use the
        # (name, class_) form instead.
        trs = []
        tbody = soup.find("tbody", attrs={"id": "tablelist"})
        if tbody:
            trs = tbody.find_all("tr", class_="t_tr1")
        else:
            # Fallback: some page variants wrap the tbody in a table.
            table = soup.find("table", attrs={"id": "tablelist"})
            if table:
                tbody = table.find("tbody")
                if tbody:
                    trs = tbody.find_all("tr", class_="t_tr1")

        # Last resort: search the whole document for the row class.
        if not trs:
            trs = soup.find_all("tr", class_="t_tr1")

        if not trs:
            logging.error("Could not find the table rows with class 't_tr1'.")
            sys.exit(1)

        data = []
        for tr in trs:
            try:
                item = _parse_draw_row(tr)
            except Exception as e:
                logging.warning(f"Error parsing row: {e}")
                continue
            if item is not None:
                data.append(item)

        if not data:
            logging.error("No valid data found.")
            sys.exit(1)

        df = pd.DataFrame(data)
        if "draw_issue" in df.columns:
            # Sort by issue number; drop rows that fail numeric conversion.
            df['draw_issue'] = pd.to_numeric(df['draw_issue'], errors='coerce')
            df = df.dropna(subset=['draw_issue']).sort_values(by='draw_issue').reset_index(drop=True)
        logging.info(f"成功爬取 {len(df)} 条数据。")
        return df
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching data: {e}")
        sys.exit(1)
    except Exception as e:
        logging.error(f"Error in spider: {e}")
        sys.exit(1)

def _merge_with_existing(df, save_path):
    """
    Combine freshly scraped rows with any CSV already at ``save_path``.

    Deduplicates on ``draw_issue`` (newly scraped rows win) and returns the
    result sorted by issue number. If no file exists, returns ``df`` as-is.
    """
    if not os.path.exists(save_path):
        return df
    existing_df = pd.read_csv(save_path, encoding="utf-8")
    logging.info(f"已存在数据文件，当前有 {len(existing_df)} 条记录")
    combined_df = pd.concat([existing_df, df], ignore_index=True)
    # Keep the most recently scraped row for each issue number.
    combined_df = combined_df.drop_duplicates(subset=["draw_issue"], keep="last")
    return combined_df.sort_values(by="draw_issue").reset_index(drop=True)


def _write_csv(df, path):
    """Persist only the two canonical columns to ``path`` as UTF-8 CSV."""
    df.to_csv(path, encoding="utf-8", index=False, columns=["draw_issue", "draw_numbers"])


def fetch_plw_data():
    """
    Scrape 排列5 history and save it next to this script as ``plw_history.csv``.

    Existing data is merged and deduplicated rather than overwritten. On a
    ``PermissionError`` the write falls back to a temp-file swap, then up to
    three timed retries; any unrecoverable failure exits the process.
    """
    name = "plw"
    current_number = get_current_number(name)
    df = spider(name, 25206, current_number)

    # Save beside this script so the training pipeline can locate the file.
    save_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), data_file_name)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    # Validate up front instead of inside the try, so a schema problem is
    # reported as such rather than as a save error.
    if "draw_issue" not in df.columns or "draw_numbers" not in df.columns:
        logging.error("Required columns missing in data.")
        sys.exit(1)

    try:
        had_existing = os.path.exists(save_path)
        combined_df = _merge_with_existing(df, save_path)
        _write_csv(combined_df, save_path)
        if had_existing:
            logging.info(f"数据已更新至 {save_path}，当前共有 {len(combined_df)} 条记录")
        else:
            logging.info(f"数据已保存至 {save_path}")
    except PermissionError:
        # Direct write failed: write to a temp file, then swap it into place.
        temp_path = save_path + ".tmp"
        try:
            combined_df = _merge_with_existing(df, save_path)
            _write_csv(combined_df, temp_path)
            if os.path.exists(save_path):
                os.remove(save_path)
            os.rename(temp_path, save_path)
            logging.info(f"数据已保存至 {save_path} (通过临时文件)")
        except Exception as e:
            logging.error(f"Error saving data to CSV (even with temp file): {e}")
            # Last resort: retry the direct write a few times with a pause,
            # in case another process briefly held the file.
            import time
            for i in range(3):
                try:
                    time.sleep(1)  # give any lock holder a moment to release
                    combined_df = _merge_with_existing(df, save_path)
                    _write_csv(combined_df, save_path)
                    logging.info(f"数据已保存至 {save_path} (第{i+1}次重试成功)")
                    return
                except Exception as retry_e:
                    logging.warning(f"第{i+1}次重试失败: {retry_e}")
            sys.exit(1)
    except Exception as e:
        logging.error(f"Error saving data to CSV: {e}")
        sys.exit(1)

# Script entry point: scrape the latest 排列5 history and persist it to CSV.
if __name__ == "__main__":
    fetch_plw_data()
