import os
import requests
import pandas as pd
from bs4 import BeautifulSoup
import urllib3
import sys
import logging

# Suppress InsecureRequestWarning: all requests below are made with
# certificate verification disabled (verify=False).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# ---------------- Configuration ----------------
# Per-game settings: game key and working directory.
name_path = {
    "fc3d": {
        "name": "fc3d",
        "path": "./"  # directory the training script runs in
    }
}
# File name of the CSV that the crawled history is written to.
data_file_name = "fc3d_history.csv"

# Logging setup: INFO level, timestamped lines written to stdout.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)

def get_url(name, limit=100):
    """
    Build the base URL and relative query path for crawling history data.

    :param name: game name key (e.g. 'fc3d'); currently unused because only
        one data source is supported — kept for interface compatibility
    :param limit: number of most recent draws to request (default 100),
        so callers no longer need to rebuild the path by hand
    :return: tuple (url, path) — base URL and relative query path
    """
    # Hard-coded data source: the FC3D history chart on 500.com.
    url = "https://datachart.500.com/sd/history/"
    path = f"inc/history.php?limit={limit}"
    return url, path

def get_current_number(name):
    """
    Fetch the issue number of the most recent draw from the history page.

    :param name: game name key ('fc3d')
    :return: latest issue number as a string; exits the process (exit code 1)
        on any network, HTTP, or parsing failure
    """
    url, _ = get_url(name)
    full_url = f"{url}inc/history.php?limit=29"
    logging.info(f"Fetching URL: {full_url}")
    try:
        response = requests.get(full_url, verify=False, timeout=20)
        if response.status_code != 200:
            logging.error(f"Failed to fetch data. Status code: {response.status_code}")
            sys.exit(1)
        response.encoding = "utf-8"  # decode the page as utf-8
        soup = BeautifulSoup(response.text, "lxml")
        # The page exposes the newest issue number via <input id="end" value="...">.
        current_num_input = soup.find("input", id="end")
        # BUG FIX: must compare against None explicitly. A bs4 Tag defines
        # __len__ (number of children), so a contentless void element like
        # <input> is falsy even when it was found, and `if not tag:` would
        # wrongly take the "not found" branch on success.
        if current_num_input is None:
            logging.error("Could not find the 'end' input element on the page.")
            sys.exit(1)
        current_num = current_num_input.get("value", "").strip()
        if not current_num:
            logging.error("The 'end' input element does not have a 'value' attribute.")
            sys.exit(1)
        logging.info(f"最新一期期号：{current_num}")
        return current_num
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching current number: {e}")
        sys.exit(1)
    except Exception as e:
        # Catch-all boundary: any unexpected parsing error also aborts.
        logging.error(f"Unexpected error: {e}")
        sys.exit(1)

def _parse_draw_row(tr):
    """
    Parse one <tr> into a draw record, or return None if the row does not
    look like a valid FC3D draw (needs >= 2 cells: a numeric issue number
    and exactly three single digits 0-9 in the second cell).
    """
    tds = tr.find_all("td")
    if len(tds) < 2:  # need at least issue + numbers columns
        return None
    issue_text = tds[0].get_text().strip()
    if not issue_text.isdigit():  # also rejects empty strings
        return None
    numbers = tds[1].get_text().strip().split()
    if len(numbers) != 3 or not all(n.isdigit() and 0 <= int(n) <= 9 for n in numbers):
        return None
    return {
        "draw_issue": issue_text,
        "num_1": int(numbers[0]),
        "num_2": int(numbers[1]),
        "num_3": int(numbers[2]),
    }

def spider(name, start, end):
    """
    Crawl FC3D history data.

    NOTE: ``start`` and ``end`` are currently NOT sent to the server — the
    request always asks for the most recent draws via the ``limit`` query
    parameter built in :func:`get_url`. Both parameters are kept only for
    interface compatibility with existing callers.

    :param name: game name key ('fc3d')
    :param start: intended first issue number (currently unused)
    :param end: intended last issue number (currently unused)
    :return: DataFrame with columns draw_issue, num_1, num_2, num_3 sorted by
        issue ascending; exits the process (exit code 1) on failure
    """
    url, path = get_url(name)
    full_url = f"{url}{path}"
    logging.info(f"spider Fetching URL: {full_url}")
    try:
        response = requests.get(full_url, verify=False, timeout=10)
        if response.status_code != 200:
            logging.error(f"spider Failed to fetch data. Status code: {response.status_code}")
            sys.exit(1)
        response.encoding = "utf-8"  # decode the page as utf-8
        soup = BeautifulSoup(response.text, "lxml")

        # Locate candidate rows, most specific selector first:
        # 1) tbody#tablelist, 2) tbody inside table#tablelist, 3) any <tr>.
        trs = []
        tbody = soup.find("tbody", attrs={"id": "tablelist"})
        if tbody:
            trs = tbody.find_all("tr")
        if not trs:
            table = soup.find("table", attrs={"id": "tablelist"})
            if table:
                tbody = table.find("tbody")
                if tbody:
                    trs = tbody.find_all("tr")
        if not trs:
            trs = soup.find_all("tr")

        # Single pass: parse and validate each row once (the original code
        # filtered rows and then re-ran the identical validation while parsing).
        data = [item for item in (_parse_draw_row(tr) for tr in trs) if item]

        if not data:
            logging.error("Could not find the table rows with lottery data.")
            # Log a page snippet to aid debugging layout changes.
            page_content = str(soup)[:1000]  # first 1000 characters only
            logging.info(f"Page content preview: {page_content}")
            sys.exit(1)

        df = pd.DataFrame(data)
        # Sort by issue number; drop rows whose issue fails numeric coercion.
        if "draw_issue" in df.columns:
            df['draw_issue'] = pd.to_numeric(df['draw_issue'], errors='coerce')
            df = df.dropna(subset=['draw_issue']).sort_values(by='draw_issue').reset_index(drop=True)
        logging.info(f"成功爬取 {len(df)} 条数据。")
        return df
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching data: {e}")
        sys.exit(1)
    except Exception as e:
        # Catch-all boundary: any unexpected parsing error also aborts.
        logging.error(f"Error in spider: {e}")
        sys.exit(1)

def _merge_with_existing(df, save_path):
    """
    Merge freshly crawled data with the existing CSV at save_path, if any.

    Duplicate issues keep the newest (freshly crawled) record; the result is
    sorted by issue number. Returns df unchanged when no file exists yet.
    """
    if not os.path.exists(save_path):
        return df
    existing_df = pd.read_csv(save_path, encoding="utf-8")
    logging.info(f"已存在数据文件，当前有 {len(existing_df)} 条记录")
    combined_df = pd.concat([existing_df, df], ignore_index=True)
    # Dedupe by issue number; keep="last" prefers the freshly crawled rows.
    combined_df = combined_df.drop_duplicates(subset=["draw_issue"], keep="last")
    return combined_df.sort_values(by="draw_issue").reset_index(drop=True)

def fetch_3d_data():
    """
    Crawl FC3D history and persist it as CSV next to this script.

    Merges with any previously saved file (deduplicating by issue number).
    On PermissionError it falls back to a temp-file-and-rename write, and
    finally to three timed retries; exits the process (exit code 1) if all
    attempts fail or the crawled data lacks the required columns.
    """
    name = "fc3d"
    current_number = get_current_number(name)
    # FC3D history starts in 2002; note spider() currently ignores the
    # start/end arguments and always fetches the most recent draws.
    df = spider(name, 2002001, current_number)

    # Save next to this script file.
    save_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), data_file_name)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    required_columns = ["draw_issue", "num_1", "num_2", "num_3"]
    try:
        if not all(col in df.columns for col in required_columns):
            logging.error("Required columns missing in data.")
            sys.exit(1)
        if os.path.exists(save_path):
            combined_df = _merge_with_existing(df, save_path)
            combined_df.to_csv(save_path, encoding="utf-8", index=False, columns=required_columns)
            logging.info(f"数据已更新至 {save_path}，当前共有 {len(combined_df)} 条记录")
        else:
            df.to_csv(save_path, encoding="utf-8", index=False, columns=required_columns)
            logging.info(f"数据已保存至 {save_path}")
    except PermissionError:
        # Direct write failed: write to a temp file, then rename over target.
        temp_path = save_path + ".tmp"
        try:
            merged = _merge_with_existing(df, save_path)
            merged.to_csv(temp_path, encoding="utf-8", index=False, columns=required_columns)
            if os.path.exists(save_path):
                os.remove(save_path)
            os.rename(temp_path, save_path)
            logging.info(f"数据已保存至 {save_path} (通过临时文件)")
        except Exception as e:
            logging.error(f"Error saving data to CSV (even with temp file): {e}")
            # Last resort: retry the direct write a few times.
            import time
            for i in range(3):
                try:
                    time.sleep(1)  # brief pause before retrying
                    merged = _merge_with_existing(df, save_path)
                    merged.to_csv(save_path, encoding="utf-8", index=False, columns=required_columns)
                    logging.info(f"数据已保存至 {save_path} (第{i+1}次重试成功)")
                    return
                except Exception as retry_e:
                    logging.warning(f"第{i+1}次重试失败: {retry_e}")
            sys.exit(1)
    except Exception as e:
        logging.error(f"Error saving data to CSV: {e}")
        sys.exit(1)

if __name__ == "__main__":
    # Script entry point: crawl FC3D history and write/update the CSV.
    fetch_3d_data()