#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ENTSO-E网页爬虫
基于网页爬取方式获取电力负荷数据，无需API Token
适配新的数据库表结构
"""

import requests
from bs4 import BeautifulSoup
import pandas as pd
from tqdm import tqdm
import time
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Tuple
import os
import sys
from sqlalchemy import text
from sqlalchemy.exc import IntegrityError

# Add the project root directory to the Python path so that the
# `backend.*` package imports below resolve when this script is run
# directly (three levels up from this file's directory).
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from backend.config.database import get_db_session

# Configure logging: mirror every message to a UTF-8 log file and stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('entsoe_web_crawler.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class ENTSOEWebCrawler:
    """Web crawler for the ENTSO-E transparency platform (no API token).

    Fetches the "Total Load" HTML table for the Belgian bidding zone,
    parses the 15-minute load values out of it and upserts them into the
    ``load_data`` database table.
    """

    # Upsert statement for one 15-minute record.  Defined once at class
    # level instead of being rebuilt inside the per-record insert loop.
    # ON DUPLICATE KEY UPDATE makes re-crawling a day refresh existing
    # rows instead of raising.
    _INSERT_SQL = """
    INSERT INTO load_data (dt, t_idx, dtm, load_val) 
    VALUES (:dt, :t_idx, :dtm, :load_val)
    ON DUPLICATE KEY UPDATE 
    load_val = VALUES(load_val),
    created_at = CURRENT_TIMESTAMP
    """

    def __init__(self, save_debug_html: bool = True) -> None:
        """Initialize the HTTP session, target area and statistics.

        Args:
            save_debug_html: when True (the default, preserving the
                original behavior) every successful response body is
                dumped to a ``debug_response_*.html`` file.  Pass False
                for long crawls to avoid writing one file per day.
        """
        self.base_url = "https://transparency.entsoe.eu/load-domain/r2/totalLoadR2/show"
        self.save_debug_html = save_debug_html

        # Browser-like headers: the transparency site serves plain HTML
        # to ordinary browsers, so mimic one.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        })

        # EIC code of the Belgian bidding zone.
        self.area_code = "10YBE----------2"

        # Data-quality / progress counters, reported by _print_stats().
        self.stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'total_records': 0,
            'inserted_records': 0,
            'skipped_records': 0,
            'error_dates': []
        }

    def fetch_load_data(self, date_str: str) -> Optional[List[List]]:
        """Fetch the load table for one day.

        Args:
            date_str: date in ``DD.MM.YYYY`` format (the site's format).

        Returns:
            Rows of ``[date_str, time_interval, load_value]``, or None
            when the request or the parsing failed.
        """
        try:
            # Query parameters for the "Total Load - Day" table view.
            params = {
                'name': '',
                'defaultValue': 'false',
                'viewType': 'TABLE',
                'areaType': 'BZN',
                'atch': 'false',
                'dateTime.dateTime': f'{date_str}+00:00|CET|DAY',
                'biddingZone.values': f'CTY|{self.area_code}!BZN|{self.area_code}',
                'dateTime.timezone': 'CET_CEST',
                'dateTime.timezone_input': 'CET+(UTC+1)+/+CEST+(UTC+2)'
            }

            logger.info(f"正在获取 {date_str} 的负荷数据...")
            logger.info(f"请求URL: {self.base_url}")
            logger.info(f"请求参数: {params}")

            response = self.session.get(self.base_url, params=params, timeout=30)
            self.stats['total_requests'] += 1

            logger.info(f"响应状态码: {response.status_code}")
            logger.info(f"响应头: {dict(response.headers)}")

            if response.status_code == 200:
                self.stats['successful_requests'] += 1
                if self.save_debug_html:
                    # Keep a copy of the raw response for offline debugging.
                    with open(f'debug_response_{date_str.replace(".", "_")}.html', 'w', encoding='utf-8') as f:
                        f.write(response.text)
                    logger.info(f"响应内容已保存到 debug_response_{date_str.replace('.', '_')}.html")

                return self._parse_html_table(response.text, date_str)

            self.stats['failed_requests'] += 1
            logger.error(f"请求失败: {response.status_code} - {response.text[:500]}")
            return None

        except Exception as e:
            # Network errors, timeouts, etc. -- record and move on so a
            # single bad day does not abort a long crawl.
            self.stats['failed_requests'] += 1
            logger.error(f"获取 {date_str} 负荷数据时发生错误: {str(e)}")
            return None

    def _parse_html_table(self, html_content: str, date_str: str) -> Optional[List[List]]:
        """Parse the load values out of the page's HTML tables.

        Args:
            html_content: raw HTML of the response body.
            date_str: date in ``DD.MM.YYYY`` format (kept in the rows).

        Returns:
            Rows of ``[date_str, time_interval, load_value]``, or None
            when no usable data was found.
        """
        try:
            soup = BeautifulSoup(html_content, 'html.parser')

            tables = soup.find_all("table")
            logger.info(f"找到 {len(tables)} 个表格")

            if not tables:
                logger.warning(f"{date_str} 未找到任何表格")
                # Log alternative containers to help diagnose layout changes.
                divs = soup.find_all("div", class_="table")
                logger.info(f"找到 {len(divs)} 个div.table")

                data_elements = soup.find_all(["tr", "div"], class_=["row", "data-row"])
                logger.info(f"找到 {len(data_elements)} 个数据元素")

                return None

            data = []

            for table_idx, table in enumerate(tables):
                logger.info(f"解析第 {table_idx + 1} 个表格")

                rows = table.find_all("tr")
                logger.info(f"表格 {table_idx + 1} 有 {len(rows)} 行")

                for row_idx, row in enumerate(rows):
                    # Skip anything that is not a Tag (e.g. NavigableString).
                    if not hasattr(row, "find_all"):
                        continue

                    cells = row.find_all(["td", "th"])
                    logger.debug(f"行 {row_idx} 有 {len(cells)} 个单元格")

                    if len(cells) >= 3:
                        # Normalize cell text: strip NBSPs and embedded newlines.
                        cell_texts = []
                        for cell in cells:
                            cell_text = cell.get_text(strip=True).replace('\xa0', '').replace('\n', ' ')
                            cell_texts.append(cell_text)

                        logger.debug(f"单元格内容: {cell_texts}")

                        # Locate the time interval ("HH:MM - HH:MM") and the
                        # numeric load value among the cells.  The loop
                        # variable is named `cell_text` (not `text`) so it
                        # does not shadow sqlalchemy's `text` imported at
                        # module level.
                        time_interval = None
                        actual_load = None

                        for cell_text in cell_texts:
                            if '-' in cell_text and ':' in cell_text:
                                time_interval = cell_text
                            elif cell_text.replace(',', '').replace('.', '').isdigit():
                                actual_load = cell_text

                        if time_interval and actual_load:
                            try:
                                load_val = float(actual_load.replace(',', ''))
                                data.append([date_str, time_interval, load_val])
                                logger.debug(f"找到数据: {date_str} {time_interval} {load_val}")
                            except ValueError:
                                logger.warning(f"{date_str} {time_interval} 负荷值解析失败: {actual_load}")
                                continue

            if data:
                logger.info(f"{date_str} 成功解析 {len(data)} 条数据")
                return data

            logger.warning(f"{date_str} 未找到有效数据")
            # Dump a little page structure for debugging.
            logger.info(f"页面标题: {soup.title.string if soup.title else '无标题'}")
            logger.info(f"页面内容长度: {len(html_content)}")
            return None

        except Exception as e:
            logger.error(f"解析 {date_str} HTML表格时发生错误: {str(e)}")
            return None

    def _convert_time_interval_to_t_idx(self, time_interval: str) -> int:
        """Convert a quarter-hour interval to a 1-based slot index.

        Args:
            time_interval: interval string such as ``"00:00 - 00:15"``.

        Returns:
            Slot index 1..96 (96 quarter hours per day), or 0 when the
            interval could not be parsed (callers skip such rows).
        """
        try:
            # Only the interval's start time determines the slot.
            start_time = time_interval.split('-')[0].strip()
            hour, minute = map(int, start_time.split(':'))

            # One slot per 15 minutes starting at 00:00; index is 1-based.
            return (hour * 4) + (minute // 15) + 1
        except Exception as e:
            logger.error(f"时间转换失败 {time_interval}: {str(e)}")
            return 0

    def _convert_date_format(self, date_str: str) -> str:
        """Convert a date from ``DD.MM.YYYY`` to ``YYYY-MM-DD``.

        Args:
            date_str: original date string (``DD.MM.YYYY``).

        Returns:
            The converted string, or the input unchanged when it cannot
            be split (so callers always receive a string).
        """
        try:
            day, month, year = date_str.split('.')
            return f"{year}-{month.zfill(2)}-{day.zfill(2)}"
        except Exception as e:
            logger.error(f"日期格式转换失败 {date_str}: {str(e)}")
            return date_str

    def _prepare_database_data(self, raw_data: List[List]) -> List[Dict]:
        """Turn parsed rows into dicts matching the ``load_data`` columns.

        Args:
            raw_data: rows of ``[date_str, time_interval, load_value]``.

        Returns:
            Dicts with keys ``dt`` (YYYY-MM-DD), ``t_idx`` (1..96),
            ``dtm`` (datetime of the interval start) and ``load_val``.
            Rows whose time interval cannot be parsed are dropped.
        """
        db_data = []

        for date_str, time_interval, load_val in raw_data:
            dt = self._convert_date_format(date_str)
            t_idx = self._convert_time_interval_to_t_idx(time_interval)

            # t_idx == 0 signals an unparseable interval -- skip the row.
            if t_idx == 0:
                continue

            # Build the precise timestamp of the interval start.
            try:
                start_time = time_interval.split('-')[0].strip()
                hour, minute = map(int, start_time.split(':'))
                dtm = datetime.strptime(dt, '%Y-%m-%d').replace(hour=hour, minute=minute)

                db_data.append({
                    'dt': dt,
                    't_idx': t_idx,
                    'dtm': dtm,
                    'load_val': load_val
                })
            except Exception as e:
                logger.error(f"时间戳计算失败 {date_str} {time_interval}: {str(e)}")
                continue

        return db_data

    def save_to_database(self, db_data: List[Dict]) -> None:
        """Upsert prepared records into the ``load_data`` table.

        Uses raw parameterized SQL (see ``_INSERT_SQL``) to avoid ORM
        mapping issues; duplicates update the existing row.

        Args:
            db_data: record dicts produced by _prepare_database_data().
        """
        if not db_data:
            return

        db = get_db_session()
        batch_inserted = 0  # records actually written in this batch

        try:
            for record in db_data:
                try:
                    db.execute(text(self._INSERT_SQL), record)
                    self.stats['inserted_records'] += 1
                    batch_inserted += 1

                except IntegrityError:
                    db.rollback()
                    self.stats['skipped_records'] += 1
                    logger.warning(f"跳过重复数据: {record['dt']} t_idx={record['t_idx']}")
                except Exception as e:
                    db.rollback()
                    logger.error(f"插入数据失败 {record}: {str(e)}")
                    continue

            db.commit()
            # Report the number of records that actually succeeded, not
            # the batch size (the original over-reported on failures).
            logger.info(f"成功插入 {batch_inserted} 条数据")

        except Exception as e:
            db.rollback()
            logger.error(f"保存到数据库时发生错误: {str(e)}")
        finally:
            db.close()

    def crawl_date_range(self, start_date: str, end_date: str) -> None:
        """Crawl and store load data for every day in a date range.

        Args:
            start_date: first day, ``YYYY-MM-DD``.
            end_date: last day (inclusive), ``YYYY-MM-DD``.
        """
        logger.info(f"开始爬取 {start_date} 到 {end_date} 的负荷数据")

        start_dt = datetime.strptime(start_date, '%Y-%m-%d')
        end_dt = datetime.strptime(end_date, '%Y-%m-%d')

        current_dt = start_dt
        total_days = (end_dt - start_dt).days + 1

        with tqdm(total=total_days, desc="爬取进度") as pbar:
            while current_dt <= end_dt:
                # The website expects DD.MM.YYYY.
                date_str = current_dt.strftime('%d.%m.%Y')

                raw_data = self.fetch_load_data(date_str)

                if raw_data:
                    db_data = self._prepare_database_data(raw_data)
                    self.save_to_database(db_data)
                    self.stats['total_records'] += len(raw_data)
                else:
                    self.stats['error_dates'].append(date_str)
                    logger.warning(f"{date_str} 获取数据失败")

                pbar.update(1)

                # Throttle requests to stay polite to the server.
                time.sleep(1.5)

                current_dt += timedelta(days=1)

        self._print_stats()

    def _print_stats(self) -> None:
        """Log a summary of the crawl statistics."""
        logger.info("=" * 50)
        logger.info("爬取统计信息:")
        logger.info(f"总请求数: {self.stats['total_requests']}")
        logger.info(f"成功请求: {self.stats['successful_requests']}")
        logger.info(f"失败请求: {self.stats['failed_requests']}")
        logger.info(f"总记录数: {self.stats['total_records']}")
        logger.info(f"插入记录: {self.stats['inserted_records']}")
        logger.info(f"跳过记录: {self.stats['skipped_records']}")

        if self.stats['error_dates']:
            logger.info(f"失败日期: {len(self.stats['error_dates'])} 个")
            logger.info(f"失败日期列表: {self.stats['error_dates'][:10]}...")  # only the first 10

        logger.info("=" * 50)

    def test_connection(self) -> bool:
        """Probe the website by fetching today's data.

        Returns:
            True when the request succeeded and yielded at least one row.
        """
        try:
            today = datetime.now().strftime('%d.%m.%Y')
            data = self.fetch_load_data(today)

            if data is not None and len(data) > 0:
                logger.info("网页连接测试成功")
                return True

            logger.error("网页连接测试失败")
            return False

        except Exception as e:
            logger.error(f"网页连接测试时发生错误: {str(e)}")
            return False


def main(start_date: str = "2015-01-01", end_date: str = "2025-07-13") -> None:
    """Entry point: verify connectivity, then crawl a date range.

    The range was previously hard-coded; it is now parameterized with
    the same values as defaults, so existing callers are unaffected.

    Args:
        start_date: first day to crawl (``YYYY-MM-DD``).
        end_date: last day to crawl, inclusive (``YYYY-MM-DD``).
    """
    print("ENTSO-E网页爬虫")
    print("=" * 50)

    crawler = ENTSOEWebCrawler()

    # Abort early if the site is unreachable.
    if not crawler.test_connection():
        logger.error("网页连接失败，请检查网络连接")
        return

    try:
        crawler.crawl_date_range(start_date, end_date)
        logger.info("数据爬取完成！")

    except KeyboardInterrupt:
        logger.info("用户中断爬取过程")
    except Exception as e:
        logger.error(f"爬取过程中发生错误: {str(e)}")


if __name__ == "__main__":
    main()