#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ENTSO-E电力负荷数据爬虫
爬取2015-2025年电力负荷历史数据并直接导入数据库
"""

import requests
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import time
import logging
from typing import List, Dict, Optional
import json
from sqlalchemy import text
from sqlalchemy.exc import IntegrityError
import os
import sys

# Make the project root importable so the `backend.*` packages below
# resolve when this file is executed directly as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from backend.config.database import get_db_session
from backend.entities.load_data_new import LoadData

# Logging: INFO level, mirrored to a UTF-8 log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('entsoe_crawler.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class ENTSOECrawler:
    """Crawler for ENTSO-E electricity load data.

    Downloads actual total load (document type A65) for the Belgian
    bidding zone from the ENTSO-E Transparency Platform REST API and
    stores it as daily 96-point records via the project's SQLAlchemy
    session.
    """

    def __init__(self, token: Optional[str] = None):
        """
        Initialize the crawler.

        Args:
            token: ENTSO-E API token; falls back to the ENTSOE_TOKEN
                environment variable when None.

        Raises:
            ValueError: if no token is available from either source.
        """
        self.token = token or os.getenv('ENTSOE_TOKEN')
        if not self.token:
            raise ValueError("ENTSO-E Token未设置，请设置环境变量ENTSOE_TOKEN或传入token参数")

        self.base_url = "https://transparency.entsoe.eu/api"
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

        # EIC code of the Belgian bidding zone.
        self.belgium_domain = "10Y1001A1001A82H"

        # Counters summarised by _print_stats() after a crawl run.
        self.stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'total_records': 0,
            'inserted_records': 0,
            'skipped_records': 0
        }

    @staticmethod
    def _to_period(date_str: str) -> str:
        """
        Convert 'YYYY-MM-DD' into the 'yyyyMMddHHmm' form required by the
        ENTSO-E REST API for periodStart/periodEnd. Digit-only values are
        assumed to already be in API form and are passed through.
        """
        if date_str.isdigit():
            return date_str
        return datetime.strptime(date_str, '%Y-%m-%d').strftime('%Y%m%d0000')

    def get_load_data(self, start_date: str, end_date: str, country_code: str = "BE") -> Optional[pd.DataFrame]:
        """
        Fetch actual total load for the given date range.

        Args:
            start_date: start date 'YYYY-MM-DD' (or raw 'yyyyMMddHHmm').
            end_date: end date 'YYYY-MM-DD', exclusive (or raw 'yyyyMMddHHmm').
            country_code: kept for interface compatibility; the instance's
                Belgian domain code is what is actually queried.

        Returns:
            DataFrame with 'datetime'/'load' columns, or None on failure.
        """
        try:
            # Parameters per the ENTSO-E "Actual Total Load" query:
            # documentType A65 + processType A16 (realised), bidding zone
            # passed as outBiddingZone_Domain, periods as yyyyMMddHHmm.
            # The original YYYY-MM-DD periods without processType would be
            # rejected by the API.
            params = {
                'securityToken': self.token,
                'documentType': 'A65',
                'processType': 'A16',
                'outBiddingZone_Domain': self.belgium_domain,
                'periodStart': self._to_period(start_date),
                'periodEnd': self._to_period(end_date)
            }

            logger.info(f"正在获取 {start_date} 到 {end_date} 的负荷数据...")

            response = self.session.get(self.base_url, params=params, timeout=30)
            self.stats['total_requests'] += 1

            if response.status_code == 200:
                self.stats['successful_requests'] += 1
                return self._parse_xml_response(response.text)
            else:
                self.stats['failed_requests'] += 1
                logger.error(f"API请求失败: {response.status_code} - {response.text}")
                return None

        except Exception as e:
            self.stats['failed_requests'] += 1
            logger.error(f"获取负荷数据时发生错误: {str(e)}")
            return None

    def _parse_xml_response(self, xml_content: str) -> Optional[pd.DataFrame]:
        """
        Parse an ENTSO-E GL_MarketDocument XML body into a DataFrame.

        Args:
            xml_content: raw XML response text from the API.

        Returns:
            DataFrame with 'datetime' and 'load' columns sorted by time,
            or None when nothing could be parsed.
        """
        try:
            import xml.etree.ElementTree as ET
            root = ET.fromstring(xml_content)

            time_series = []

            # ENTSO-E documents declare a default namespace, so plain tag
            # names never match; use the '{*}' wildcard (Python 3.8+).
            # Current documents use 'TimeSeries'; fall back to 'Series'.
            series_elems = root.findall('.//{*}TimeSeries') or root.findall('.//{*}Series')

            for series in series_elems:
                # A series may contain several Period elements; read all.
                for period in series.findall('{*}Period'):
                    time_interval = period.find('{*}timeInterval')
                    if time_interval is None:
                        continue
                    start_elem = time_interval.find('{*}start')
                    if start_elem is None or start_elem.text is None:
                        continue
                    start = start_elem.text

                    # Step width between points; defaults to 15 minutes
                    # (PT15M, the Belgian zone's resolution) when absent
                    # or unrecognised, matching the original behaviour.
                    res_minutes = 15
                    res_elem = period.find('{*}resolution')
                    if res_elem is not None and res_elem.text:
                        res_minutes = {'PT15M': 15, 'PT30M': 30,
                                       'PT60M': 60, 'P1D': 1440}.get(res_elem.text, 15)

                    for point in period.findall('{*}Point'):
                        position_elem = point.find('{*}position')
                        quantity_elem = point.find('{*}quantity')

                        if position_elem is None or position_elem.text is None or \
                           quantity_elem is None or quantity_elem.text is None:
                            continue
                        try:
                            position = int(position_elem.text)
                            quantity = float(quantity_elem.text)

                            # Timestamp = period start + (position-1) steps.
                            start_dt = datetime.fromisoformat(start.replace('Z', '+00:00'))
                            actual_time = start_dt + timedelta(minutes=res_minutes * (position - 1))

                            time_series.append({
                                'datetime': actual_time,
                                'load': quantity
                            })
                        except (ValueError, TypeError) as e:
                            logger.warning(f"解析点数据时出错: {e}")

            if not time_series:
                logger.warning("未找到有效的时间序列数据")
                return None

            df = pd.DataFrame(time_series)
            df = df.sort_values('datetime').reset_index(drop=True)

            logger.info(f"成功解析 {len(df)} 条负荷数据")
            return df

        except Exception as e:
            logger.error(f"解析XML响应时发生错误: {str(e)}")
            return None

    def _prepare_daily_data(self, df: pd.DataFrame) -> Dict[str, List[Optional[float]]]:
        """
        Regroup 15-minute interval rows into per-day 96-slot lists.

        Args:
            df: DataFrame with 'datetime' and 'load' columns.

        Returns:
            Mapping 'YYYY-MM-DD' -> list of 96 load values; slots with no
            data remain None.
        """
        daily_data: Dict[str, List[Optional[float]]] = {}

        for _, row in df.iterrows():
            # Normalise pandas Timestamps to plain datetime objects.
            if isinstance(row['datetime'], pd.Timestamp):
                dt_obj = row['datetime'].to_pydatetime()
            else:
                dt_obj = row['datetime']

            date_str = dt_obj.strftime('%Y-%m-%d')
            time_minutes = dt_obj.hour * 60 + dt_obj.minute
            position = (time_minutes // 15) + 1  # slot index 1-96

            if date_str not in daily_data:
                daily_data[date_str] = [None] * 96

            if 1 <= position <= 96:
                daily_data[date_str][position - 1] = row['load']

        return daily_data

    def save_to_database(self, daily_data: Dict[str, List[Optional[float]]]) -> None:
        """
        Persist daily 96-point records to the database.

        Only fully populated days (all 96 slots non-None) are inserted;
        duplicate days (IntegrityError) and incomplete days are counted
        as skipped.

        Args:
            daily_data: mapping 'YYYY-MM-DD' -> 96 load values.
        """
        db = get_db_session()

        try:
            for date_str, load_values in daily_data.items():
                if all(v is not None for v in load_values):
                    load_data = LoadData()
                    load_data.dt = datetime.strptime(date_str, '%Y-%m-%d').date()

                    # Columns t1..t96 hold the 96 quarter-hour values.
                    for i, value in enumerate(load_values, 1):
                        setattr(load_data, f't{i}', value)

                    try:
                        db.add(load_data)
                        db.commit()
                        self.stats['inserted_records'] += 1
                        logger.info(f"成功插入 {date_str} 的负荷数据")

                    except IntegrityError:
                        # Unique-constraint hit: the day already exists.
                        db.rollback()
                        self.stats['skipped_records'] += 1
                        logger.warning(f"跳过重复数据: {date_str}")

                else:
                    valid_count = sum(1 for v in load_values if v is not None)
                    logger.warning(f"{date_str} 数据不完整: {valid_count}/96 个有效点")
                    self.stats['skipped_records'] += 1

        except Exception as e:
            db.rollback()
            logger.error(f"保存到数据库时发生错误: {str(e)}")
        finally:
            db.close()

    def crawl_historical_data(self, start_year: int = 2015, end_year: int = 2025) -> None:
        """
        Crawl historical load data for a range of years, month by month.

        Args:
            start_year: first year (inclusive).
            end_year: last year (inclusive).
        """
        logger.info(f"开始爬取 {start_year} 到 {end_year} 年的负荷数据")

        # Request one calendar month at a time to keep response sizes small.
        for year in range(start_year, end_year + 1):
            for month in range(1, 13):
                # [first day of month, first day of next month)
                if month == 12:
                    start_date = f"{year}-{month:02d}-01"
                    end_date = f"{year + 1}-01-01"
                else:
                    start_date = f"{year}-{month:02d}-01"
                    end_date = f"{year}-{month + 1:02d}-01"

                logger.info(f"正在爬取 {year}年{month}月 的数据...")

                df = self.get_load_data(start_date, end_date)

                if df is not None and not df.empty:
                    # Regroup into daily 96-point records and persist.
                    daily_data = self._prepare_daily_data(df)
                    self.save_to_database(daily_data)

                    self.stats['total_records'] += len(df)

                    # Throttle after a successful fetch.
                    time.sleep(2)
                else:
                    logger.warning(f"{year}年{month}月 无数据或获取失败")

                # Pause between months to stay within API rate limits.
                time.sleep(5)

        self._print_stats()

    def _print_stats(self) -> None:
        """Log a summary of the crawl statistics."""
        logger.info("=" * 50)
        logger.info("爬取统计信息:")
        logger.info(f"总请求数: {self.stats['total_requests']}")
        logger.info(f"成功请求: {self.stats['successful_requests']}")
        logger.info(f"失败请求: {self.stats['failed_requests']}")
        logger.info(f"总记录数: {self.stats['total_records']}")
        logger.info(f"插入记录: {self.stats['inserted_records']}")
        logger.info(f"跳过记录: {self.stats['skipped_records']}")
        logger.info("=" * 50)

    def test_connection(self) -> bool:
        """
        Test the API connection with a small real query.

        Returns:
            True when data could be fetched, False otherwise.
        """
        try:
            # Query a one-day window ending today. The original used
            # start == end (today, today), a zero-length window that
            # returns no data even when the API works.
            end_dt = datetime.now()
            start_dt = end_dt - timedelta(days=1)
            df = self.get_load_data(start_dt.strftime('%Y-%m-%d'),
                                    end_dt.strftime('%Y-%m-%d'))

            if df is not None and not df.empty:
                logger.info("API连接测试成功")
                return True
            else:
                logger.error("API连接测试失败")
                return False

        except Exception as e:
            logger.error(f"API连接测试时发生错误: {str(e)}")
            return False


def main():
    """Entry point: validate the API token, then crawl 2015-2025 load data."""
    token = os.getenv('ENTSOE_TOKEN')

    # Without a token there is nothing to do; print setup instructions.
    if not token:
        logger.error("请设置环境变量 ENTSOE_TOKEN")
        for hint in ("获取Token的方法:",
                     "1. 访问 https://transparency.entsoe.eu/",
                     "2. 注册账户并登录",
                     "3. 在账户设置中获取API Token"):
            logger.info(hint)
        return

    crawler = ENTSOECrawler(token)

    # Verify the token/API before launching the long-running crawl.
    if not crawler.test_connection():
        logger.error("API连接失败，请检查Token是否正确")
        return

    try:
        crawler.crawl_historical_data(2015, 2025)
    except KeyboardInterrupt:
        logger.info("用户中断爬取过程")
    except Exception as e:
        logger.error(f"爬取过程中发生错误: {str(e)}")
    else:
        logger.info("数据爬取完成！")


# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main() 