# -*- coding: utf-8 -*-
# Author   : ZhangQing
# Time     : 2025-07-15 23:18
# File     : aggregator.py
# Project  : dynamic-portfolio-optimizer
# Desc     : 数据聚合器

from typing import Dict, List, Optional, Any
import pandas as pd
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
import asyncio
from dataclasses import dataclass
from .adapters.base_adapter import BaseDataAdapter
from .adapters.yfinance_adapter import YFinanceAdapter
from .adapters.alpha_vantage_adapter import AlphaVantageAdapter
from .adapters.polygon_adapter import PolygonAdapter
from .adapters.alpaca_adapter import AlpacaAdapter
from .adapters.finnhub_adapter import FinnhubAdapter
from .adapters.twelve_data_adapter import TwelveDataAdapter
from .adapters.marketstack_adapter import MarketstackAdapter
from .adapters.ib_adapter import IBAdapter
from config.data_sources import DataSourceManager
import logging

logger = logging.getLogger(__name__)


@dataclass
class DataRequest:
    """A single data-fetch request routed through the DataAggregator.

    Carries the symbol, date range, and optional source-selection hints
    that the aggregator uses when fanning the request out to adapters.
    """
    symbol: str  # ticker symbol, e.g. 'AAPL'
    start_date: datetime  # start of the requested range
    end_date: datetime  # end of the range; also passed as expiration for options requests
    interval: str = '1d'  # bar interval forwarded to adapters, e.g. '1d'
    data_type: str = 'stock'  # stock, options, fundamentals, news
    sources: Optional[List[str]] = None  # restrict fetching to these source names; None = all adapters
    priority_sources: Optional[List[str]] = None  # sources whose non-empty result is returned immediately


class DataAggregator:
    """Multi-source market-data aggregator.

    Fans each request out to the configured source adapters in parallel,
    tolerates individual source failures (they are logged, not raised),
    and combines whatever comes back: price frames are merged column-wise,
    fundamentals dicts are unioned, and news items are de-duplicated by
    title and sorted newest-first.
    """

    def __init__(self):
        """Build one adapter per data source enabled in the configuration."""
        self.source_manager = DataSourceManager()
        # source name -> initialized adapter instance
        self.adapters: Dict[str, BaseDataAdapter] = {}
        self._init_adapters()

    def _init_adapters(self):
        """Instantiate an adapter for every enabled data source.

        A source name with no known adapter class is skipped with a warning
        (previously it was wrongly logged as initialized); a failing adapter
        constructor is logged and does not abort the remaining ones.
        """
        # Built at call time, matching the original lazy resolution of the
        # adapter classes.
        adapter_classes = {
            'yfinance': YFinanceAdapter,
            'alpha_vantage': AlphaVantageAdapter,
            'polygon': PolygonAdapter,
            'alpaca': AlpacaAdapter,
            'finnhub': FinnhubAdapter,
            'twelve_data': TwelveDataAdapter,
            'marketstack': MarketstackAdapter,
            'ib_api': IBAdapter,
        }

        for source_name, config in self.source_manager.get_enabled_sources().items():
            adapter_cls = adapter_classes.get(source_name)
            if adapter_cls is None:
                # Enabled in config but no adapter implementation exists.
                logger.warning(f"⚠️ 数据源{source_name}没有对应的适配器，已跳过")
                continue
            try:
                self.adapters[source_name] = adapter_cls(config)
                # Only log success when an adapter was actually created.
                logger.info(f"✅ 已初始化{config.name}适配器")
            except Exception as e:
                logger.error(f"❌ 初始化{config.name}适配器失败: {e}")

    def get_stock_data(self, request: DataRequest) -> pd.DataFrame:
        """Fetch price history for ``request.symbol`` across selected sources."""
        return self._fetch_data_parallel(request, 'get_stock_data')

    def get_options_data(self, request: DataRequest) -> pd.DataFrame:
        """Fetch options data; ``request.end_date`` is used as the expiration."""
        return self._fetch_data_parallel(request, 'get_options_data')

    def get_fundamentals(self, request: DataRequest) -> Dict[str, Any]:
        """Fetch fundamental data, merged across all responding sources."""
        return self._fetch_fundamentals_parallel(request)

    def get_news(self, request: DataRequest, limit: int = 10) -> List[Dict[str, Any]]:
        """Fetch up to ``limit`` de-duplicated news items, newest first."""
        return self._fetch_news_parallel(request, limit)

    def _resolve_sources(self, request: DataRequest) -> List[str]:
        """Return the adapter names to query, priority sources first.

        Falls back to every initialized adapter when the request does not
        restrict sources, and drops names that have no adapter so callers
        can size their thread pools on real work.
        """
        sources = request.sources or list(self.adapters.keys())
        if request.priority_sources:
            preferred = [s for s in request.priority_sources if s in sources]
            rest = [s for s in sources if s not in request.priority_sources]
            sources = preferred + rest
        return [s for s in sources if s in self.adapters]

    def _fetch_data_parallel(self, request: DataRequest, method_name: str) -> pd.DataFrame:
        """Call ``method_name`` on every selected adapter concurrently.

        Returns the first non-empty result from a priority source when one
        is configured; otherwise merges all non-empty results. Returns an
        empty DataFrame when nothing could be fetched.
        """
        sources_to_use = self._resolve_sources(request)
        if not sources_to_use:
            # Guard: ThreadPoolExecutor(max_workers=0) raises ValueError.
            logger.warning(f"⚠️ 没有可用的数据源获取{request.symbol}的数据")
            return pd.DataFrame()

        results: List[pd.DataFrame] = []

        with ThreadPoolExecutor(max_workers=min(len(sources_to_use), 5)) as executor:
            future_to_source = {}
            for source_name in sources_to_use:
                method = getattr(self.adapters[source_name], method_name)
                if method_name == 'get_options_data':
                    # Options interface takes (symbol, expiration) only.
                    future = executor.submit(method, request.symbol, request.end_date)
                else:
                    future = executor.submit(
                        method, request.symbol, request.start_date,
                        request.end_date, request.interval
                    )
                future_to_source[future] = source_name

            for future in as_completed(future_to_source):
                source_name = future_to_source[future]
                try:
                    result = future.result(timeout=30)
                    # Treat a None return the same as an empty frame instead
                    # of letting `.empty` raise AttributeError.
                    if result is not None and not result.empty:
                        results.append(result)
                        logger.info(f"✅ 从{source_name}获取到{len(result)}条数据")
                        # Short-circuit on a priority source; the remaining
                        # futures finish during executor shutdown.
                        if request.priority_sources and source_name in request.priority_sources:
                            return result
                    else:
                        logger.warning(f"⚠️ {source_name}返回空数据")
                except Exception as e:
                    logger.error(f"❌ 从{source_name}获取数据失败: {e}")

        if results:
            return self._merge_dataframes(results)
        logger.warning(f"⚠️ 所有数据源都未能获取到{request.symbol}的数据")
        return pd.DataFrame()

    def _fetch_fundamentals_parallel(self, request: DataRequest) -> Dict[str, Any]:
        """Fetch fundamentals from all selected adapters and union the dicts.

        A priority source's non-empty result is returned as-is; otherwise
        later-arriving results overwrite overlapping keys.
        """
        sources_to_use = self._resolve_sources(request)
        if not sources_to_use:
            # Guard: an empty pool would raise ValueError.
            logger.warning(f"⚠️ 没有可用的数据源获取{request.symbol}的数据")
            return {}

        fundamentals_data: Dict[str, Any] = {}

        with ThreadPoolExecutor(max_workers=min(len(sources_to_use), 5)) as executor:
            future_to_source = {
                executor.submit(self.adapters[s].get_fundamentals, request.symbol): s
                for s in sources_to_use
            }
            for future in as_completed(future_to_source):
                source_name = future_to_source[future]
                try:
                    result = future.result(timeout=30)
                    if result:
                        fundamentals_data.update(result)
                        logger.info(f"✅ 从{source_name}获取到基本面数据")
                        # Priority source wins outright.
                        if request.priority_sources and source_name in request.priority_sources:
                            return result
                except Exception as e:
                    logger.error(f"❌ 从{source_name}获取基本面数据失败: {e}")

        return fundamentals_data

    def _fetch_news_parallel(self, request: DataRequest, limit: int) -> List[Dict[str, Any]]:
        """Collect news from all selected adapters, de-duplicate by title,
        and return the ``limit`` most recent items."""
        sources_to_use = self._resolve_sources(request)
        if not sources_to_use:
            # Guard: an empty pool would raise ValueError.
            logger.warning(f"⚠️ 没有可用的数据源获取{request.symbol}的数据")
            return []

        all_news: List[Dict[str, Any]] = []

        with ThreadPoolExecutor(max_workers=min(len(sources_to_use), 5)) as executor:
            future_to_source = {
                executor.submit(self.adapters[s].get_news, request.symbol, limit): s
                for s in sources_to_use
            }
            for future in as_completed(future_to_source):
                source_name = future_to_source[future]
                try:
                    result = future.result(timeout=30)
                    if result:
                        all_news.extend(result)
                        logger.info(f"✅ 从{source_name}获取到{len(result)}条新闻")
                except Exception as e:
                    logger.error(f"❌ 从{source_name}获取新闻失败: {e}")

        unique_news = self._deduplicate_news(all_news)
        # Items missing 'published_at' now sort last instead of raising
        # KeyError. NOTE(review): assumes 'published_at' values are mutually
        # comparable (e.g. all ISO-8601 strings) — confirm against adapters.
        unique_news.sort(key=lambda item: item.get('published_at', ''), reverse=True)
        return unique_news[:limit]

    def _merge_dataframes(self, dataframes: List[pd.DataFrame]) -> pd.DataFrame:
        """Merge per-source frames into one, filling gaps in the best frame.

        The "best" frame has the most rows, then the fewest missing values,
        then a 'volume' column; the other frames only fill its NaN cells on
        shared index labels.
        """
        if not dataframes:
            return pd.DataFrame()
        if len(dataframes) == 1:
            return dataframes[0]

        # Fewer NaNs must rank higher, hence the negation under reverse=True
        # (the original un-negated count preferred the frame with MORE gaps).
        sorted_dfs = sorted(
            dataframes,
            key=lambda df: (
                len(df),
                -int(df.isnull().sum().sum()),
                1 if 'volume' in df.columns else 0,
            ),
            reverse=True,
        )

        # Best data source is the baseline; never overwrite its real values.
        base_df = sorted_dfs[0].copy()

        for df in sorted_dfs[1:]:
            common_index = base_df.index.intersection(df.index)
            if common_index.empty:
                continue
            for col in df.columns:
                if col not in base_df.columns or col in ('source', 'symbol'):
                    continue
                # Fill only the cells the better source left empty.
                mask = base_df.loc[common_index, col].isnull()
                if mask.any():
                    base_df.loc[common_index[mask], col] = df.loc[common_index[mask], col]

        # Record contributing sources deterministically (sorted, not raw set
        # order) so repeated merges of the same inputs produce the same tag.
        sources = [df['source'].iloc[0] for df in dataframes if 'source' in df.columns]
        base_df['sources'] = ','.join(sorted(set(sources)))

        return base_df

    def _deduplicate_news(self, news_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Drop items whose normalized (lowercased, stripped) title was
        already seen; the first occurrence wins."""
        seen_titles = set()
        unique_news = []

        for news in news_list:
            title = news.get('title', '').lower().strip()
            if title and title not in seen_titles:
                seen_titles.add(title)
                unique_news.append(news)

        return unique_news

    def get_available_sources(self) -> List[str]:
        """Return the names of all successfully initialized data sources."""
        return list(self.adapters.keys())

    def test_connections(self) -> Dict[str, bool]:
        """Probe every adapter with a tiny AAPL request; True means usable."""
        test_symbol = 'AAPL'
        test_start = datetime(2024, 1, 1)
        test_end = datetime(2024, 1, 2)

        results = {}

        for source_name, adapter in self.adapters.items():
            try:
                # A one-day window keeps the probe cheap.
                test_data = adapter.get_stock_data(test_symbol, test_start, test_end)
                results[source_name] = not test_data.empty
            except Exception as e:
                logger.error(f"测试{source_name}连接失败: {e}")
                results[source_name] = False

        return results