import os
import json
import logging
import datetime
import threading
import pandas as pd
from concurrent.futures import ThreadPoolExecutor, as_completed

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class ComprehensiveDataCollector:
    """Multi-source market data collector.

    Integrates several market data providers (Tushare, Eastmoney, Sina,
    Tencent, Wind) behind a single interface.

    Features:
    1. Multi-source data integration with per-source fallback.
    2. Incremental update mechanism (per-data-type refresh frequency) to
       reduce bandwidth consumption.
    3. Real-time and historical data collection.
    4. Data quality validation and repair before caching / persisting.

    Thread-safety: the in-memory cache (``data_cache`` / ``last_update_time``)
    is guarded by ``data_lock``; collection methods may run concurrently on
    the internal thread pool.
    """

    def __init__(self, config_path="config/config.yaml"):
        """Initialize the data collector.

        Args:
            config_path: Path to the YAML config file. Currently only
                recorded — ``load_config`` uses a hard-coded config.
        """
        self.config_path = config_path
        self.load_config()
        # Dispatch table: source name -> handler(data_type, params).
        self.data_sources = {
            "tushare": self._collect_from_tushare,
            "eastmoney": self._collect_from_eastmoney,
            "sina": self._collect_from_sina,
            "tencent": self._collect_from_tencent,
            "wind": self._collect_from_wind
        }
        self.data_cache = {}        # cache_key -> last collected payload
        self.last_update_time = {}  # cache_key -> datetime of last refresh
        self.data_lock = threading.Lock()  # guards the two dicts above

    def load_config(self):
        """Load collector configuration.

        NOTE: this should parse ``self.config_path`` (YAML); for this
        simplified example a hard-coded config is used. On failure a minimal
        fallback config is installed so the collector stays usable.
        """
        try:
            self.config = {
                "data_sources": ["tushare", "eastmoney", "sina"],
                "api_keys": {
                    "tushare": "your_tushare_token",
                    "wind": "your_wind_token"
                },
                "data_path": "data/",
                # Refresh interval per data type, in days.
                "update_frequency": {
                    "basic": 7,
                    "trading": 0.042,  # ~1 hour
                    "financial": 7,
                    "valuation": 1,
                    "industry": 30,
                    "macro": 1
                },
                "max_threads": 10,
                "retry_times": 3,
                "timeout": 30
            }
            logger.info("配置加载成功")
        except Exception as e:
            logger.error(f"加载配置失败: {str(e)}")
            # Minimal fallback so later code can still read required keys.
            self.config = {"data_sources": ["tushare"], "max_threads": 5}

    def collect_stock_list(self):
        """Collect the stock universe from every configured source.

        Returns:
            list[dict]: de-duplicated stock records, each expected to carry
            a ``ts_code`` key. The first occurrence wins, so sources earlier
            in the config list take precedence.
        """
        logger.info("开始收集股票列表...")
        stock_list = []
        for source in self.config["data_sources"]:
            try:
                if source in self.data_sources:
                    source_stocks = self.data_sources[source]("stock_list", {})
                    if source_stocks:
                        stock_list.extend(source_stocks)
                        logger.info(f"从{source}获取到{len(source_stocks)}只股票")
            except Exception as e:
                logger.error(f"从{source}获取股票列表失败: {str(e)}")

        # De-duplicate by ts_code, keeping the first occurrence.
        unique_stocks = []
        seen = set()
        for stock in stock_list:
            if stock['ts_code'] not in seen:
                seen.add(stock['ts_code'])
                unique_stocks.append(stock)

        logger.info(f"去重后共获取到{len(unique_stocks)}只股票")
        return unique_stocks

    def collect_industry_data(self):
        """Collect industry membership data from every configured source.

        Returns:
            dict: industry name -> collection of stock codes. Per-industry
            collections from multiple sources are merged via ``update``.
        """
        logger.info("开始收集行业数据...")
        industry_data = {}
        for source in self.config["data_sources"]:
            try:
                if source in self.data_sources:
                    source_data = self.data_sources[source]("industry", {})
                    if source_data:
                        for industry, stocks in source_data.items():
                            if industry in industry_data:
                                industry_data[industry].update(stocks)
                            else:
                                industry_data[industry] = stocks
                        logger.info(f"从{source}获取到{len(source_data)}个行业数据")
            except Exception as e:
                logger.error(f"从{source}获取行业数据失败: {str(e)}")

        return industry_data

    def collect_macro_data(self):
        """Collect macro-economic data from every configured source.

        Returns:
            dict: merged macro indicators; later sources overwrite earlier
            ones on key collision (plain ``dict.update`` semantics).
        """
        logger.info("开始收集宏观经济数据...")
        macro_data = {}
        for source in self.config["data_sources"]:
            try:
                if source in self.data_sources:
                    source_data = self.data_sources[source]("macro", {})
                    if source_data:
                        macro_data.update(source_data)
                        logger.info(f"从{source}获取到宏观经济数据")
            except Exception as e:
                logger.error(f"从{source}获取宏观经济数据失败: {str(e)}")

        return macro_data

    def collect_stock_data(self, stock_codes, data_types=None):
        """Collect several data types for a batch of stocks in parallel.

        Args:
            stock_codes: list of stock codes.
            data_types: data types to collect, e.g.
                ['basic', 'trading', 'financial', 'valuation'];
                defaults to all supported types.

        Returns:
            dict: {stock_code: {data_type: data}} for every task that
            completed without raising.
        """
        if data_types is None:
            data_types = ['basic', 'trading', 'financial', 'valuation', 'forecast']

        logger.info(f"开始收集{len(stock_codes)}只股票的{len(data_types)}种数据...")

        results = {}
        # Fan out one task per (stock, data_type) pair and harvest results
        # as they complete, while the executor is still alive.
        with ThreadPoolExecutor(max_workers=self.config.get("max_threads", 5)) as executor:
            future_to_task = {
                executor.submit(self.collect_single_stock_data, stock_code, data_type):
                    (stock_code, data_type)
                for stock_code in stock_codes
                for data_type in data_types
            }
            for future in as_completed(future_to_task):
                stock_code, data_type = future_to_task[future]
                try:
                    data = future.result()
                    results.setdefault(stock_code, {})[data_type] = data
                except Exception as e:
                    logger.error(f"收集股票{stock_code}的{data_type}数据失败: {str(e)}")

        logger.info(f"完成数据收集，成功收集{len(results)}只股票数据")
        return results

    def collect_single_stock_data(self, stock_code, data_type):
        """Collect one data type for one stock, with caching and merging.

        Sources are queried in configured order and their payloads merged;
        Wind data takes precedence over anything collected before it. The
        validated result is cached in memory and persisted to disk.

        Returns:
            The merged payload, or None if no source produced data.
        """
        logger.debug(f"开始收集股票{stock_code}的{data_type}数据...")

        cache_key = f"{stock_code}_{data_type}"
        # Read the cache under the lock so a concurrent writer cannot race us.
        with self.data_lock:
            if cache_key in self.data_cache and self._check_cache_valid(cache_key, data_type):
                logger.debug(f"使用缓存数据: {cache_key}")
                return self.data_cache[cache_key]

        collected_data = None
        for source in self.config["data_sources"]:
            try:
                if source in self.data_sources:
                    source_data = self.data_sources[source](
                        data_type,
                        {"stock_code": stock_code}
                    )
                    if source_data:
                        if collected_data is None:
                            collected_data = source_data
                        elif source == "wind":
                            # Merge priority: Wind wins over what we already have.
                            collected_data = self._merge_data(source_data, collected_data)
                        else:
                            collected_data = self._merge_data(collected_data, source_data)
                        logger.debug(f"从{source}获取到股票{stock_code}的{data_type}数据")
            except Exception as e:
                logger.error(f"从{source}获取股票{stock_code}的{data_type}数据失败: {str(e)}")

        if collected_data:
            # Quality check / repair before the data becomes visible.
            collected_data = self._validate_and_fix_data(collected_data, data_type)

            with self.data_lock:
                self.data_cache[cache_key] = collected_data
                self.last_update_time[cache_key] = datetime.datetime.now()

            self._save_data_to_file(stock_code, data_type, collected_data)

        return collected_data

    def _check_cache_valid(self, cache_key, data_type):
        """Return True if the cached entry is fresher than its refresh interval.

        The interval comes from ``config["update_frequency"]`` (days),
        defaulting to 1 day for unknown data types.
        """
        if cache_key not in self.last_update_time:
            return False

        last_update = self.last_update_time[cache_key]
        now = datetime.datetime.now()
        days_diff = (now - last_update).total_seconds() / (24 * 3600)

        update_frequency = self.config.get("update_frequency", {}).get(data_type, 1)
        return days_diff < update_frequency

    def _merge_data(self, data1, data2):
        """Merge ``data2`` into a copy of ``data1``; ``data1`` values win.

        Lists of records are unioned by ``trade_date``; nested dicts are
        merged recursively. Neither input is mutated. For non-dict inputs
        ``data1`` is returned unchanged.
        """
        if isinstance(data1, dict) and isinstance(data2, dict):
            merged = data1.copy()
            for key, value in data2.items():
                if key not in merged:
                    merged[key] = value
                elif isinstance(value, list) and isinstance(merged[key], list):
                    # Union record lists by trade_date WITHOUT mutating the
                    # original list from data1 (copy() above is shallow).
                    existing_dates = {item.get('trade_date', '') for item in merged[key]}
                    combined = list(merged[key])
                    for item in value:
                        if item.get('trade_date', '') not in existing_dates:
                            combined.append(item)
                    merged[key] = combined
                elif isinstance(value, dict) and isinstance(merged[key], dict):
                    # Recursively merge nested dicts.
                    merged[key] = self._merge_data(merged[key], value)
            return merged
        return data1  # default: keep the first data source's payload

    def _validate_and_fix_data(self, data, data_type):
        """Validate and repair collected data (in place) before caching.

        trading: drops daily bars with negative prices, or zero volume but a
        non-zero price change.
        financial: recomputes ROE from net income / equity when the reported
        ROE deviates by more than 5 points.
        """
        if not data:
            return data

        if data_type == 'trading':
            if isinstance(data, dict) and 'daily' in data:
                fixed_daily = []
                for item in data['daily']:
                    # Drop bars with negative prices.
                    if item.get('close', 0) < 0 or item.get('open', 0) < 0:
                        logger.warning(f"检测到异常价格数据: {item}")
                        continue
                    # Drop bars with zero volume but a price change.
                    if item.get('vol', 0) == 0 and item.get('pct_chg', 0) != 0:
                        logger.warning(f"检测到异常交易量数据: {item}")
                        continue
                    fixed_daily.append(item)
                data['daily'] = fixed_daily

        elif data_type == 'financial':
            if isinstance(data, dict) and 'indicator' in data:
                for item in data['indicator']:
                    # Cross-check reported ROE against net income / equity.
                    if 'roe' in item and 'n_income_attr_p' in item and 'total_hldr_eqy_exc_min_int' in item:
                        if item['total_hldr_eqy_exc_min_int'] > 0:
                            calculated_roe = item['n_income_attr_p'] / item['total_hldr_eqy_exc_min_int'] * 100
                            if abs(calculated_roe - item['roe']) > 5:  # 5-point tolerance
                                logger.warning(f"ROE数据不一致，使用计算值替代")
                                item['roe'] = calculated_roe

        return data

    def _save_data_to_file(self, stock_code, data_type, data):
        """Persist a payload as JSON under <data_path>/tushare_data/stocks/<code>/.

        File name pattern: ``<data_type>_<YYYYMMDD>.json``. Failures are
        logged but not raised (persistence is best-effort).
        """
        try:
            data_path = self.config.get("data_path", "data/")
            today = datetime.datetime.now().strftime("%Y%m%d")
            stock_dir = os.path.join(data_path, "tushare_data", "stocks", stock_code)
            os.makedirs(stock_dir, exist_ok=True)

            filename = f"{data_type}_{today}.json"
            file_path = os.path.join(stock_dir, filename)

            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=4)

            logger.debug(f"数据已保存到: {file_path}")
        except Exception as e:
            logger.error(f"保存数据失败: {str(e)}")

    def _collect_from_tushare(self, data_type, params):
        """Collect data from Tushare.

        A real implementation would call the Tushare API; this version
        returns sample data ("stock_list", "industry") or reads a local
        snapshot file ("basic"). Returns None for unsupported types.
        """
        if data_type == "stock_list":
            # Sample data.
            return [
                {"ts_code": "000001.SZ", "name": "平安银行", "industry": "银行"},
                {"ts_code": "000002.SZ", "name": "万科A", "industry": "房地产"},
                # ... more stocks
            ]
        elif data_type == "industry":
            # Sample data.
            return {
                "银行": {"000001.SZ", "600000.SH"},
                "房地产": {"000002.SZ", "600048.SH"},
                # ... more industries
            }
        elif data_type == "basic" and "stock_code" in params:
            # Demo: read a previously saved local snapshot if present.
            stock_code = params["stock_code"]
            try:
                data_path = self.config.get("data_path", "data/")
                file_path = os.path.join(data_path, "tushare_data", "stocks", stock_code, "basic_20250301.json")
                if os.path.exists(file_path):
                    with open(file_path, 'r', encoding='utf-8') as f:
                        return json.load(f)
            except Exception as e:
                logger.error(f"读取本地数据失败: {str(e)}")
            return None

        # Other data types not implemented yet.
        return None

    def _collect_from_eastmoney(self, data_type, params):
        """Collect data from Eastmoney (stub — real version would scrape/call an API)."""
        return None

    def _collect_from_sina(self, data_type, params):
        """Collect data from Sina Finance (stub — real version would scrape/call an API)."""
        return None

    def _collect_from_tencent(self, data_type, params):
        """Collect data from Tencent Finance (stub — real version would scrape/call an API)."""
        return None

    def _collect_from_wind(self, data_type, params):
        """Collect data from Wind (stub — real version would call the Wind API)."""
        return None

    def update_all_data(self, force=False):
        """Refresh every dataset: stock list, industries, macro, then per-stock data.

        Args:
            force: accepted for API compatibility; currently not honored —
                cache freshness alone decides whether data is re-fetched.
                TODO: bypass the cache when force is True.
        """
        logger.info("开始更新所有数据...")

        # 1. Refresh the stock universe.
        stock_list = self.collect_stock_list()

        # 2. Refresh industry data.
        self.collect_industry_data()

        # 3. Refresh macro data.
        self.collect_macro_data()

        # 4. Refresh per-stock data in batches to bound memory/thread load.
        stock_codes = [stock['ts_code'] for stock in stock_list]
        batch_size = 50

        for i in range(0, len(stock_codes), batch_size):
            batch = stock_codes[i:i+batch_size]
            self.collect_stock_data(batch)
            logger.info(f"已完成{i+len(batch)}/{len(stock_codes)}只股票的数据更新")

        logger.info("所有数据更新完成")

# Usage example
def main():
    """Demo driver: exercise each public collection entry point once."""
    collector = ComprehensiveDataCollector()
    # Fetch the de-duplicated stock universe.
    stock_list = collector.collect_stock_list()
    # Fetch industry membership data.
    industry_data = collector.collect_industry_data()
    # Fetch macro-economic indicators.
    macro_data = collector.collect_macro_data()
    # Fetch all data types for two specific tickers.
    stock_data = collector.collect_stock_data(["300750", "688981"])
    # Full refresh of every dataset (disabled by default):
    # collector.update_all_data()


if __name__ == "__main__":
    main()