import json
import logging
import os
import pickle
import re
import time
import zlib
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional, Tuple, Callable, Union, Type

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import seaborn as sns

# Try to import optional NLP libraries; module-level flags record which
# ones are available so downstream code can degrade gracefully.

# jieba: Chinese word segmentation / keyword extraction (TextAnalyzer.extract_keywords)
try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    logging.warning("jieba未安装，文本分析功能将受限")

# SnowNLP: lightweight Chinese sentiment scoring (TextAnalyzer.analyze_sentiment)
try:
    from snownlp import SnowNLP
    SNOWNLP_AVAILABLE = True
except ImportError:
    SNOWNLP_AVAILABLE = False
    logging.warning("SnowNLP未安装，情感分析功能将受限")

# transformers + torch: optional heavyweight sentiment pipeline
try:
    from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
    import torch
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
    logging.warning("transformers未安装，高级NLP功能将受限")


class AlternativeDataProvider:
    """Base class for alternative-data providers with pickle-based file caching.

    Subclasses implement hook methods named ``_get_<data_type>``;
    :meth:`get_alternative_data` dispatches to them by name and
    transparently caches non-empty results on disk.
    """

    def __init__(self, cache_dir: str = "./data_cache/alternative", cache_ttl: int = 86400):
        """
        Initialize the provider.

        Args:
            cache_dir: Directory for cached pickles (created if missing).
            cache_ttl: Cache lifetime in seconds; values <= 0 disable expiry.
        """
        self.cache_dir = cache_dir
        self.cache_ttl = cache_ttl
        os.makedirs(cache_dir, exist_ok=True)

    def _get_cache_path(self, data_type: str, params: Dict[str, Any]) -> str:
        """Build a deterministic cache-file path for (data_type, params).

        Only scalar params (str/int/float/bool) participate in the file
        name; non-scalar values are silently skipped, so two calls that
        differ only in non-scalar params share a cache entry.
        """
        parts = []
        for key, value in sorted(params.items()):
            if isinstance(value, (str, int, float, bool)):
                parts.append(f"{key}={value}")
        return os.path.join(self.cache_dir, f"{data_type}_{'_'.join(parts)}.pkl")

    def _load_from_cache(self, data_type: str, params: Dict[str, Any]) -> Optional[pd.DataFrame]:
        """Return the cached frame, or None when missing, expired, or unreadable."""
        path = self._get_cache_path(data_type, params)
        if not os.path.exists(path):
            return None

        # A positive TTL defines an expiry window measured from file mtime.
        if self.cache_ttl > 0 and time.time() - os.path.getmtime(path) > self.cache_ttl:
            return None

        try:
            with open(path, 'rb') as fh:
                return pickle.load(fh)
        except Exception as e:
            logging.warning(f"加载缓存失败: {str(e)}")
            return None

    def _save_to_cache(self, data: pd.DataFrame, data_type: str, params: Dict[str, Any]) -> None:
        """Pickle `data` into the cache; None/empty frames are skipped."""
        if data is None or data.empty:
            logging.warning(f"数据为空，不缓存 {data_type}")
            return

        path = self._get_cache_path(data_type, params)
        try:
            with open(path, 'wb') as fh:
                pickle.dump(data, fh)
        except Exception as e:
            logging.warning(f"保存缓存失败: {str(e)}")

    def get_alternative_data(self, data_type: str, **params) -> Optional[pd.DataFrame]:
        """Fetch data of `data_type`, serving from cache when possible.

        Dispatches to the subclass hook ``_get_<data_type>(**params)`` and
        caches any non-empty result. Returns None for unknown types or on
        any fetch error.
        """
        cached = self._load_from_cache(data_type, params)
        if cached is not None:
            return cached

        fetcher = getattr(self, f"_get_{data_type}", None)
        if fetcher is None:
            logging.error(f"未知的数据类型: {data_type}")
            return None

        try:
            data = fetcher(**params)
            if data is not None and not data.empty:
                self._save_to_cache(data, data_type, params)
            return data
        except Exception as e:
            logging.error(f"获取{data_type}数据失败: {str(e)}")
            return None


class BaiduIndexProvider(AlternativeDataProvider):
    """Baidu Index provider for search-index and media-index time series.

    Real API access requires a logged-in cookie; this implementation always
    falls back to deterministic dummy series (see the ``_get_*`` methods).
    """

    def __init__(self, cookie: str = None, cache_dir: str = "./data_cache/alternative", cache_ttl: int = 86400):
        """
        Initialize the Baidu Index provider.

        Args:
            cookie: Baidu Index session cookie; None disables real API access.
            cache_dir: cache directory.
            cache_ttl: cache lifetime in seconds.
        """
        super().__init__(cache_dir, cache_ttl)
        self.cookie = cookie
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Only attach the Cookie header when one was actually supplied:
        # requests rejects None header values at send time.
        if cookie is not None:
            self.headers['Cookie'] = cookie

    @staticmethod
    def _stable_seed(key: str, offset: int = 0) -> int:
        """Process-stable RNG seed derived from a text key.

        The previous implementation seeded with built-in hash(), whose str
        output is salted per interpreter process (PYTHONHASHSEED), so
        "same keyword -> same dummy data" only held within a single run.
        CRC32 is stable across processes.
        """
        return zlib.crc32(key.encode('utf-8')) % 10000 + offset

    def _get_search_index(self, keyword: str, start_date: str, end_date: str) -> pd.DataFrame:
        """
        Fetch Baidu search-index data for `keyword` in [start_date, end_date].

        Args:
            keyword: search keyword.
            start_date: start date (YYYY-MM-DD).
            end_date: end date (YYYY-MM-DD).

        Returns:
            pd.DataFrame: date-indexed frame with 'keyword' and 'search_index'.
        """
        if not self.cookie:
            logging.warning("未设置百度指数cookie，无法获取搜索指数数据")
            return self._generate_dummy_search_index(keyword, start_date, end_date)

        # A real implementation would call the Baidu Index API here; due to
        # API access restrictions dummy data is used instead.
        return self._generate_dummy_search_index(keyword, start_date, end_date)

    def _generate_dummy_search_index(self, keyword: str, start_date: str, end_date: str) -> pd.DataFrame:
        """Generate deterministic dummy search-index data for `keyword`."""
        logging.warning("使用模拟百度搜索指数数据")

        # Daily date range over the requested window.
        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')
        dates = pd.date_range(start=start, end=end, freq='D')

        # Seed from the keyword so the same keyword always yields the same
        # series, including across interpreter restarts.
        np.random.seed(self._stable_seed(keyword))
        base_value = 500 + np.random.randint(0, 500)
        trend = np.random.choice([1, 0, -1])  # rising, flat, or falling trend

        if trend == 1:
            # Rising trend
            indices = base_value + np.linspace(0, 300, len(dates)) + np.random.normal(0, 50, len(dates))
        elif trend == -1:
            # Falling trend
            indices = base_value + np.linspace(300, 0, len(dates)) + np.random.normal(0, 50, len(dates))
        else:
            # Flat trend
            indices = base_value + np.random.normal(0, 50, len(dates))

        # Superimpose a periodic (seasonal-like) cycle.
        indices += 100 * np.sin(np.linspace(0, 6 * np.pi, len(dates)))

        df = pd.DataFrame({
            'date': dates,
            'keyword': keyword,
            'search_index': indices.astype(int)
        })

        df.set_index('date', inplace=True)
        return df

    def _get_media_index(self, keyword: str, start_date: str, end_date: str) -> pd.DataFrame:
        """
        Fetch Baidu media-index data for `keyword` in [start_date, end_date].

        Args:
            keyword: search keyword.
            start_date: start date (YYYY-MM-DD).
            end_date: end date (YYYY-MM-DD).

        Returns:
            pd.DataFrame: date-indexed frame with 'keyword' and 'media_index'.
        """
        if not self.cookie:
            logging.warning("未设置百度指数cookie，无法获取媒体指数数据")
            return self._generate_dummy_media_index(keyword, start_date, end_date)

        # A real implementation would call the Baidu Index API here; due to
        # API access restrictions dummy data is used instead.
        return self._generate_dummy_media_index(keyword, start_date, end_date)

    def _generate_dummy_media_index(self, keyword: str, start_date: str, end_date: str) -> pd.DataFrame:
        """Generate deterministic dummy media-index data for `keyword`."""
        logging.warning("使用模拟百度媒体指数数据")

        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')
        dates = pd.date_range(start=start, end=end, freq='D')

        # Offset 1 keeps this stream decorrelated from the search index
        # while remaining deterministic for the same keyword.
        np.random.seed(self._stable_seed(keyword, 1))
        base_value = 200 + np.random.randint(0, 300)

        # Media coverage is noisier than search interest.
        indices = base_value + np.random.normal(0, 80, len(dates))

        # Inject 1-4 transient news spikes with exponential decay.
        num_peaks = np.random.randint(1, 5)
        for _ in range(num_peaks):
            peak_idx = np.random.randint(0, len(dates))
            peak_length = np.random.randint(1, 7)       # lasts 1-6 days
            peak_height = np.random.randint(200, 1000)  # spike magnitude

            for i in range(peak_length):
                if peak_idx + i < len(dates):
                    decay = np.exp(-i / 2)  # exponential spike decay
                    indices[peak_idx + i] += peak_height * decay

        df = pd.DataFrame({
            'date': dates,
            'keyword': keyword,
            'media_index': indices.astype(int)
        })

        df.set_index('date', inplace=True)
        return df


class NewsProvider(AlternativeDataProvider):
    """Company-news provider yielding a per-day mean sentiment score."""

    def __init__(self, api_key: str = None, cache_dir: str = "./data_cache/alternative", cache_ttl: int = 86400):
        """
        Initialize the news data provider.

        Args:
            api_key: news API key; None disables real API access.
            cache_dir: cache directory.
            cache_ttl: cache lifetime in seconds.
        """
        super().__init__(cache_dir, cache_ttl)
        self.api_key = api_key

    def _get_company_news(self, symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
        """
        Fetch company news for `symbol` within [start_date, end_date].

        Args:
            symbol: stock code.
            start_date: start date (YYYY-MM-DD).
            end_date: end date (YYYY-MM-DD).

        Returns:
            pd.DataFrame: date-indexed frame with 'symbol' and daily mean 'sentiment'.
        """
        if not self.api_key:
            logging.warning("未设置API密钥，无法获取新闻数据")
            return self._generate_dummy_news(symbol, start_date, end_date)

        # A real implementation would call a news API here; due to API
        # access restrictions dummy data is used instead.
        return self._generate_dummy_news(symbol, start_date, end_date)

    def _generate_dummy_news(self, symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
        """Generate deterministic dummy news sentiment for `symbol`.

        Titles are synthesized but discarded by the final aggregation; only
        the mean sentiment per (date, symbol) survives in the output.
        """
        logging.warning("使用模拟新闻数据")

        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')

        # CRC32-based seed: stable across processes, unlike built-in hash(),
        # whose str output is salted per run (PYTHONHASHSEED).
        np.random.seed(zlib.crc32(symbol.encode('utf-8')) % 10000 + 2)

        company_name = f"公司{symbol}"

        # Headline templates; the three {} slots take subject/action/result.
        title_templates = [
            f"{company_name}发布{{}}{{}}{{}}", 
            f"{company_name}宣布{{}}{{}}{{}}", 
            f"{company_name}{{}}{{}}{{}}", 
            f"业内分析：{company_name}{{}}{{}}{{}}", 
            f"投资者关注：{company_name}{{}}{{}}{{}}"
        ]

        subjects = ["季度财报", "年度业绩", "新产品", "战略合作", "管理层变动", "业务扩张", "成本控制", "股息政策"]
        actions = ["实现", "推出", "公布", "收购", "投资", "变更", "优化", "调整"]
        results = [
            "超出市场预期", "低于分析师预测", "符合行业标准", "显著提升营收", "导致股价波动", 
            "获得积极反馈", "引发投资者担忧", "对未来发展影响深远"
        ]

        # Pre-draw a pool of sentiment labels (~30% negative / 40% neutral /
        # 30% positive); individual articles sample from this pool.
        sentiments = np.random.choice([-1, 0, 1], size=30, p=[0.3, 0.4, 0.3])

        news_list = []
        current_date = start

        while current_date <= end:
            # 0-3 articles per day.
            num_news = np.random.randint(0, 4)

            for _ in range(num_news):
                title_template = np.random.choice(title_templates)
                subject = np.random.choice(subjects)
                action = np.random.choice(actions)
                result = np.random.choice(results)

                title = title_template.format(subject, action, result)

                sentiment_score = np.random.choice(sentiments)

                news_list.append({
                    'date': current_date,
                    'symbol': symbol,
                    'title': title,
                    'sentiment': sentiment_score
                })

            current_date += timedelta(days=1)

        if news_list:
            df = pd.DataFrame(news_list)
            # Multiple articles on the same day collapse to their mean
            # sentiment; titles are dropped here.
            df = df.groupby(['date', 'symbol']).agg({'sentiment': 'mean'}).reset_index()
            df.set_index('date', inplace=True)
            return df
        else:
            return pd.DataFrame(columns=['date', 'symbol', 'sentiment']).set_index('date')


class SocialMediaProvider(AlternativeDataProvider):
    """Social-media provider yielding daily sentiment and discussion volume."""

    def __init__(self, api_key: str = None, cache_dir: str = "./data_cache/alternative", cache_ttl: int = 86400):
        """
        Initialize the social-media data provider.

        Args:
            api_key: API key; None disables real API access.
            cache_dir: cache directory.
            cache_ttl: cache lifetime in seconds.
        """
        super().__init__(cache_dir, cache_ttl)
        self.api_key = api_key

    def _get_social_sentiment(self, keyword: str, start_date: str, end_date: str, platform: str = "all") -> pd.DataFrame:
        """
        Fetch social-media sentiment for `keyword` in [start_date, end_date].

        Args:
            keyword: search keyword.
            start_date: start date (YYYY-MM-DD).
            end_date: end date (YYYY-MM-DD).
            platform: one of 'weibo', 'wechat', 'all'.

        Returns:
            pd.DataFrame: date-indexed frame with 'sentiment' and 'volume'.
        """
        if not self.api_key:
            logging.warning("未设置API密钥，无法获取社交媒体数据")
            return self._generate_dummy_social_sentiment(keyword, start_date, end_date, platform)

        # A real implementation would call a social-media API here; due to
        # API access restrictions dummy data is used instead.
        return self._generate_dummy_social_sentiment(keyword, start_date, end_date, platform)

    def _generate_dummy_social_sentiment(self, keyword: str, start_date: str, end_date: str, platform: str = "all") -> pd.DataFrame:
        """Generate deterministic dummy sentiment/volume series."""
        logging.warning("使用模拟社交媒体情绪数据")

        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')
        dates = pd.date_range(start=start, end=end, freq='D')

        # CRC32 seed keyed on keyword+platform: stable across processes,
        # unlike built-in hash(), which is salted per run (PYTHONHASHSEED).
        np.random.seed(zlib.crc32((keyword + platform).encode('utf-8')) % 10000 + 3)

        # Sentiment scores in [-1, 1] around a random base bias.
        sentiment_mean = np.random.uniform(-0.3, 0.3)
        sentiment_scores = sentiment_mean + np.random.normal(0, 0.2, len(dates))
        sentiment_scores = np.clip(sentiment_scores, -1, 1)

        # Discussion volume: noisy but kept non-negative.
        base_volume = np.random.randint(50, 500)
        volume = base_volume + np.random.normal(0, base_volume * 0.3, len(dates))
        volume = np.maximum(volume, 0)

        # Inject 1-4 transient hot topics that boost volume and shift sentiment.
        num_peaks = np.random.randint(1, 5)
        for _ in range(num_peaks):
            peak_idx = np.random.randint(0, len(dates))
            peak_length = np.random.randint(1, 6)  # lasts 1-5 days
            peak_volume = np.random.randint(base_volume, base_volume * 10)
            peak_sentiment_shift = np.random.uniform(-0.5, 0.5)

            for i in range(peak_length):
                if peak_idx + i < len(dates):
                    decay = np.exp(-i / 2)  # exponential spike decay
                    volume[peak_idx + i] += peak_volume * decay
                    sentiment_scores[peak_idx + i] += peak_sentiment_shift * decay
                    # Keep the shifted score inside [-1, 1].
                    sentiment_scores[peak_idx + i] = np.clip(sentiment_scores[peak_idx + i], -1, 1)

        df = pd.DataFrame({
            'date': dates,
            'keyword': keyword,
            'platform': platform,
            'sentiment': sentiment_scores,
            'volume': volume.astype(int)
        })

        df.set_index('date', inplace=True)
        return df


class AnnouncementProvider(AlternativeDataProvider):
    """Company-announcement provider (finance / operation / other notices)."""

    def __init__(self, api_key: str = None, cache_dir: str = "./data_cache/alternative", cache_ttl: int = 86400 * 7):
        """
        Initialize the announcement provider.

        Args:
            api_key: API key; None disables real API access.
            cache_dir: cache directory.
            cache_ttl: cache lifetime in seconds (defaults to one week).
        """
        super().__init__(cache_dir, cache_ttl)
        self.api_key = api_key

    def _get_announcements(self, symbol: str, start_date: str, end_date: str, category: str = "all") -> pd.DataFrame:
        """
        Fetch announcements for `symbol` within [start_date, end_date].

        Args:
            symbol: stock code.
            start_date: start date (YYYY-MM-DD).
            end_date: end date (YYYY-MM-DD).
            category: 'finance', 'operation', 'other', or 'all'.

        Returns:
            pd.DataFrame: date-indexed frame with 'symbol', 'category', 'title'.
        """
        if not self.api_key:
            logging.warning("未设置API密钥，无法获取公告数据")
            return self._generate_dummy_announcements(symbol, start_date, end_date, category)

        # A real implementation would call an announcement API here; due to
        # API access restrictions dummy data is used instead.
        return self._generate_dummy_announcements(symbol, start_date, end_date, category)

    def _generate_dummy_announcements(self, symbol: str, start_date: str, end_date: str, category: str = "all") -> pd.DataFrame:
        """Generate deterministic dummy announcements, 0-2 per category per month."""
        logging.warning("使用模拟公告数据")

        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')

        # CRC32 seed keyed on symbol+category: stable across processes,
        # unlike built-in hash(), which is salted per run (PYTHONHASHSEED).
        np.random.seed(zlib.crc32((symbol + category).encode('utf-8')) % 10000 + 4)

        company_name = f"公司{symbol}"

        if category == "all":
            categories = ["finance", "operation", "other"]
        else:
            categories = [category]

        # Templates per category; the two {} slots take modifier/element.
        title_templates = {
            "finance": [
                f"{company_name}{{}}{{}}财务报告", 
                f"{company_name}关于{{}}{{}}的公告", 
                f"{company_name}{{}}{{}}财务状况公告"
            ],
            "operation": [
                f"{company_name}关于{{}}{{}}的公告", 
                f"{company_name}{{}}{{}}经营情况公告", 
                f"{company_name}董事会关于{{}}{{}}的决议公告"
            ],
            "other": [
                f"{company_name}{{}}{{}}提示性公告", 
                f"{company_name}关于{{}}{{}}的公告", 
                f"{company_name}{{}}{{}}事项公告"
            ]
        }

        elements = {
            "finance": [
                "第一季度", "第二季度", "第三季度", "第四季度", "年度", 
                "利润分配", "股息发放", "财务预警", "业绩快报"
            ],
            "operation": [
                "重大合同签订", "资产重组", "对外投资", "关联交易", "股份回购", 
                "管理层变动", "战略合作", "诉讼进展"
            ],
            "other": [
                "股东大会", "交易所问询", "回复监管函", "风险提示", "澄清公告", 
                "停牌", "复牌", "补充公告"
            ]
        }

        modifiers = ["重大", "关于", "临时", "补充", ""]

        announcement_list = []

        # Walk month by month starting from the first of the start month.
        current_month = start.replace(day=1)

        while current_month <= end:
            # day=28 + 4 days always lands in the following month; snapping
            # back to day 1 and subtracting one day yields this month's end.
            month_end = (current_month.replace(day=28) + timedelta(days=4)).replace(day=1) - timedelta(days=1)
            month_end = min(month_end, end)

            # Clamp the draw window's start to the requested start date so
            # the first month cannot emit announcements before start_date
            # (the previous version drew from day 1 of the month).
            month_start = max(current_month, start)

            for cat in categories:
                # 0-2 announcements per category per month.
                num_announcements = np.random.randint(0, 3)

                for _ in range(num_announcements):
                    announcement_date = month_start + timedelta(days=np.random.randint(0, (month_end - month_start).days + 1))

                    title_template = np.random.choice(title_templates[cat])
                    element = np.random.choice(elements[cat])
                    modifier = np.random.choice(modifiers)

                    title = title_template.format(modifier, element)

                    announcement_list.append({
                        'date': announcement_date,
                        'symbol': symbol,
                        'category': cat,
                        'title': title
                    })

            # Advance to the first day of the next month.
            current_month = (current_month.replace(day=28) + timedelta(days=4)).replace(day=1)

        if announcement_list:
            df = pd.DataFrame(announcement_list)
            df.set_index('date', inplace=True)
            df = df.sort_index()
            return df
        else:
            return pd.DataFrame(columns=['date', 'symbol', 'category', 'title']).set_index('date')


class TextAnalyzer:
    """Sentiment scoring and keyword extraction with graceful degradation.

    Sentiment: prefers a transformers pipeline (when requested and
    available), then SnowNLP, then a simple lexicon heuristic.
    Keywords: jieba's TF-IDF extraction, falling back to raw term counts.
    """

    def __init__(self, use_transformers: bool = False, model_name: str = "bert-base-chinese"):
        """
        Initialize the analyzer.

        Args:
            use_transformers: whether to try loading a transformers model.
            model_name: HuggingFace model identifier to load.
        """
        self.use_transformers = use_transformers and TRANSFORMERS_AVAILABLE
        self.model_name = model_name

        if not self.use_transformers:
            return

        try:
            self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.nlp_pipeline = pipeline("sentiment-analysis", model=self.model, tokenizer=self.tokenizer)
            logging.info(f"使用transformers模型: {model_name}")
        except Exception as e:
            # Model download/load can fail; degrade to the simpler backends.
            logging.error(f"加载transformers模型失败: {str(e)}")
            self.use_transformers = False

    def analyze_sentiment(self, text: str) -> float:
        """Score `text` in [-1, 1]; returns 0.0 for empty input or any failure."""
        if not text or not text.strip():
            return 0.0

        try:
            if self.use_transformers:
                return self._transformer_score(text)
            if SNOWNLP_AVAILABLE:
                # SnowNLP yields a probability in [0, 1]; rescale to [-1, 1].
                return SnowNLP(text).sentiments * 2 - 1
            return self._lexicon_score(text)
        except Exception as e:
            logging.error(f"情感分析失败: {str(e)}")
            return 0.0

    def _transformer_score(self, text: str) -> float:
        """Map the transformers pipeline output onto [-1, 1]."""
        outputs = self.nlp_pipeline(text[:512])  # cap length for the model
        top = outputs[0]
        if "label" not in top:
            # Assume a bare probability in [0, 1] and rescale.
            return top["score"] * 2 - 1
        label = top["label"].lower()
        if "positive" in label:
            return top["score"]
        if "negative" in label:
            return -top["score"]
        return 0.0

    def _lexicon_score(self, text: str) -> float:
        """Crude lexicon ratio: (pos - neg) / (pos + neg); 0.0 with no hits."""
        positive_words = ["增长", "盈利", "获利", "利好", "上涨", "突破", "扩大", "成功", "创新", "机遇", "优化", "提高", "提升", "回升", "满意"]
        negative_words = ["下跌", "亏损", "减少", "风险", "问题", "下滑", "萎缩", "失败", "危机", "降低", "停滞", "困难", "减持", "压力", "担忧"]

        pos_hits = sum(1 for word in positive_words if word in text)
        neg_hits = sum(1 for word in negative_words if word in text)

        hits = pos_hits + neg_hits
        if hits == 0:
            return 0.0
        return (pos_hits - neg_hits) / hits

    def extract_keywords(self, text: str, top_n: int = 5) -> List[str]:
        """Return up to `top_n` keywords; [] for empty input or any failure."""
        if not text or not text.strip():
            return []

        try:
            if JIEBA_AVAILABLE:
                return jieba.analyse.extract_tags(text, topK=top_n)

            # Fallback: frequency count over \w+ tokens, ignoring single
            # characters; stable sort preserves first-seen order on ties.
            counts = {}
            for token in re.findall(r'\w+', text):
                if len(token) > 1:
                    counts[token] = counts.get(token, 0) + 1

            ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
            return [token for token, _ in ranked[:top_n]]
        except Exception as e:
            logging.error(f"关键词提取失败: {str(e)}")
            return []


class AlternativeFactorCalculator:
    """另类因子计算器"""
    
    def __init__(self, text_analyzer: TextAnalyzer = None):
        """
        Initialize the alternative-factor calculator.

        Args:
            text_analyzer: analyzer used for text-based factors; when none
                (or any falsy value) is supplied, a default TextAnalyzer
                is constructed.
        """
        if text_analyzer:
            self.text_analyzer = text_analyzer
        else:
            self.text_analyzer = TextAnalyzer()
        
    def calculate_search_index_factor(self, search_data: pd.DataFrame, 
                                   price_data: pd.DataFrame = None,
                                   z_score: bool = True,
                                   ma_periods: Optional[List[int]] = None) -> Dict[str, pd.Series]:
        """
        Compute search-index factors.

        Args:
            search_data: search-index data with a 'search_index' column.
            price_data: optional price data with a 'close' column.
            z_score: whether to apply rolling Z-score standardization.
            ma_periods: moving-average windows (defaults to [5, 10, 20]).

        Returns:
            Dict[str, pd.Series]: factor name -> factor series.
        """
        # None sentinel avoids the shared-mutable-default-argument pitfall.
        if ma_periods is None:
            ma_periods = [5, 10, 20]

        factors = {}

        if search_data is None or search_data.empty:
            logging.warning("搜索指数数据为空")
            return factors

        if 'search_index' in search_data.columns:
            base_factor = search_data['search_index']

            # Rolling Z-score standardization (60-day window, min 20 obs).
            if z_score:
                mean = base_factor.rolling(window=60, min_periods=20).mean()
                std = base_factor.rolling(window=60, min_periods=20).std()
                # Guard against zero std (constant windows), consistent with
                # the news/social sentiment factor methods.
                std = std.replace(0, 1)
                base_factor = (base_factor - mean) / std

            factors['search_index'] = base_factor

            # Day-over-day rate of change (of the possibly standardized series).
            factors['search_index_change'] = base_factor.pct_change()

            # Moving averages.
            for period in ma_periods:
                factors[f'search_index_ma{period}'] = base_factor.rolling(window=period).mean()

            # MA crossover spreads: shorter MA minus longer MA.
            for i, period1 in enumerate(ma_periods):
                for period2 in ma_periods[i+1:]:
                    factors[f'search_index_ma{period1}_cross_ma{period2}'] = (
                        factors[f'search_index_ma{period1}'] - factors[f'search_index_ma{period2}']
                    )

            # Price-linked factors when price data is available.
            if price_data is not None and 'close' in price_data.columns:
                # Align on the common dates only.
                common_index = base_factor.index.intersection(price_data.index)
                if len(common_index) > 0:
                    aligned_factor = base_factor.loc[common_index]
                    aligned_price = price_data.loc[common_index, 'close']

                    # Rolling 30-day correlation between search interest and price.
                    if len(common_index) >= 30:
                        factors['search_index_price_corr'] = aligned_factor.rolling(window=30).corr(aligned_price)

                    # Lagged copies assuming search interest leads price by 1-3 days.
                    for lag in [1, 2, 3]:
                        factors[f'search_index_lead{lag}'] = aligned_factor.shift(lag)

        return factors
        
    def calculate_news_sentiment_factor(self, news_data: pd.DataFrame,
                                     price_data: pd.DataFrame = None,
                                     z_score: bool = True,
                                     ma_periods: Optional[List[int]] = None) -> Dict[str, pd.Series]:
        """
        Compute news-sentiment factors.

        Args:
            news_data: news data with a 'sentiment' column.
            price_data: optional price data with a 'close' column.
            z_score: whether to apply rolling Z-score standardization.
            ma_periods: moving-average windows (defaults to [3, 5, 10]).

        Returns:
            Dict[str, pd.Series]: factor name -> factor series.
        """
        # None sentinel avoids the shared-mutable-default-argument pitfall.
        if ma_periods is None:
            ma_periods = [3, 5, 10]

        factors = {}

        if news_data is None or news_data.empty:
            logging.warning("新闻数据为空")
            return factors

        if 'sentiment' in news_data.columns:
            base_factor = news_data['sentiment']

            # Rolling Z-score standardization (30-day window, min 10 obs).
            if z_score:
                mean = base_factor.rolling(window=30, min_periods=10).mean()
                std = base_factor.rolling(window=30, min_periods=10).std()
                std = std.replace(0, 1)  # guard against zero std
                base_factor = (base_factor - mean) / std

            factors['news_sentiment'] = base_factor

            # Moving averages.
            for period in ma_periods:
                factors[f'news_sentiment_ma{period}'] = base_factor.rolling(window=period).mean()

            # Day-over-day sentiment change.
            factors['news_sentiment_change'] = base_factor.diff()

            # Momentum: shortest-window MA minus longest-window MA.
            if len(ma_periods) >= 2:
                short_period = min(ma_periods)
                long_period = max(ma_periods)
                factors['news_sentiment_momentum'] = (
                    factors[f'news_sentiment_ma{short_period}'] - factors[f'news_sentiment_ma{long_period}']
                )

            # Price-linked factors when price data is available.
            if price_data is not None and 'close' in price_data.columns:
                common_index = base_factor.index.intersection(price_data.index)
                if len(common_index) > 0:
                    aligned_factor = base_factor.loc[common_index]
                    aligned_price = price_data.loc[common_index, 'close']

                    price_change = aligned_price.pct_change()

                    # +1 when sentiment and price move the same direction,
                    # -1 when opposite (0 when either side is flat).
                    sentiment_sign = np.sign(aligned_factor)
                    price_change_sign = np.sign(price_change)
                    consistency = sentiment_sign * price_change_sign

                    factors['news_sentiment_price_consistency'] = consistency

                    # Rolling 10-day consistency.
                    factors['news_sentiment_price_consistency_ma10'] = consistency.rolling(window=10).mean()

        return factors
        
    def calculate_social_sentiment_factor(self, social_data: pd.DataFrame,
                                       price_data: pd.DataFrame = None,
                                       z_score: bool = True,
                                       volume_weighted: bool = True,
                                       ma_periods: Optional[List[int]] = None) -> Dict[str, pd.Series]:
        """
        Compute social-media sentiment factors.

        Args:
            social_data: social-media data indexed by date; expects a
                'sentiment' column and optionally a 'volume' column.
            price_data: optional price data with a 'close' column, used to
                derive sentiment/next-day-return predictive-power factors.
            z_score: standardise factors with a rolling 30-day z-score
                (needs at least 10 observations in the window).
            volume_weighted: weight sentiment by discussion volume.
            ma_periods: moving-average windows; defaults to [3, 5, 10].

        Returns:
            Dict[str, pd.Series]: mapping of factor name to factor series
            (empty when the input data is missing or empty).
        """
        # Fix: the original signature used a mutable default ([3, 5, 10]),
        # which is shared across calls; bind a fresh list per call instead.
        if ma_periods is None:
            ma_periods = [3, 5, 10]

        factors: Dict[str, pd.Series] = {}

        if social_data is None or social_data.empty:
            logging.warning("社交媒体数据为空")
            return factors

        # Without a sentiment column there is nothing to compute.
        if 'sentiment' not in social_data.columns:
            return factors

        # Base social-sentiment factor
        base_factor = social_data['sentiment']

        # Discussion-volume weighting: scale sentiment by volume, then
        # normalise by the rolling 10-day volume sum.
        if volume_weighted and 'volume' in social_data.columns:
            base_factor = base_factor * social_data['volume']
            volume_sum = social_data['volume'].rolling(window=10, min_periods=1).sum()
            volume_sum = volume_sum.replace(0, 1)  # guard against division by zero
            base_factor = base_factor / volume_sum

        # Rolling 30-day z-score standardisation
        if z_score:
            mean = base_factor.rolling(window=30, min_periods=10).mean()
            std = base_factor.rolling(window=30, min_periods=10).std()
            std = std.replace(0, 1)  # guard against division by zero
            base_factor = (base_factor - mean) / std

        factors['social_sentiment'] = base_factor

        # Discussion-volume factors
        if 'volume' in social_data.columns:
            volume_factor = social_data['volume']

            # Rolling 30-day z-score standardisation of the raw volume
            if z_score:
                volume_mean = volume_factor.rolling(window=30, min_periods=10).mean()
                volume_std = volume_factor.rolling(window=30, min_periods=10).std()
                volume_std = volume_std.replace(0, 1)  # guard against division by zero
                volume_factor = (volume_factor - volume_mean) / volume_std

            factors['social_volume'] = volume_factor

            # Volume-surge factor: current (possibly standardised) volume
            # relative to its 30-day average.
            volume_ma10 = volume_factor.rolling(window=10).mean()
            volume_ma30 = volume_factor.rolling(window=30).mean()
            factors['social_volume_surge'] = volume_factor / volume_ma30

            # Volume-momentum factor: short MA relative to long MA.
            factors['social_volume_momentum'] = volume_ma10 / volume_ma30

        # Sentiment moving averages
        for period in ma_periods:
            ma = base_factor.rolling(window=period).mean()
            factors[f'social_sentiment_ma{period}'] = ma

        # Extreme-sentiment factor (absolute sentiment level)
        factors['social_sentiment_extreme'] = base_factor.abs()

        # Sentiment-volatility factor
        factors['social_sentiment_volatility'] = base_factor.rolling(window=10).std()

        # If price data is available, relate sentiment to next-day returns.
        if price_data is not None and 'close' in price_data.columns:
            # Align on the common dates only.
            common_index = base_factor.index.intersection(price_data.index)
            if len(common_index) > 0:
                aligned_factor = base_factor.loc[common_index]
                aligned_price = price_data.loc[common_index, 'close']

                # Next-day return for each date.
                next_day_return = aligned_price.pct_change().shift(-1)

                # +1 when sentiment and next-day move agree in sign, -1 otherwise.
                sentiment_sign = np.sign(aligned_factor)
                next_return_sign = np.sign(next_day_return)
                predictive_power = sentiment_sign * next_return_sign

                factors['social_sentiment_predictive_power'] = predictive_power

                # Rolling 20-day average predictive power.
                rolling_pred_power = predictive_power.rolling(window=20).mean()
                factors['social_sentiment_predictive_power_ma20'] = rolling_pred_power

        return factors
        
    def calculate_announcement_factor(self, announcement_data: pd.DataFrame,
                                   price_data: pd.DataFrame = None,
                                   keyword_filter: List[str] = None) -> Dict[str, pd.Series]:
        """
        Compute announcement-based factors.

        Derives announcement frequency, abnormal frequency, title sentiment,
        per-category frequency and (when price data is supplied) the price
        effect over the days following each announcement.

        Args:
            announcement_data: announcement records indexed by date; may carry
                'title' and 'category' columns (assumes a DatetimeIndex whose
                values all fall on calendar days — TODO confirm against the
                provider, otherwise `.loc[date]` below can raise KeyError).
            price_data: optional daily price data with a 'close' column.
            keyword_filter: when given, only titles containing at least one of
                these keywords contribute to the sentiment factor.

        Returns:
            Dict[str, pd.Series]: mapping of factor name to factor series
            (empty when the input data is missing or empty).
        """
        factors = {}
        
        if announcement_data is None or announcement_data.empty:
            logging.warning("公告数据为空")
            return factors
            
        # Announcement-frequency factor: count announcements per calendar day
        # over the full span of the data.
        announcement_dates = announcement_data.index
        date_range = pd.date_range(start=announcement_dates.min(), end=announcement_dates.max())
        announcement_count = pd.Series(0, index=date_range)
        
        for date in announcement_dates:
            announcement_count.loc[date] += 1
            
        # No-op for a zero-initialised series; kept as a safety net.
        announcement_count = announcement_count.fillna(0)
        
        # Rolling 30-day announcement count.
        announcement_frequency = announcement_count.rolling(window=30).sum()
        factors['announcement_frequency'] = announcement_frequency
        
        # Abnormal-frequency factor: current frequency relative to the
        # 60-day historical average lagged by 30 days.
        if len(announcement_frequency) > 60:
            historical_avg = announcement_frequency.rolling(window=60).mean().shift(30)
            factors['announcement_abnormal_frequency'] = announcement_frequency / historical_avg
            
        # Title-sentiment factor, if titles are available.
        if 'title' in announcement_data.columns:
            # One score slot per calendar day, default 0 (neutral).
            sentiment_scores = pd.Series(0.0, index=date_range)
            
            # Score each announcement title.
            # NOTE(review): when several announcements share a date, the last
            # one's score wins — consider averaging per day instead.
            for date, row in announcement_data.iterrows():
                title = row.get('title', '')
                
                # Apply the keyword filter: skip titles without any keyword.
                if keyword_filter:
                    if not any(keyword in title for keyword in keyword_filter):
                        continue
                        
                # Sentiment score — presumably in [-1, 1], matching the
                # convention documented elsewhere in this module; confirm
                # against TextAnalyzer.analyze_sentiment.
                sentiment = self.text_analyzer.analyze_sentiment(title)
                sentiment_scores.loc[date] = sentiment
                
            # No-op here (series is fully initialised); kept as a safety net.
            sentiment_scores = sentiment_scores.fillna(0)
            
            # Rolling 10-day average sentiment.
            sentiment_ma10 = sentiment_scores.rolling(window=10).mean()
            factors['announcement_sentiment'] = sentiment_scores
            factors['announcement_sentiment_ma10'] = sentiment_ma10
            
        # Per-category frequency factors, if categories are available.
        if 'category' in announcement_data.columns:
            categories = announcement_data['category'].unique()
            
            for category in categories:
                # Dates of announcements in this category.
                category_dates = announcement_data[announcement_data['category'] == category].index
                
                # Daily count for this category.
                category_count = pd.Series(0, index=date_range)
                for date in category_dates:
                    category_count.loc[date] += 1
                    
                # No-op for a zero-initialised series; kept as a safety net.
                category_count = category_count.fillna(0)
                
                # Rolling 30-day count for this category.
                category_frequency = category_count.rolling(window=30).sum()
                factors[f'announcement_frequency_{category}'] = category_frequency
                
        # Relate announcements to subsequent price moves, if prices given.
        if price_data is not None and 'close' in price_data.columns:
            # Align on the common dates only.
            common_index = announcement_count.index.intersection(price_data.index)
            if len(common_index) > 0:
                aligned_announcement = announcement_count.loc[common_index]
                aligned_price = price_data.loc[common_index, 'close']
                
                # Daily returns on the aligned dates.
                price_change = aligned_price.pct_change()
                
                # Return on announcement days only (NaN elsewhere).
                # NOTE(review): computed but never added to `factors` —
                # likely an omission; confirm intent.
                announcement_day_change = price_change.copy()
                announcement_day_change[aligned_announcement == 0] = np.nan
                
                # Cumulative return over the N days after an announcement.
                for days in [1, 3, 5]:
                    cumulative_change = aligned_price.pct_change(days).shift(-days)
                    announcement_effect = cumulative_change.copy()
                    announcement_effect[aligned_announcement == 0] = np.nan
                    
                    factors[f'announcement_price_effect_{days}d'] = announcement_effect
                    
        return factors
        
    def calculate_all_alternative_factors(self, symbol: str, 
                                       search_data: pd.DataFrame = None,
                                       news_data: pd.DataFrame = None,
                                       social_data: pd.DataFrame = None,
                                       announcement_data: pd.DataFrame = None,
                                       price_data: pd.DataFrame = None) -> Dict[str, pd.Series]:
        """
        Compute every alternative-data factor family for one security.

        Args:
            symbol: security code (currently unused by the calculation itself)
            search_data: optional search-index data
            news_data: optional news data
            social_data: optional social-media data
            announcement_data: optional announcement data
            price_data: optional price data, forwarded to each calculator

        Returns:
            Dict[str, pd.Series]: merged mapping of factor name to series,
            combining the output of every applicable calculator.
        """
        combined: Dict[str, pd.Series] = {}

        # Pair each dataset with the calculator that handles it; a dataset
        # that is None or empty is skipped entirely.
        for dataset, method_name in (
            (search_data, "calculate_search_index_factor"),
            (news_data, "calculate_news_sentiment_factor"),
            (social_data, "calculate_social_sentiment_factor"),
            (announcement_data, "calculate_announcement_factor"),
        ):
            if dataset is None or dataset.empty:
                continue
            # Resolve the method lazily so untouched datasets never require it.
            combined.update(getattr(self, method_name)(dataset, price_data))

        return combined


class AlternativeFactors:
    """Alternative-data factor library: creates and manages factors built on
    non-traditional data sources (search indices, news, social media,
    company announcements)."""
    
    def __init__(self, baidu_cookie: str = None, news_api_key: str = None, 
                social_api_key: str = None, announcement_api_key: str = None):
        """
        Wire up the data providers and analysis helpers.

        Args:
            baidu_cookie: cookie for the Baidu search-index service
            news_api_key: API key for the news provider
            social_api_key: API key for the social-media provider
            announcement_api_key: API key for the announcement provider
        """
        # Shared text-analysis helper, also fed into the factor calculator.
        self.text_analyzer = TextAnalyzer()
        self.factor_calculator = AlternativeFactorCalculator(self.text_analyzer)

        # One provider per alternative-data source; credentials are optional.
        self.baidu_index_provider = BaiduIndexProvider(cookie=baidu_cookie)
        self.news_provider = NewsProvider(api_key=news_api_key)
        self.social_media_provider = SocialMediaProvider(api_key=social_api_key)
        self.announcement_provider = AnnouncementProvider(api_key=announcement_api_key)
        
    def create_search_index_factor(self, keyword: str, start_date: str, end_date: str,
                                price_data: pd.DataFrame = None, name: str = "search_index",
                                description: str = "搜索指数因子", category: str = "搜索") -> pd.Series:
        """
        Build a search-index factor for one keyword.

        Args:
            keyword: search keyword
            start_date: first date of the query window
            end_date: last date of the query window
            price_data: optional price data forwarded to the calculator
            name: preferred factor name to return
            description: human-readable factor description
            category: factor category label

        Returns:
            pd.Series: the factor named `name` when available, otherwise the
            first factor computed, or None when no data/factors exist.
        """
        # Pull the raw search-index series from the provider.
        search_data = self.baidu_index_provider.get_alternative_data(
            "search_index",
            keyword=keyword,
            start_date=start_date,
            end_date=end_date,
        )

        if search_data is None or search_data.empty:
            logging.warning(f"未能获取 {keyword} 的搜索指数数据")
            return None

        factors = self.factor_calculator.calculate_search_index_factor(search_data, price_data)

        if not factors:
            return None
        # Prefer the requested name; fall back to the first computed factor.
        return factors.get(name, next(iter(factors.values())))
            
    def create_news_sentiment_factor(self, symbol: str, start_date: str, end_date: str,
                                  price_data: pd.DataFrame = None, name: str = "news_sentiment",
                                  description: str = "新闻情感因子", category: str = "情感") -> pd.Series:
        """
        Build a news-sentiment factor for a single security.

        Args:
            symbol: security code
            start_date: first date of the query window
            end_date: last date of the query window
            price_data: optional price data forwarded to the calculator
            name: preferred factor name to return
            description: human-readable factor description
            category: factor category label

        Returns:
            pd.Series: the factor named `name` when available, otherwise the
            first factor computed, or None when no data/factors exist.
        """
        # Pull company news for the window from the provider.
        news_data = self.news_provider.get_alternative_data(
            "company_news",
            symbol=symbol,
            start_date=start_date,
            end_date=end_date,
        )

        if news_data is None or news_data.empty:
            logging.warning(f"未能获取 {symbol} 的新闻数据")
            return None

        factors = self.factor_calculator.calculate_news_sentiment_factor(news_data, price_data)

        if not factors:
            return None
        # Prefer the requested name; fall back to the first computed factor.
        return factors.get(name, next(iter(factors.values())))
            
    def create_social_sentiment_factor(self, keyword: str, start_date: str, end_date: str,
                                    price_data: pd.DataFrame = None, platform: str = "all",
                                    name: str = "social_sentiment", description: str = "社交情感因子",
                                    category: str = "情感") -> pd.Series:
        """
        Build a social-media sentiment factor for one keyword.

        Args:
            keyword: search keyword
            start_date: first date of the query window
            end_date: last date of the query window
            price_data: optional price data forwarded to the calculator
            platform: platform selector: 'weibo', 'wechat' or 'all'
            name: preferred factor name to return
            description: human-readable factor description
            category: factor category label

        Returns:
            pd.Series: the factor named `name` when available, otherwise the
            first factor computed, or None when no data/factors exist.
        """
        # Pull social-media sentiment data for the window from the provider.
        social_data = self.social_media_provider.get_alternative_data(
            "social_sentiment",
            keyword=keyword,
            start_date=start_date,
            end_date=end_date,
            platform=platform,
        )

        if social_data is None or social_data.empty:
            logging.warning(f"未能获取 {keyword} 的社交媒体数据")
            return None

        factors = self.factor_calculator.calculate_social_sentiment_factor(social_data, price_data)

        if not factors:
            return None
        # Prefer the requested name; fall back to the first computed factor.
        return factors.get(name, next(iter(factors.values())))
            
    def create_announcement_factor(self, symbol: str, start_date: str, end_date: str,
                                price_data: pd.DataFrame = None, category: str = "all",
                                name: str = "announcement_sentiment", 
                                description: str = "公告情感因子",
                                factor_category: str = "情感") -> pd.Series:
        """
        Build an announcement-based factor for a single security.

        Args:
            symbol: security code
            start_date: first date of the query window
            end_date: last date of the query window
            price_data: optional price data forwarded to the calculator
            category: announcement category: 'finance', 'operation', 'other' or 'all'
            name: preferred factor name to return
            description: human-readable factor description
            factor_category: factor category label

        Returns:
            pd.Series: the factor named `name` when available, otherwise the
            first factor computed, or None when no data/factors exist.
        """
        # Pull announcements for the window from the provider.
        announcement_data = self.announcement_provider.get_alternative_data(
            "announcements",
            symbol=symbol,
            start_date=start_date,
            end_date=end_date,
            category=category,
        )

        if announcement_data is None or announcement_data.empty:
            logging.warning(f"未能获取 {symbol} 的公告数据")
            return None

        factors = self.factor_calculator.calculate_announcement_factor(announcement_data, price_data)

        if not factors:
            return None
        # Prefer the requested name; fall back to the first computed factor.
        return factors.get(name, next(iter(factors.values())))
            
    def create_all_alternative_factors(self, symbol: str, keyword: str, 
                                    start_date: str, end_date: str,
                                    price_data: pd.DataFrame = None) -> Dict[str, pd.Series]:
        """
        Fetch every alternative data source and compute all factors at once.

        Args:
            symbol: security code (used for news and announcements)
            keyword: search keyword (used for search index and social media)
            start_date: first date of the query window
            end_date: last date of the query window
            price_data: optional price data forwarded to the calculator

        Returns:
            Dict[str, pd.Series]: merged mapping of factor name to series.
        """
        # All four fetches share the same date window.
        window = {"start_date": start_date, "end_date": end_date}

        search_data = self.baidu_index_provider.get_alternative_data(
            "search_index", keyword=keyword, **window)
        news_data = self.news_provider.get_alternative_data(
            "company_news", symbol=symbol, **window)
        social_data = self.social_media_provider.get_alternative_data(
            "social_sentiment", keyword=keyword, **window)
        announcement_data = self.announcement_provider.get_alternative_data(
            "announcements", symbol=symbol, **window)

        # Delegate the factor computation to the shared calculator.
        return self.factor_calculator.calculate_all_alternative_factors(
            symbol, search_data, news_data, social_data, announcement_data, price_data
        )
        
    def analyze_text_sentiment(self, text: str) -> float:
        """
        Score the sentiment of a piece of text.

        Args:
            text: the text to analyse

        Returns:
            float: sentiment score from -1 (very negative) to 1 (very positive)
        """
        # Delegate to the shared text analyzer.
        analyzer = self.text_analyzer
        return analyzer.analyze_sentiment(text)
        
    def extract_text_keywords(self, text: str, top_n: int = 5) -> List[str]:
        """
        Pull the most important keywords out of a text.

        Args:
            text: the text to analyse
            top_n: maximum number of keywords to return

        Returns:
            List[str]: the extracted keywords
        """
        # Delegate to the shared text analyzer.
        keywords = self.text_analyzer.extract_keywords(text, top_n)
        return keywords