import torch
from transformers import BertTokenizer, BertForSequenceClassification, pipeline
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import re
import time
from collections import defaultdict, deque

class NewsImpactAnalyzer:
    """Transformer-based news impact analyzer: quantifies news-event effects."""

    def __init__(self, config=None):
        """Build the tokenizer, sentiment model/pipeline, vocab weights and caches.

        Args:
            config: optional settings dict; a falsy value falls back to the
                built-in defaults below.
        """
        default_config = {
            'model_name': 'bert-base-chinese',
            'max_length': 512,
            'batch_size': 8,
            'sentiment_threshold': 0.7,
            'relevance_threshold': 0.5,
        }
        self.config = config or default_config

        # Tokenizer plus a 3-way classification head
        # (positive / neutral / negative).
        self.tokenizer = BertTokenizer.from_pretrained(self.config['model_name'])
        self.sentiment_model = BertForSequenceClassification.from_pretrained(
            self.config['model_name'],
            num_labels=3,
        )

        # Sentiment pipeline; GPU 0 when CUDA is available, otherwise CPU (-1).
        self.sentiment_analyzer = pipeline(
            "sentiment-analysis",
            model=self.sentiment_model,
            tokenizer=self.tokenizer,
            device=0 if torch.cuda.is_available() else -1,
        )

        # Finance-domain keyword weights used alongside the model's sentiment.
        self.financial_vocab_weights = self._load_financial_vocab()

        # Rolling caches: recent analyses, per-news-id history, per-ticker news.
        self.news_cache = deque(maxlen=10000)
        self.impact_history = {}
        self.ticker_news_map = defaultdict(deque)

        # Per-minute exponential decay applied to each news item's impact.
        self.decay_factor = 0.95
    
    def _load_financial_vocab(self):
        """Return the built-in finance keyword -> impact-weight table.

        Positive weights push the impact score up, negative weights pull it
        down. In production this table could be loaded from an external file.
        """
        return {
            '政策': 2.0,
            '利好': 1.5,
            '利空': -1.5,
            '加息': -1.2,
            '降息': 1.2,
            '监管': -0.8,
            '税收': -0.6,
            '补贴': 1.0,
            '增长': 0.8,
            '下滑': -0.8,
            '盈利': 0.7,
            '亏损': -0.7,
            '重组': 1.0,
            '收购': 0.9,
            '减持': -1.0,
            '增持': 1.0,
            'IPO': 0.5,
            '融资': 0.6,
            '违约': -1.5,
            '风险': -1.0,
            '机会': 0.8,
            '创新': 0.7,
        }
    
    def clean_news_text(self, text):
        """Normalize raw news text: strip HTML tags, collapse whitespace, lowercase."""
        no_tags = re.sub(r'<[^>]+>', '', text)           # drop HTML tags
        flattened = re.sub(r'[\n\t\r]+', ' ', no_tags)   # newlines/tabs -> space
        collapsed = re.sub(r'\s+', ' ', flattened)       # squeeze space runs
        return collapsed.lower().strip()
    
    def extract_tickers_from_news(self, text, known_tickers=None):
        """Extract stock ticker codes mentioned in a news text.

        Args:
            text: news text (title and/or body).
            known_tickers: optional {ticker: company_name} mapping; any ticker
                whose code or name appears verbatim in the text is added.

        Returns:
            list of unique ticker codes found in the text.
        """
        # Simple regex-based extraction; a NER model could replace this.
        tickers = set()

        # Valid A-share prefixes: Shenzhen main/SME/ChiNext boards
        # (000/001/002/003/300/301) and Shanghai main/STAR boards
        # (600/601/603/605/688).
        a_share_prefixes = ('000', '001', '002', '003', '300', '301',
                            '600', '601', '603', '605', '688')

        # Anchor 6-digit codes with lookarounds so we never pull a 6-digit
        # substring out of a longer number (phone numbers, dates).
        # Fix: the previous numeric range check (100000 <= code <= 699999)
        # wrongly rejected Shenzhen codes with leading zeros (e.g. 000001)
        # and STAR-market 688xxx codes.
        for match in re.findall(r'(?<!\d)\d{6}(?!\d)', text):
            if match.startswith(a_share_prefixes):
                tickers.add(match)

        # Also match against a caller-supplied universe by code or name.
        if known_tickers:
            for ticker, name in known_tickers.items():
                if name in text or ticker in text:
                    tickers.add(ticker)

        return list(tickers)
    
    def analyze_news_sentiment(self, news_text):
        """Run BERT sentiment analysis on a news text.

        Returns:
            dict with 'sentiment' (label), 'sentiment_score' (signed value in
            [-1, 1]), 'confidence' (model score) and 'analysis_time_ms'.
            Degrades to a neutral, low-confidence result on any error.
        """
        # Sign each canonical label contributes to the impact score.
        label_signs = {'positive': 1.0, 'negative': -1.0, 'neutral': 0.0}

        cleaned = self.clean_news_text(news_text)
        # Crude character-level truncation; the tokenizer's own token limit is
        # what actually matters, but this keeps inputs bounded.
        max_len = self.config['max_length']
        if len(cleaned) > max_len:
            cleaned = cleaned[:max_len]

        started = time.time()
        try:
            prediction = self.sentiment_analyzer(cleaned)[0]
            elapsed_ms = (time.time() - started) * 1000  # milliseconds

            label = prediction['label']
            score = prediction['score']
            if label not in label_signs:
                # NOTE(review): this fallback interprets the model's
                # *confidence* as polarity (a confident 'LABEL_x' becomes
                # 'positive' regardless of which class won) — worth revisiting.
                if score > 0.6:
                    label = 'positive'
                elif score < 0.4:
                    label = 'negative'
                else:
                    label = 'neutral'

            return {
                'sentiment': label,
                'sentiment_score': label_signs[label] * score,
                'confidence': score,
                'analysis_time_ms': elapsed_ms,
            }
        except Exception as e:
            # Best-effort: report a neutral result rather than aborting.
            print(f"Error analyzing sentiment: {str(e)}")
            return {
                'sentiment': 'neutral',
                'sentiment_score': 0.0,
                'confidence': 0.5,
                'analysis_time_ms': (time.time() - started) * 1000,
            }
    
    def calculate_news_impact(self, news_item, known_tickers=None):
        """计算新闻对市场的影响"""
        # 提取新闻信息
        news_id = news_item.get('id', str(hash(news_item.get('title', '') + str(news_item.get('timestamp', 0)))))
        title = news_item.get('title', '')
        content = news_item.get('content', '')
        timestamp = news_item.get('timestamp', int(time.time()))
        source = news_item.get('source', 'unknown')
        
        # 如果是字符串时间戳，转换为整数
        if isinstance(timestamp, str):
            try:
                dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                timestamp = int(dt.timestamp())
            except:
                timestamp = int(time.time())
        
        # 合并标题和内容进行分析
        full_text = title + ' ' + content
        
        # 分析情感
        sentiment_result = self.analyze_news_sentiment(full_text)
        
        # 提取相关股票
        related_tickers = self.extract_tickers_from_news(full_text, known_tickers)
        
        # 计算金融词汇权重
        financial_score = 0.0
        for word, weight in self.financial_vocab_weights.items():
            if word in full_text:
                # 计算词频
                count = full_text.count(word)
                financial_score += weight * min(count, 5)  # 限制单个词的最大影响
        
        # 归一化金融词汇分数
        if financial_score != 0:
            financial_score = min(max(financial_score, -5.0), 5.0) / 5.0
        
        # 计算整体影响分数
        sentiment_based_impact = sentiment_result['sentiment_score']
        financial_based_impact = financial_score
        
        # 结合情感分析和金融词汇分析的结果
        # 权重可以根据实际效果调整
        total_impact = 0.6 * sentiment_based_impact + 0.4 * financial_based_impact
        
        # 根据新闻来源的可信度调整影响分数
        source_credibility = self._get_source_credibility(source)
        total_impact *= source_credibility
        
        # 构建影响分析结果
        impact_result = {
            'news_id': news_id,
            'title': title,
            'timestamp': timestamp,
            'source': source,
            'related_tickers': related_tickers,
            'sentiment': sentiment_result['sentiment'],
            'sentiment_score': sentiment_result['sentiment_score'],
            'financial_score': financial_based_impact,
            'total_impact': total_impact,
            'confidence': sentiment_result['confidence'],
            'analysis_time_ms': sentiment_result['analysis_time_ms']
        }
        
        # 更新缓存
        self._update_caches(impact_result)
        
        return impact_result
    
    def _get_source_credibility(self, source):
        """Return a credibility factor in (0, 1] for a news source name.

        Known outlets get a predefined score; anything else gets 0.7.
        In production this table could be loaded from external configuration.
        """
        known_outlets = {
            '新华社': 0.95,
            '人民日报': 0.95,
            '中国证券报': 0.90,
            '上海证券报': 0.90,
            '证券时报': 0.90,
            '第一财经': 0.85,
            '财经网': 0.80,
            '界面新闻': 0.80,
        }

        # Substring match, so e.g. '新华社客户端' still resolves to 新华社.
        for outlet, credibility in known_outlets.items():
            if outlet in source:
                return credibility

        return 0.7  # default credibility for unknown sources
    
    def _update_caches(self, impact_result):
        """更新新闻缓存"""
        # 添加到新闻缓存
        self.news_cache.append(impact_result)
        
        # 更新股票-新闻映射
        for ticker in impact_result['related_tickers']:
            self.ticker_news_map[ticker].append({
                'news_id': impact_result['news_id'],
                'timestamp': impact_result['timestamp'],
                'impact': impact_result['total_impact'],
                'title': impact_result['title']
            })
        
        # 记录影响历史
        self.impact_history[impact_result['news_id']] = impact_result
    
    def get_ticker_news_impact(self, ticker, time_window=3600):  # 默认1小时
        """获取特定股票在时间窗口内的累计新闻影响"""
        current_time = int(time.time())
        window_start = current_time - time_window
        
        # 获取该股票的相关新闻
        ticker_news = self.ticker_news_map.get(ticker, [])
        
        # 计算时间衰减后的累计影响
        accumulated_impact = 0.0
        recent_news = []
        
        for news in ticker_news:
            if news['timestamp'] >= window_start:
                # 计算时间衰减因子
                time_diff_minutes = (current_time - news['timestamp']) / 60
                decay = self.decay_factor ** time_diff_minutes
                
                # 应用衰减并累加影响
                decayed_impact = news['impact'] * decay
                accumulated_impact += decayed_impact
                
                # 记录最近新闻
                recent_news.append({
                    'title': news['title'],
                    'timestamp': news['timestamp'],
                    'original_impact': news['impact'],
                    'decayed_impact': decayed_impact
                })
        
        # 按时间排序最近新闻
        recent_news.sort(key=lambda x: x['timestamp'], reverse=True)
        
        return {
            'ticker': ticker,
            'accumulated_impact': accumulated_impact,
            'news_count': len(recent_news),
            'recent_news': recent_news[:10],  # 最多返回10条
            'window_start': window_start,
            'window_end': current_time
        }
    
    def detect_event_driven_opportunities(self, time_window=3600, impact_threshold=0.8):
        """检测事件驱动的交易机会"""
        opportunities = []
        current_time = int(time.time())
        window_start = current_time - time_window
        
        # 遍历所有有新闻的股票
        for ticker, news_list in self.ticker_news_map.items():
            # 计算该股票的累计影响
            impact_data = self.get_ticker_news_impact(ticker, time_window)
            accumulated_impact = impact_data['accumulated_impact']
            
            # 检查是否达到影响阈值
            if abs(accumulated_impact) >= impact_threshold:
                # 确定机会类型
                if accumulated_impact > 0:
                    opportunity_type = 'bullish'
                else:
                    opportunity_type = 'bearish'
                
                # 添加交易机会
                opportunities.append({
                    'ticker': ticker,
                    'opportunity_type': opportunity_type,
                    'impact_score': accumulated_impact,
                    'news_count': impact_data['news_count'],
                    'latest_news_title': impact_data['recent_news'][0]['title'] if impact_data['recent_news'] else '',
                    'detection_time': current_time
                })
        
        # 按影响分数排序
        opportunities.sort(key=lambda x: abs(x['impact_score']), reverse=True)
        
        return {
            'opportunities': opportunities[:20],  # 最多返回20个机会
            'total_opportunities': len(opportunities),
            'time_window': time_window,
            'impact_threshold': impact_threshold,
            'detection_time': current_time
        }
    
    def batch_analyze_news(self, news_items, known_tickers=None):
        """Analyze a list of news items and report timing statistics.

        Items are processed in config['batch_size'] chunks; a failure on one
        item produces an error record instead of aborting the run.

        Args:
            news_items: list of news dicts (see calculate_news_impact).
            known_tickers: optional {ticker: name} mapping passed through.

        Returns:
            dict with per-item results (in input order), the item count and
            total/average timing in milliseconds.
        """
        started = time.time()
        results = []

        chunk = self.config['batch_size']
        for offset in range(0, len(news_items), chunk):
            for news in news_items[offset:offset + chunk]:
                try:
                    results.append(self.calculate_news_impact(news, known_tickers))
                except Exception as e:
                    # Keep going; record the failure for this item.
                    print(f"Error analyzing news in batch: {str(e)}")
                    results.append({
                        'news_id': news.get('id', 'error'),
                        'error': str(e),
                        'timestamp': int(time.time()),
                    })

        total_ms = (time.time() - started) * 1000  # milliseconds
        per_item_ms = total_ms / len(news_items) if news_items else 0

        return {
            'results': results,
            'total_news': len(news_items),
            'total_analysis_time_ms': total_ms,
            'avg_analysis_time_per_news_ms': per_item_ms,
        }
    
    def save_model(self, path='models/news_impact_model'):
        """Persist the sentiment model and tokenizer under `path`.

        Returns:
            True on success; False (after logging the error) on any failure.
        """
        try:
            self.sentiment_model.save_pretrained(path)
            self.tokenizer.save_pretrained(path)
        except Exception as e:
            print(f"Failed to save news impact model: {str(e)}")
            return False
        print(f"News impact model saved to {path}")
        return True
    
    def load_model(self, path='models/news_impact_model'):
        """Load a saved model/tokenizer from `path` and rebuild the pipeline.

        Returns:
            True on success; False (after logging the error) on any failure.
        """
        try:
            # Load everything into locals first. Fix: the previous version
            # assigned self.sentiment_model before loading the tokenizer, so a
            # partial failure left the instance inconsistent (new model, old
            # tokenizer/pipeline).
            model = BertForSequenceClassification.from_pretrained(path)
            tokenizer = BertTokenizer.from_pretrained(path)

            # The pipeline caches model/tokenizer, so it must be rebuilt too.
            analyzer = pipeline(
                "sentiment-analysis",
                model=model,
                tokenizer=tokenizer,
                device=0 if torch.cuda.is_available() else -1,
            )
        except Exception as e:
            print(f"Failed to load news impact model: {str(e)}")
            return False

        # Commit state only once every piece loaded successfully.
        self.sentiment_model = model
        self.tokenizer = tokenizer
        self.sentiment_analyzer = analyzer
        print(f"News impact model loaded from {path}")
        return True