#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Text processing module.

Preprocesses news text: cleaning, word segmentation, stopword removal,
keyword extraction, sentiment analysis and entity extraction.
"""

import os
import re
import json
import yaml
import logging
import jieba
import jieba.analyse
import pandas as pd
import numpy as np
from pathlib import Path
from collections import Counter
from snownlp import SnowNLP
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import datetime

# Import the large-language-model (LLM) analyzer
from src.processors.llm_analyzer import LLMAnalyzer

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class TextProcessor:
    """Text processor for news articles.

    Handles cleaning, word segmentation, stopword removal, keyword
    extraction, sentiment analysis and entity extraction, plus optional
    deep analysis through a large language model (LLM).
    """

    def __init__(self, config_path='config/config.yaml'):
        """
        Initialize the text processor from a YAML configuration file.

        Args:
            config_path: Path to the configuration file.
        """
        self.config = self._load_config(config_path)
        self.sentiment_model = self.config['analysis']['sentiment']['model']
        self.threshold_positive = self.config['analysis']['sentiment']['threshold_positive']
        self.threshold_negative = self.config['analysis']['sentiment']['threshold_negative']

        # Lazily-populated stopword cache; filled on first _get_stopwords() call
        # so the stopword file is read at most once per instance.
        self._stopwords = None

        # Load the custom dictionary (stock names, codes, ...) into jieba if enabled.
        if self.config['analysis']['entity_recognition']['use_custom_dict']:
            self._load_custom_dict()

        # Initialize the LLM analyzer only when explicitly enabled in the config.
        self.use_llm = self.config.get('llm_analysis', {}).get('enabled', False)
        if self.use_llm:
            self.llm_analyzer = LLMAnalyzer(config_path)
            logger.info("大语言模型分析器已初始化")
        else:
            self.llm_analyzer = None
            logger.info("大语言模型分析未启用")

        logger.info("文本处理器初始化完成")

    def _load_config(self, config_path):
        """
        Load the YAML configuration file.

        Args:
            config_path: Path to the configuration file.

        Returns:
            dict: Parsed configuration.

        Raises:
            Exception: Re-raises any error from opening or parsing the file
                (a missing config is fatal for this class).
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            logger.info(f"成功加载配置文件: {config_path}")
            return config
        except Exception as e:
            logger.error(f"加载配置文件失败: {e}")
            raise

    def _load_custom_dict(self):
        """Load the custom user dictionary (stock names, codes, ...) into jieba."""
        try:
            dict_path = self.config['analysis']['entity_recognition']['custom_dict_path']
            if os.path.exists(dict_path):
                jieba.load_userdict(dict_path)
                logger.info(f"成功加载自定义词典: {dict_path}")
            else:
                logger.warning(f"自定义词典不存在: {dict_path}")
        except Exception as e:
            # Best-effort: a missing/broken dictionary must not stop processing.
            logger.error(f"加载自定义词典失败: {e}")

    def load_news(self, news_file):
        """
        Load news items from a JSON file.

        Args:
            news_file: Path to the news JSON file.

        Returns:
            list: News items, or an empty list on failure.
        """
        try:
            with open(news_file, 'r', encoding='utf-8') as f:
                news_list = json.load(f)
            logger.info(f"成功加载 {len(news_list)} 条新闻数据")
            return news_list
        except Exception as e:
            logger.error(f"加载新闻数据失败: {e}")
            return []

    def preprocess_text(self, text):
        """
        Clean raw text: strip HTML tags, URLs, digits and punctuation.

        Only CJK ideographs and ASCII letters survive; everything else is
        collapsed into single spaces.

        Args:
            text: Raw text (may be None or empty).

        Returns:
            str: Cleaned text ("" for falsy input).
        """
        if not text:
            return ""

        # Strip HTML tags.
        text = re.sub(r'<.*?>', '', text)

        # Strip URLs.
        text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', text)

        # Keep only Chinese characters and ASCII letters.
        text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z]', ' ', text)

        # Collapse runs of whitespace.
        text = re.sub(r'\s+', ' ', text).strip()

        return text

    def segment_text(self, text):
        """
        Segment text into words, dropping stopwords and single characters.

        Args:
            text: Preprocessed text.

        Returns:
            list: Remaining words.
        """
        if not text:
            return []

        # jieba word segmentation.
        words = jieba.lcut(text)

        # Drop stopwords and single-character tokens.
        stopwords_set = self._get_stopwords()
        words = [word for word in words if word not in stopwords_set and len(word) > 1]

        return words

    def _get_stopwords(self):
        """
        Return the Chinese stopword set, loading it from disk on first use.

        The file is read at most once per instance and the result cached,
        since segment_text() is invoked once per news item.

        Returns:
            set: Stopwords (empty when the stopword file is missing).
        """
        # getattr guard keeps this safe even on instances built without __init__.
        cached = getattr(self, '_stopwords', None)
        if cached is not None:
            return cached

        stopwords_set = set()
        try:
            stopwords_file = 'models/stopwords_cn.txt'
            if os.path.exists(stopwords_file):
                with open(stopwords_file, 'r', encoding='utf-8') as f:
                    for line in f:
                        stopwords_set.add(line.strip())
        except Exception as e:
            logger.warning(f"加载中文停用词失败: {e}")

        self._stopwords = stopwords_set
        return stopwords_set

    def extract_keywords(self, text, top_k=10):
        """
        Extract keywords with jieba's TextRank implementation.

        Args:
            text: Text content.
            top_k: Number of keywords to extract.

        Returns:
            list: (word, weight) tuples, highest weight first.
        """
        if not text:
            return []

        return jieba.analyse.textrank(text, topK=top_k, withWeight=True)

    def analyze_sentiment(self, text):
        """
        Score the sentiment of a text.

        Args:
            text: Text content.

        Returns:
            dict: {'score': float in [0, 1], 'label': 'positive'|'neutral'|'negative'}.
            Empty input or an analysis failure yields the neutral default.
        """
        if not text:
            return {'score': 0.5, 'label': 'neutral'}

        try:
            # SnowNLP is currently the only supported model; any other value of
            # self.sentiment_model falls back to it as well, so call it directly.
            score = SnowNLP(text).sentiments

            # Map the score onto a label via the configured thresholds.
            if score >= self.threshold_positive:
                label = 'positive'
            elif score <= self.threshold_negative:
                label = 'negative'
            else:
                label = 'neutral'

            return {
                'score': score,
                'label': label
            }

        except Exception as e:
            logger.error(f"情感分析失败: {e}")
            return {'score': 0.5, 'label': 'neutral'}

    def extract_entities(self, text):
        """
        Extract entities (stock codes, company names, industries) from text.

        Args:
            text: Text content.

        Returns:
            dict: {'companies': [...], 'stocks': [...], 'industries': [...]},
            each list deduplicated and sorted for deterministic output.
        """
        entities = {
            'companies': [],
            'stocks': [],
            'industries': []
        }

        if not text:
            return entities

        try:
            # Stock codes of the form 600000.SH / 000001.SZ.
            stock_codes = re.findall(r'[0-9]{6}\.[A-Z]{2}', text)
            entities['stocks'].extend(stock_codes)

            # Company names: heuristic match on common suffix keywords.
            # NOTE(review): a proper gazetteer / NER model would be more precise;
            # this relies on the custom jieba dictionary keeping names intact.
            words = self.segment_text(text)
            company_keywords = ['公司', '集团', '企业', '股份']
            for word in words:
                if any(keyword in word for keyword in company_keywords) and len(word) >= 4:
                    entities['companies'].append(word)

            # Industries: substring match against the configured industry list.
            industries = self.config['stock_market']['industries']
            for industry in industries:
                if industry in text:
                    entities['industries'].append(industry)

            # Deduplicate; sorted (instead of list(set(...))) so repeated runs
            # produce the same ordering in saved results.
            for key in entities:
                entities[key] = sorted(set(entities[key]))

        except Exception as e:
            logger.error(f"实体提取失败: {e}")

        return entities

    def process_news(self, news_list):
        """
        Run the full pipeline over a list of news items.

        Each item is cleaned, segmented and analyzed; items that raise are
        skipped. When LLM analysis is enabled, a single batch-level LLM
        analysis is attached to every processed item under 'llm_analysis'.

        Args:
            news_list: Raw news items (dicts with 'title'/'content'/...).

        Returns:
            list: Processed items, each with 'original' and 'processed' keys.
        """
        processed_news = []

        for news in news_list:
            try:
                title = news.get('title', '')
                content = news.get('content', '')

                processed_title = self.preprocess_text(title)
                processed_content = self.preprocess_text(content)

                # Analyze title and body together.
                full_text = processed_title + ' ' + processed_content

                words = self.segment_text(full_text)
                keywords = self.extract_keywords(full_text)
                sentiment = self.analyze_sentiment(full_text)
                entities = self.extract_entities(full_text)

                processed_news.append({
                    'original': news,
                    'processed': {
                        'title': processed_title,
                        'content': processed_content,
                        'words': words,
                        'keywords': keywords,
                        'sentiment': sentiment,
                        'entities': entities
                    }
                })

            except Exception as e:
                # Skip the broken item but keep processing the rest.
                logger.error(f"处理新闻失败: {e}")
                continue

        logger.info(f"成功处理 {len(processed_news)} 条新闻")

        # Optional LLM deep analysis over the whole batch.
        if self.use_llm and processed_news:
            try:
                # Reshape items into the format the LLM analyzer expects.
                llm_news_list = [
                    {
                        'title': news['original'].get('title', ''),
                        'content': news['original'].get('content', ''),
                        'date': news['original'].get('date', ''),
                        'source': news['original'].get('source', '')
                    }
                    for news in processed_news
                ]

                llm_analysis = self.llm_analyzer.analyze_news(llm_news_list)

                # The same (shared) analysis dict is attached to every item.
                for item in processed_news:
                    item['llm_analysis'] = llm_analysis

                logger.info("大语言模型新闻分析完成")
            except Exception as e:
                logger.error(f"大语言模型分析失败: {e}")

        return processed_news

    def analyze_with_llm(self, processed_data, stock_data=None):
        """
        Run deep LLM analysis on processed news (and optionally stocks).

        Args:
            processed_data: Output of process_news().
            stock_data: Optional stock data for deeper stock analysis.

        Returns:
            dict: Analysis results ({} when LLM is disabled or on failure).
        """
        if not self.use_llm:
            logger.warning("大语言模型分析未启用")
            return {}

        try:
            # All items share the same batch analysis, so the first one suffices.
            news_analysis = processed_data[0].get('llm_analysis', {}) if processed_data else {}

            if stock_data:
                stock_analysis = self.llm_analyzer.analyze_stocks(stock_data, news_analysis)

                # Feed the latest strategy report into the market commentary.
                latest_strategy = self._get_latest_strategy_report()
                market_commentary = self.llm_analyzer.generate_market_commentary(stock_analysis, latest_strategy)

                return {
                    'news_analysis': news_analysis,
                    'stock_analysis': stock_analysis,
                    'market_commentary': market_commentary
                }
            else:
                return {'news_analysis': news_analysis}

        except Exception as e:
            logger.error(f"大语言模型深度分析失败: {e}")
            return {}

    def _get_latest_strategy_report(self):
        """
        Load the most recently created strategy report, if any.

        Returns:
            dict: The report ({} when none exists or loading fails).
        """
        try:
            results_path = self.config['system']['data_storage']['results_path']
            strategy_pattern = os.path.join(results_path, 'strategy_report_*.json')

            # Local import: glob is only needed by this helper.
            import glob
            strategy_files = glob.glob(strategy_pattern)

            if not strategy_files:
                logger.warning("没有找到策略报告文件")
                return {}

            # Most recent by creation time.
            latest_file = max(strategy_files, key=os.path.getctime)

            with open(latest_file, 'r', encoding='utf-8') as f:
                strategy_report = json.load(f)

            logger.info(f"成功加载最新策略报告: {latest_file}")
            return strategy_report

        except Exception as e:
            logger.error(f"获取最新策略报告失败: {e}")
            return {}

    def save_processed_news(self, processed_news, output_path=None):
        """
        Save processed news to a timestamped JSON file.

        When LLM analysis is present, the (shared) LLM result is also written
        to a separate file under the configured LLM output path.

        Args:
            processed_news: Output of process_news().
            output_path: Target directory; defaults to the configured results path.

        Returns:
            str: Path of the saved file, or None when nothing was saved / on error.
        """
        if not processed_news:
            logger.warning("没有处理后的新闻需要保存")
            return None

        try:
            if output_path is None:
                output_path = self.config['system']['data_storage']['results_path']

            # Create the output directory if it does not exist yet.
            Path(output_path).mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            filename = f"processed_news_{timestamp}.json"
            filepath = os.path.join(output_path, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(processed_news, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存 {len(processed_news)} 条处理后的新闻到 {filepath}")

            # The LLM result is shared by every item, so saving item 0 suffices.
            if self.use_llm and processed_news and 'llm_analysis' in processed_news[0]:
                llm_output_path = self.config.get('llm_analysis', {}).get('output_path', 'data/results/llm_analysis/')
                Path(llm_output_path).mkdir(parents=True, exist_ok=True)

                llm_filename = f"llm_analysis_{timestamp}.json"
                llm_filepath = os.path.join(llm_output_path, llm_filename)

                with open(llm_filepath, 'w', encoding='utf-8') as f:
                    json.dump(processed_news[0]['llm_analysis'], f, ensure_ascii=False, indent=2)

                logger.info(f"成功保存大语言模型分析结果到 {llm_filepath}")

            return filepath

        except Exception as e:
            logger.error(f"保存处理后的新闻失败: {e}")
            return None

    def save_llm_analysis(self, llm_analysis, output_path=None):
        """
        Save an LLM deep-analysis result to a timestamped JSON file.

        Args:
            llm_analysis: Result of analyze_with_llm().
            output_path: Target directory; defaults to the configured LLM output path.

        Returns:
            str: Path of the saved file, or None when nothing was saved / on error.
        """
        if not llm_analysis:
            logger.warning("没有大语言模型分析结果需要保存")
            return None

        try:
            if output_path is None:
                output_path = self.config.get('llm_analysis', {}).get('output_path', 'data/results/llm_analysis/')

            # Create the output directory if it does not exist yet.
            Path(output_path).mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            filename = f"llm_deep_analysis_{timestamp}.json"
            filepath = os.path.join(output_path, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(llm_analysis, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存大语言模型深度分析结果到 {filepath}")
            return filepath

        except Exception as e:
            logger.error(f"保存大语言模型分析结果失败: {e}")
            return None

def main():
    """CLI entry point: load a news JSON file, process it and save the results."""
    import sys

    try:
        # A news file path is required as the first command-line argument.
        if len(sys.argv) < 2:
            print("用法: python text_processor.py <news_file_path>")
            return

        input_path = sys.argv[1]

        # Build the processor (reads the default config path).
        processor = TextProcessor()

        # Load the raw news items; bail out early when there is nothing to do.
        news_items = processor.load_news(input_path)
        if not news_items:
            print("没有找到新闻数据或数据为空")
            return

        # Run the full pipeline and persist the results.
        results = processor.process_news(news_items)
        saved_path = processor.save_processed_news(results)
        print(f"成功处理并保存了 {len(results)} 条新闻到 {saved_path}")

        if processor.use_llm:
            # Deeper LLM analysis could be run here, e.g.:
            # stock_data = load_stock_data()
            # llm_analysis = processor.analyze_with_llm(results, stock_data)
            # processor.save_llm_analysis(llm_analysis)
            print("大语言模型分析已完成")

    except Exception as e:
        logger.error(f"文本处理失败: {e}")

if __name__ == "__main__":
    main()