"""
NLP 自然语言处理路由模块
提供金融新闻预处理、分词、实体识别等功能
"""

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import List, Dict, Any, Optional
import re
import hashlib
from datetime import datetime
import jieba
import jieba.analyse
from bs4 import BeautifulSoup

# All routes in this module are mounted under the /nlp prefix.
router = APIRouter(prefix="/nlp", tags=["NLP"])


class NewsItem(BaseModel):
    """Data model for a single raw news item.

    All fields are optional because upstream feeds use inconsistent key
    names; alias resolution (title/headline, link/url, ...) happens in
    ``FinancialNewsPreprocessor.process_single_item``.

    NOTE(review): this model is not referenced by the endpoints visible
    in this module — confirm it is used elsewhere before removing.
    """
    # Title aliases: 'title' preferred, 'headline' as fallback.
    title: Optional[str] = None
    # Body aliases, in fallback order: content -> description -> summary.
    content: Optional[str] = None
    headline: Optional[str] = None
    description: Optional[str] = None
    summary: Optional[str] = None
    # Publication-time aliases; ISO-8601 strings expected downstream.
    pubDate: Optional[str] = None
    publishTime: Optional[str] = None
    # Link aliases: 'link' preferred, 'url' as fallback.
    link: Optional[str] = None
    url: Optional[str] = None
    source: Optional[str] = None

class NewsPreprocessRequest(BaseModel):
    """Request body for /nlp/preprocess.

    ``items`` is a list of free-form news dicts; key aliases are
    resolved by the preprocessor rather than validated here.
    """
    items: List[Dict[str, Any]]

class SegmentRequest(BaseModel):
    """Request body for the /segment and /entity_recognition endpoints."""
    # Text to analyse.
    text: str
    # Number of top keywords to extract (passed to jieba extract_tags).
    topK: Optional[int] = 10


class FinancialNewsPreprocessor:
    """Rule-based preprocessor for financial news items.

    Per-item pipeline: HTML stripping, key-information extraction
    (policies / companies / industries / percentages), coarse
    classification, importance scoring, and MD5 fingerprinting for
    downstream deduplication.
    """

    def __init__(self):
        # Eagerly build jieba's dictionary once at construction time so
        # the first request does not pay the lazy-initialisation cost.
        jieba.initialize()

        # Generic financial-market vocabulary: markets, instruments,
        # corporate actions, macro indicators and valuation metrics.
        self.financial_keywords = [
            '股票', '债券', '基金', '期货', '外汇', 'A股', '美股', '港股',
            '创业板', '科创板', '沪深', '上证', '深证', '中小板', '新三板',
            'IPO', '并购', '重组', '定增', '减持', '涨停', '跌停',
            '牛市', '熊市', '利好', '利空', '央行', '货币政策',
            '降准', '降息', 'GDP', 'CPI', 'PPI', 'PMI',
            '财报', '业绩', '净利润', '营收', '市值', '估值',
            'PE', 'PB', 'ROE', '资产负债', '现金流', '分红', '派息'
        ]

        # Policy-making and regulatory bodies.
        self.policy_orgs = [
            '央行', '中国人民银行', '证监会', '中国证监会',
            '银保监会', '发改委', '财政部', '国务院', '商务部'
        ]

        # Industry / sector names used for topic tagging.
        self.industry_keywords = [
            '新能源', 'AI', '人工智能', '生物医药', '半导体',
            '房地产', '银行', '保险', '券商', '基金公司'
        ]

    def clean_html_tags(self, text: str) -> str:
        """Strip HTML markup from *text* and collapse whitespace.

        Returns an empty string for falsy input (None, "").
        """
        if not text:
            return ""

        soup = BeautifulSoup(text, 'html.parser')
        cleaned_text = soup.get_text()

        # Collapse whitespace runs (including newlines left behind by
        # block-level tags) into single spaces.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()

        return cleaned_text

    def extract_key_info(self, title: str, content: str) -> Dict[str, Any]:
        """Extract structured signals from a title/content pair.

        Returns a dict with keys 'policies', 'companies', 'industries'
        and 'percentages'.
        """
        full_text = f"{title} {content}"

        # Policy mentions: an org name followed (non-greedily) by a
        # document-type word.  NOTE: re.findall with a single capture
        # group returns only that group, so the list holds the
        # document-type words ('政策', '通知', ...), not the org names.
        policy_matches = []
        for org in self.policy_orgs:
            pattern = f"{org}.*?(政策|规定|通知|公告|决定)"
            policy_matches.extend(re.findall(pattern, full_text))

        # Company mentions in "Name(6-digit code)" form; 一-龟
        # approximates the CJK Unified range U+4E00-U+9F9F.
        company_pattern = r'([一-龟]{2,6})\(([0-9]{6})\)'
        company_matches = re.findall(company_pattern, full_text)

        # Industry tags via plain substring containment.
        found_industries = [
            industry for industry in self.industry_keywords
            if industry in full_text
        ]

        # Percentage figures (price moves, growth rates, ...).
        percentage_pattern = r'[+\-]?[0-9]{1,3}\.?[0-9]*%'
        percentages = re.findall(percentage_pattern, full_text)

        return {
            'policies': policy_matches,
            'companies': company_matches,
            'industries': found_industries,
            'percentages': percentages
        }

    def classify_news_type(self, key_info: Dict[str, Any]) -> str:
        """Coarse classification by which signal fired first.

        Priority order: policies > companies > industries.
        """
        if key_info['policies']:
            return '宏观政策'
        elif key_info['companies']:
            return '公司公告'
        elif key_info['industries']:
            return '行业动态'
        else:
            return '未分类'

    def calculate_importance_score(self, title: str, content: str, key_info: Dict[str, Any]) -> float:
        """Heuristic importance score in [0.0, 10.0]."""
        score = 0.0

        # High-impact words appearing in the title weigh most (2.0 each).
        title_keywords = ['重大', '紧急', '突发', '首次', '创新高', '暴涨', '暴跌']
        for keyword in title_keywords:
            if keyword in title:
                score += 2.0

        # Sentiment-bearing words anywhere in title+content (1.5 each).
        sentiment_keywords = ['利好', '利空', '看好', '看空', '推荐', '风险']
        full_text = f"{title} {content}"
        for keyword in sentiment_keywords:
            if keyword in full_text:
                score += 1.5

        # Weight by the number of extracted entities of each kind.
        score += len(key_info['companies']) * 1.0
        score += len(key_info['policies']) * 2.0
        score += len(key_info['industries']) * 0.5
        score += len(key_info['percentages']) * 0.3

        return min(score, 10.0)  # cap at 10

    def generate_fingerprint(self, title: str, content: str) -> str:
        """MD5 hex digest of stripped title+content.

        Used only as a deduplication key, not as a security hash.
        """
        text = f"{title.strip()}{content.strip()}"
        return hashlib.md5(text.encode('utf-8')).hexdigest()

    def process_single_item(self, item: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Run the full pipeline on one raw news dict.

        Resolves field aliases (title/headline, link/url, ...), cleans
        and enriches the item, and returns the processed dict — or None
        when the item cannot be processed (deliberate best-effort: one
        bad item must not abort the batch).
        """
        try:
            # Resolve field aliases with sensible fallbacks.
            title = item.get('title', item.get('headline', '无标题'))
            content = item.get('content', item.get('description', item.get('summary', '')))
            pub_date = item.get('pubDate', item.get('publishTime', datetime.now().isoformat()))
            link = item.get('link', item.get('url', ''))
            source = item.get('source', '未知来源')

            # Clean HTML markup out of the text fields.
            clean_title = self.clean_html_tags(title)
            clean_content = self.clean_html_tags(content)

            # Extract signals, classify, score and fingerprint.
            key_info = self.extract_key_info(clean_title, clean_content)
            news_type = self.classify_news_type(key_info)
            importance_score = self.calculate_importance_score(clean_title, clean_content, key_info)
            fingerprint = self.generate_fingerprint(clean_title, clean_content)

            # Normalise the publication time; fall back to "now" for
            # unparseable strings.  'Z' is rewritten to '+00:00' because
            # datetime.fromisoformat() rejects it before Python 3.11.
            if isinstance(pub_date, str):
                try:
                    pub_date = datetime.fromisoformat(pub_date.replace('Z', '+00:00'))
                except ValueError:
                    pub_date = datetime.now()

            return {
                'title': clean_title,
                'content': clean_content,
                'pubDate': pub_date.isoformat(),
                'link': link,
                'source': source,
                'fingerprint': fingerprint,
                'newsType': news_type,
                'keyInfo': key_info,
                'importanceScore': importance_score,
                'processedAt': datetime.now().isoformat()
            }

        except Exception:
            # Best-effort: drop items that fail anywhere in the pipeline
            # instead of failing the whole batch.
            return None

    def process_batch(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Process *items*, silently skipping those that fail."""
        processed_items = []

        for item in items:
            processed_item = self.process_single_item(item)
            if processed_item is not None:
                processed_items.append(processed_item)

        return processed_items


# Module-level singleton; construction calls jieba.initialize(), so the
# dictionary-loading cost is paid once at import time.
preprocessor = FinancialNewsPreprocessor()


@router.post("/preprocess")
async def preprocess_news(request: NewsPreprocessRequest):
    """
        金融新闻预处理接口（适配 n8n 格式）
        处理流程包括：
        - HTML标签清理
        - 关键信息提取（政策/公司/行业）
        - 新闻分类
        - 重要性评分
        - 生成新闻指纹(MD5)
        - 时间格式标准化
        返回结构：{ "items": [...] }
    """
    try:
        processed_items = preprocessor.process_batch(request.items)
        
        return {
            "success": True,
            "message": f"成功处理 {len(processed_items)}/{len(request.items)} 条新闻",
            "data": processed_items,
            "processedAt": datetime.now().isoformat()
        }
    
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"预处理失败: {str(e)}")



@router.post("/segment")
async def segment_text(request: SegmentRequest):
    """
    中文分词接口
    
    使用jieba对中文文本进行分词和关键词提取
    """
    try:
        # jieba分词
        words = list(jieba.cut(request.text))
        
        # 关键词提取
        keywords = jieba.analyse.extract_tags(request.text, topK=request.topK)
        
        # 词性标注（可选）
        import jieba.posseg as pseg
        pos_words = [(word, flag) for word, flag in pseg.cut(request.text)]
        
        return {
            "success": True,
            "data": {
                "words": words,
                "keywords": keywords,
                "pos_words": pos_words,
                "word_count": len(words),
                "text_length": len(request.text)
            },
            "processedAt": datetime.now().isoformat()
        }
    
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"分词失败: {str(e)}")


@router.post("/entity_recognition")
async def recognize_entities(request: SegmentRequest):
    """
    金融实体识别接口
    
    识别文本中的金融实体（公司、政策机构、行业等）
    """
    try:
        key_info = preprocessor.extract_key_info(request.text, "")
        
        # 使用jieba分词
        words = list(jieba.cut(request.text))
        
        # 识别金融关键词
        financial_terms = []
        for word in words:
            if word in preprocessor.financial_keywords:
                financial_terms.append(word)
        
        return {
            "success": True,
            "data": {
                "entities": key_info,
                "financial_terms": financial_terms,
                "words": words
            },
            "processedAt": datetime.now().isoformat()
        }
    
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"实体识别失败: {str(e)}")


@router.get("/health")
async def health_check():
    """健康检查接口"""
    return {
        "status": "ok",
        "service": "NLP Service",
        "timestamp": datetime.now().isoformat()
    }
