# -*- coding: utf-8 -*-
# Author   : ZhangQing
# Time     : 2025-07-08 6:44
# File     : risk_model.py
# Project  : risk-contagion-analysis
# Desc     :

import numpy as np
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from sklearn.model_selection import train_test_split
import logging
import text_analysis  # 导入Rust实现的模块


class RiskCalculator:
    """Compute digital-technology risk exposure for companies from MD&A text.

    Combines three analyzers — a FinBERT sentiment model, a GPT prompt-based
    extractor, and a Rust text-analysis extension (``text_analysis``) — and
    aggregates their scores into the paper's
    "extreme risk exposure - average protection capability" metric.
    """

    def __init__(self, config):
        """Initialize the risk calculator.

        Args:
            config: Mapping with model paths and parameters, e.g.
                'finbert_model_path', 'model_save_path', 'openai_api_key'.
        """
        self.config = config
        self.logger = self._setup_logger()
        self.tokenizer, self.model = self._load_finbert_model()

    def _setup_logger(self):
        """Return the shared "RiskCalculator" logger.

        Only attaches a StreamHandler the first time: previously every
        instantiation stacked another handler on the same named logger,
        so each record was emitted multiple times.
        """
        logger = logging.getLogger("RiskCalculator")
        logger.setLevel(logging.INFO)
        if not logger.handlers:  # fix: avoid duplicate handlers per instance
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        return logger

    def _load_finbert_model(self):
        """Load the FinBERT tokenizer/model, moving the model to GPU if available.

        Returns:
            tuple: (tokenizer, model).

        Raises:
            Exception: re-raised after logging when loading fails.
        """
        try:
            model_path = self.config.get('finbert_model_path', 'yiyanghkust/finbert-tone')
            self.logger.info(f"加载FinBERT模型: {model_path}")

            tokenizer = AutoTokenizer.from_pretrained(model_path)
            model = AutoModelForSequenceClassification.from_pretrained(model_path)

            # Move the model to GPU when one is available.
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model.to(device)

            self.logger.info(f"FinBERT模型已加载，运行设备: {device}")
            return tokenizer, model
        except Exception as e:
            self.logger.error(f"加载FinBERT模型失败: {str(e)}")
            raise

    def finetune_model(self, texts, labels, epochs=3, batch_size=16):
        """Fine-tune the FinBERT model on financial risk texts.

        Args:
            texts: List of training texts.
            labels: List of int labels (0: neutral, 1: positive/protection,
                2: negative/risk).
            epochs: Number of training epochs.
            batch_size: Mini-batch size.
        """
        self.logger.info(f"开始微调FinBERT模型，数据量: {len(texts)}")

        # Hold out 20% of the data for validation.
        train_texts, val_texts, train_labels, val_labels = train_test_split(
            texts, labels, test_size=0.2, random_state=42
        )

        from torch.utils.data import Dataset, DataLoader

        class FinancialDataset(Dataset):
            """Text/label pairs; everything is tokenized up front."""

            def __init__(self, texts, labels, tokenizer, max_length=512):
                self.encodings = tokenizer(texts, truncation=True, padding=True,
                                           max_length=max_length, return_tensors="pt")
                self.labels = torch.tensor(labels)

            def __getitem__(self, idx):
                item = {key: val[idx] for key, val in self.encodings.items()}
                item['labels'] = self.labels[idx]
                return item

            def __len__(self):
                return len(self.labels)

        train_dataset = FinancialDataset(train_texts, train_labels, self.tokenizer)
        val_dataset = FinancialDataset(val_texts, val_labels, self.tokenizer)

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=batch_size)

        optimizer = torch.optim.AdamW(self.model.parameters(), lr=5e-5)

        # Train on whatever device the model currently lives on.
        device = next(self.model.parameters()).device
        self.model.train()

        for epoch in range(epochs):
            total_loss = 0
            for batch in train_loader:
                batch = {k: v.to(device) for k, v in batch.items()}

                # Forward pass (the model computes the loss from 'labels').
                outputs = self.model(**batch)
                loss = outputs.loss
                total_loss += loss.item()

                # Backward pass.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            avg_train_loss = total_loss / len(train_loader)

            # Validation pass (no gradients).
            self.model.eval()
            val_loss = 0
            correct = 0
            total = 0

            with torch.no_grad():
                for batch in val_loader:
                    batch = {k: v.to(device) for k, v in batch.items()}
                    outputs = self.model(**batch)
                    val_loss += outputs.loss.item()

                    predictions = torch.argmax(outputs.logits, dim=-1)
                    correct += (predictions == batch["labels"]).sum().item()
                    total += batch["labels"].size(0)

            # fix: guard against an empty validation split (tiny datasets)
            # which previously raised ZeroDivisionError.
            val_accuracy = correct / total if total else 0.0
            avg_val_loss = val_loss / len(val_loader) if len(val_loader) else 0.0

            self.logger.info(f"Epoch {epoch + 1}/{epochs}: "
                             f"训练损失={avg_train_loss:.4f}, "
                             f"验证损失={avg_val_loss:.4f}, "
                             f"验证准确率={val_accuracy:.4f}")

            # Switch back to training mode for the next epoch.
            self.model.train()

        self.logger.info("FinBERT模型微调完成")

        # Persist the fine-tuned model and tokenizer.
        model_save_path = self.config.get('model_save_path', './models/finetuned_finbert')
        self.model.save_pretrained(model_save_path)
        self.tokenizer.save_pretrained(model_save_path)
        self.logger.info(f"微调后的模型已保存到: {model_save_path}")

    def sentiment_analysis(self, text):
        """Run FinBERT sentiment analysis on a single text.

        Args:
            text: The text to analyze.

        Returns:
            dict: Softmax scores keyed 'neutral'/'positive'/'negative'
            (FinBERT class order assumed: 0=neutral, 1=positive, 2=negative).
        """
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
        device = next(self.model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = self.model(**inputs)
            scores = torch.nn.functional.softmax(outputs.logits, dim=1)
            scores = scores.cpu().numpy()[0]

        return {
            "neutral": float(scores[0]),
            "positive": float(scores[1]),  # protection-related
            "negative": float(scores[2])  # risk-related
        }

    def analyze_with_gpt(self, text):
        """Analyze risk/protection content in a text via the OpenAI API.

        Args:
            text: The text to analyze.

        Returns:
            dict: With keys 'risk_statements', 'protection_statements',
            'risk_score', 'protection_score'. Falls back to neutral
            defaults (0.5 scores, empty statement lists) on any failure.
        """
        import openai

        try:
            openai.api_key = self.config['openai_api_key']

            # Build the prompt (kept verbatim — it is runtime behavior).
            prompt = f"""
            请分析以下来自企业年报的文本，识别其中与数字技术风险和防护措施相关的内容:

            {text}

            请提供以下格式的JSON输出:
            1. risk_statements: 数组，包含所有关于数字技术风险的陈述
            2. protection_statements: 数组，包含所有关于防护措施的陈述
            3. risk_score: 0-1之间的风险评分，表示风险暴露程度
            4. protection_score: 0-1之间的分数，表示防护能力水平
            """

            response = openai.Completion.create(
                model="gpt-3.5-turbo-instruct",
                prompt=prompt,
                max_tokens=1000,
                temperature=0.1,
                n=1
            )

            import json
            try:
                result = json.loads(response.choices[0].text.strip())
                return result
            except json.JSONDecodeError:
                # The model did not return valid JSON: scrape the scores out.
                text_response = response.choices[0].text.strip()
                self.logger.warning(f"GPT返回非标准JSON格式: {text_response}")

                import re
                # fix: old pattern (0\.\d+) missed legal boundary scores such
                # as "0", "1" or "1.0"; accept them and clamp into [0, 1].
                risk_score_match = re.search(r'risk_score"?\s*:\s*([01](?:\.\d+)?)', text_response)
                protection_score_match = re.search(r'protection_score"?\s*:\s*([01](?:\.\d+)?)', text_response)

                risk_score = min(1.0, float(risk_score_match.group(1))) if risk_score_match else 0.5
                protection_score = min(1.0, float(protection_score_match.group(1))) if protection_score_match else 0.5

                return {
                    "risk_statements": [],
                    "protection_statements": [],
                    "risk_score": risk_score,
                    "protection_score": protection_score
                }

        except Exception as e:
            self.logger.error(f"GPT分析失败: {str(e)}")
            # Best-effort fallback so callers always get a usable result.
            return {
                "risk_statements": [],
                "protection_statements": [],
                "risk_score": 0.5,
                "protection_score": 0.5
            }

    def cross_validate(self, text):
        """Cross-validate the analysis of a text across the three models.

        Args:
            text: The text to analyze.

        Returns:
            dict: Combined 'risk_score' / 'protection_score' (weighted
            0.4 FinBERT + 0.4 GPT + 0.2 Rust), per-model scores under
            'model_scores', and the detected 'negation_rate'.
        """
        # FinBERT sentiment scores.
        finbert_result = self.sentiment_analysis(text)

        # GPT prompt-based extraction.
        gpt_result = self.analyze_with_gpt(text)

        # Rust high-performance analysis, chunked to <= 2000 chars per piece.
        text_chunks = [text[i:i + 2000] for i in range(0, len(text), 2000)]
        rust_results = text_analysis.batch_process_texts(text_chunks) if text_chunks else []

        # fix: empty input previously raised ZeroDivisionError here.
        if rust_results:
            avg_risk_score = sum(score[0] for score in rust_results) / len(rust_results)
            avg_protection_score = sum(score[1] for score in rust_results) / len(rust_results)
        else:
            avg_risk_score = 0.0
            avg_protection_score = 0.0

        # Share of negated statements among all detected statements.
        negation_results = text_analysis.analyze_text_negation(text)
        negation_rate = sum(1 for _, is_negation in negation_results if is_negation) / len(
            negation_results) if negation_results else 0

        # Weighted ensemble of the three models.
        combined_risk_score = (
                finbert_result["negative"] * 0.4 +
                gpt_result["risk_score"] * 0.4 +
                (avg_risk_score / 10.0) * 0.2  # assumes Rust scores in 0-10 — TODO confirm
        )

        combined_protection_score = (
                finbert_result["positive"] * 0.4 +
                gpt_result["protection_score"] * 0.4 +
                (avg_protection_score / 10.0) * 0.2
        )

        # Heavy use of negation shifts mass from protection toward risk.
        if negation_rate > 0.1:  # more than 10% negated statements
            combined_risk_score = min(1.0, combined_risk_score * (1 + negation_rate * 0.5))
            combined_protection_score = max(0.0, combined_protection_score * (1 - negation_rate * 0.3))

        return {
            "risk_score": combined_risk_score,
            "protection_score": combined_protection_score,
            "model_scores": {
                "finbert": finbert_result,
                "gpt": gpt_result,
                "rust": {"risk": avg_risk_score, "protection": avg_protection_score}
            },
            "negation_rate": negation_rate
        }

    def calculate_risk_exposure(self, company_data):
        """Compute per-company digital-technology risk exposure.

        Implements the paper's "extreme risk exposure - average protection
        capability" framework: per company, the max yearly risk score minus
        the mean yearly protection score, floored at zero.

        Args:
            company_data: DataFrame with 'stock_code', 'year' and 'mda_text'
                columns.

        Returns:
            DataFrame: One row per analyzed (company, year) with per-year
            scores plus company-level 'max_risk_score',
            'avg_protection_score', 'net_risk_exposure' and a 'risk_type'
            label.
        """
        self.logger.info(f"开始计算风险敞口，处理{len(company_data)}家公司的数据...")

        results = []

        # Process one company at a time.
        for company_code, group in company_data.groupby('stock_code'):
            self.logger.info(f"处理公司 {company_code}...")

            company_results = []
            for _, row in group.iterrows():
                year = row['year']
                mda_text = row['mda_text']

                # Skip missing or trivially short texts.
                if not isinstance(mda_text, str) or len(mda_text.strip()) < 100:
                    continue

                analysis_result = self.cross_validate(mda_text)

                risk_score = analysis_result["risk_score"]
                protection_score = analysis_result["protection_score"]
                negation_rate = analysis_result["negation_rate"]

                company_results.append({
                    "stock_code": company_code,
                    "year": year,
                    "risk_score": risk_score,
                    "protection_score": protection_score,
                    "negation_rate": negation_rate,
                    "text_length": len(mda_text)
                })

            if company_results:
                # Extreme risk exposure = maximum yearly risk score.
                max_risk_score = max(item["risk_score"] for item in company_results)

                # Average protection capability across years.
                avg_protection_score = sum(item["protection_score"] for item in company_results) / len(company_results)

                # Net exposure = max risk - avg protection, floored at 0.
                net_risk_exposure = max(0, max_risk_score - avg_protection_score)

                # Attach the company-level aggregates to every yearly record.
                for item in company_results:
                    item["max_risk_score"] = max_risk_score
                    item["avg_protection_score"] = avg_protection_score
                    item["net_risk_exposure"] = net_risk_exposure

                results.extend(company_results)

        result_df = pd.DataFrame(results)

        # Simplified risk-type split by text features; a full implementation
        # should combine external sources (e.g. MIIT security-incident data).
        def classify_risk_type(row):
            # Placeholder business rule pending real classification logic.
            if row["negation_rate"] > 0.15:
                return "data_security"  # data security risk
            else:
                return "network_security"  # network security risk

        # fix: DataFrame.apply(axis=1) on an empty frame cannot be assigned
        # as a column; add the column explicitly when nothing was analyzed.
        if result_df.empty:
            result_df["risk_type"] = pd.Series(dtype=object)
        else:
            result_df["risk_type"] = result_df.apply(classify_risk_type, axis=1)

        self.logger.info(f"风险敞口计算完成，共处理{len(result_df)}条记录")
        return result_df

