
import torch
from torch.utils.data import Dataset, DataLoader
from torch.optim import AdamW
from transformers import BertTokenizer, BertForSequenceClassification, RobertaTokenizer, RobertaForSequenceClassification
from transformers import get_linear_schedule_with_warmup
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
import os
import time
import logging
import random
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from tqdm.notebook import tqdm
from torch.utils.data import TensorDataset, RandomSampler, SequentialSampler
import torch.nn as nn
import json
import gc
import warnings
warnings.filterwarnings('ignore')

# Constant configuration
MODEL_NAME = "bert-base-chinese"  # default pretrained model
MAX_LENGTH = 128                 # default maximum sequence length
BATCH_SIZE = 16                  # default batch size
LEARNING_RATE = 2e-5             # default learning rate
ADAM_EPSILON = 1e-8              # epsilon for the AdamW optimizer
WARMUP_STEPS = 0                 # linear-warmup steps for the LR scheduler
EPOCHS = 3                       # default number of training epochs
OUTPUT_DIR = "./models"          # default checkpoint/plot output directory
NUM_LABELS = 3                   # default label count (negative, neutral, positive)

# Logging setup (note: most of this module reports progress via print())
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO
)
logger = logging.getLogger(__name__)


class ModelFactory:
    """Factory that builds a model together with its matching tokenizer."""

    @staticmethod
    def create_model(model_type, model_name, num_labels=NUM_LABELS, **kwargs):
        """
        Build a model/tokenizer pair for the requested architecture.

        Args:
            model_type: architecture family, e.g. 'bert', 'finbert', 'roberta', 'lstm'
            model_name: pretrained model name or path
            num_labels: number of classification labels
            **kwargs: extra architecture-specific options

        Returns:
            (model, tokenizer) tuple

        Raises:
            ValueError: for an unrecognized model_type
        """
        kind = model_type.lower()

        # BERT family (FinBERT checkpoints use the BERT architecture).
        if kind in ('bert', 'finbert'):
            try:
                tok = BertTokenizer.from_pretrained(model_name)
                net = BertForSequenceClassification.from_pretrained(
                    model_name,
                    num_labels=num_labels,
                    output_attentions=False,
                    output_hidden_states=False,
                    **kwargs
                )
                print(f"成功加载BERT模型: {model_name}")
                return net, tok
            except Exception as e:
                print(f"BERT模型加载失败: {str(e)}")
                raise

        # RoBERTa family.
        if kind == 'roberta':
            try:
                tok = RobertaTokenizer.from_pretrained(model_name)
                net = RobertaForSequenceClassification.from_pretrained(
                    model_name,
                    num_labels=num_labels,
                    output_attentions=False,
                    output_hidden_states=False,
                    **kwargs
                )
                print(f"成功加载RoBERTa模型: {model_name}")
                return net, tok
            except Exception as e:
                print(f"RoBERTa模型加载失败: {str(e)}")
                raise

        # LSTM: reuse a BERT tokenizer purely as a vocabulary/tokenization layer.
        if kind == 'lstm':
            vocab_source = model_name if model_name else "bert-base-chinese"
            tok = BertTokenizer.from_pretrained(vocab_source)

            net = LSTMClassifier(
                vocab_size=len(tok.vocab),
                embedding_dim=kwargs.get('embedding_dim', 300),
                hidden_dim=kwargs.get('hidden_dim', 256),
                num_layers=kwargs.get('num_layers', 2),
                num_labels=num_labels,
                dropout=kwargs.get('dropout', 0.2),
                bidirectional=kwargs.get('bidirectional', True)
            )

            print(f"成功创建LSTM模型")
            return net, tok

        print(f"不支持的模型类型: {kind}")
        raise ValueError(f"不支持的模型类型: {kind}")


class LSTMClassifier(nn.Module):
    """LSTM-based sequence classifier exposing a Hugging Face-style forward()."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers, num_labels, dropout=0.2, bidirectional=True):
        """
        Initialize the LSTM classifier.

        Args:
            vocab_size: vocabulary size for the embedding table
            embedding_dim: embedding dimension
            hidden_dim: LSTM hidden dimension (per direction)
            num_layers: number of stacked LSTM layers
            num_labels: number of output classes
            dropout: dropout probability
            bidirectional: whether to use a bidirectional LSTM
        """
        super(LSTMClassifier, self).__init__()

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(
            embedding_dim,
            hidden_dim,
            num_layers=num_layers,
            # nn.LSTM applies dropout only between layers; passing a nonzero
            # value with a single layer triggers a warning, so suppress it.
            dropout=dropout if num_layers > 1 else 0,
            batch_first=True,
            bidirectional=bidirectional
        )

        # A bidirectional LSTM concatenates forward/backward states, doubling width.
        self.fc = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, num_labels)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_ids, attention_mask=None, labels=None, token_type_ids=None):
        """
        Forward pass.

        Args:
            input_ids: (batch, seq_len) token id tensor
            attention_mask: optional (batch, seq_len) mask; padded positions are zeroed
            labels: optional (batch,) class ids; when given, cross-entropy loss is computed
            token_type_ids: accepted and ignored so callers can pass the same keyword
                arguments they would pass to Hugging Face sequence-classification
                models (BaseTrainer passes token_type_ids=None)

        Returns:
            An object with `.loss` (None when no labels given) and `.logits`.
        """
        # Embedding layer
        embedded = self.embedding(input_ids)

        # Zero out embeddings at padded positions when a mask is supplied.
        if attention_mask is not None:
            mask = attention_mask.unsqueeze(-1).expand_as(embedded)
            embedded = embedded * mask

        # LSTM layer
        lstm_output, (hidden, cell) = self.lstm(embedded)

        # Build the sentence representation from the final layer's hidden state.
        if self.lstm.bidirectional:
            # hidden[-2]/hidden[-1] are the last layer's forward/backward states.
            hidden = torch.cat((hidden[-2], hidden[-1]), dim=1)
        else:
            hidden = hidden[-1]

        # Dropout then projection to class logits.
        hidden = self.dropout(hidden)
        logits = self.fc(hidden)

        # Compute loss only when labels are provided.
        loss = None
        if labels is not None:
            loss_fn = nn.CrossEntropyLoss()
            loss = loss_fn(logits, labels)

        # Mimic the Hugging Face output object (attribute access for loss/logits).
        return type('LSTMOutput', (), {
            'loss': loss,
            'logits': logits
        })


class BaseTrainer:
    """Generic trainer base class: data preparation, training loop, evaluation,
    prediction, and checkpoint save/load shared by all model trainers."""

    def __init__(self,
                 model_type,
                 model_name=MODEL_NAME,
                 max_seq_length=MAX_LENGTH,
                 batch_size=BATCH_SIZE,
                 learning_rate=LEARNING_RATE,
                 adam_epsilon=ADAM_EPSILON,
                 weight_decay=0.01,
                 warmup_steps=WARMUP_STEPS,
                 num_epochs=EPOCHS,
                 num_labels=NUM_LABELS,
                 output_dir=OUTPUT_DIR,
                 mixed_precision=False,
                 fp16=False,
                 model_kwargs=None):
        """
        Initialize the generic trainer.

        Args:
            model_type: model family, e.g. 'bert', 'finbert', 'roberta', 'lstm'
            model_name: pretrained model name or path
            max_seq_length: maximum tokenized sequence length
            batch_size: batch size
            learning_rate: learning rate
            adam_epsilon: epsilon for the AdamW optimizer
            weight_decay: weight decay (skipped for bias/LayerNorm parameters)
            warmup_steps: linear-warmup steps for the LR scheduler
            num_epochs: number of training epochs
            num_labels: number of classification labels
            output_dir: directory where checkpoints and plots are written
            mixed_precision: use torch.cuda.amp mixed-precision training
            fp16: enable FP16 gradient scaling (only effective with mixed_precision)
            model_kwargs: extra keyword arguments forwarded to ModelFactory
        """
        self.model_type = model_type
        self.model_name = model_name
        self.max_seq_length = max_seq_length
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.adam_epsilon = adam_epsilon
        self.weight_decay = weight_decay
        self.warmup_steps = warmup_steps
        self.num_epochs = num_epochs
        self.num_labels = num_labels
        self.output_dir = output_dir
        self.mixed_precision = mixed_precision
        self.fp16 = fp16
        self.model_kwargs = model_kwargs or {}

        # Create the output directory if needed.
        os.makedirs(self.output_dir, exist_ok=True)

        # Select the compute device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {self.device}")

        # Seed CUDA for reproducibility.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(42)

        # Load model and tokenizer.
        self._load_model_and_tokenizer()

        # Set up AMP gradient scaling when requested and available.
        if self.mixed_precision and hasattr(torch.cuda, 'amp'):
            self.scaler = torch.cuda.amp.GradScaler(enabled=self.fp16)
        else:
            self.mixed_precision = False
            self.scaler = None
            if self.fp16:
                print("FP16被请求但不支持，将使用FP32训练")

    def _load_model_and_tokenizer(self):
        """Create the model/tokenizer via ModelFactory and move the model to the device."""
        try:
            print(f"从 {self.model_name} 加载 {self.model_type} 模型...")
            self.model, self.tokenizer = ModelFactory.create_model(
                self.model_type,
                self.model_name,
                num_labels=self.num_labels,
                **self.model_kwargs
            )
            self.model.to(self.device)
            print("模型和分词器加载成功")

        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            raise

    def prepare_data(self, df, text_column, label_column, test_size=0.2, random_state=42):
        """
        Prepare training data: label encoding, stratified split, dataloaders.

        Args:
            df: DataFrame with text and label columns (not modified)
            text_column: name of the text column
            label_column: name of the label column
            test_size: validation split fraction
            random_state: random seed for the split

        Returns:
            train_dataloader: training DataLoader
            validation_dataloader: validation DataLoader
            label_dict: mapping from original label value to integer id
        """
        print("准备训练数据...")

        # Work on a copy so the caller's DataFrame is not mutated by the
        # 'label_id' column added below.
        df = df.copy()

        # Build the label-to-id mapping.
        unique_labels = df[label_column].unique()
        label_dict = {label: i for i, label in enumerate(unique_labels)}
        print(f"标签映射: {label_dict}")

        # Encode labels.
        df['label_id'] = df[label_column].map(label_dict)

        # Stratified train/validation split.
        train_df, val_df = train_test_split(
            df,
            test_size=test_size,
            random_state=random_state,
            stratify=df['label_id']
        )

        print(f"训练集大小: {len(train_df)}, 验证集大小: {len(val_df)}")

        # Build dataloaders (random order for training, sequential for eval).
        train_dataloader = self._create_dataloader(train_df, text_column, 'label_id', True)
        validation_dataloader = self._create_dataloader(val_df, text_column, 'label_id', False)

        print("数据准备完成")
        return train_dataloader, validation_dataloader, label_dict

    def _create_dataloader(self, df, text_column, label_column, is_train):
        """
        Tokenize a DataFrame and wrap it in a DataLoader.

        Args:
            df: DataFrame to encode
            text_column: name of the text column
            label_column: name of the (integer) label column
            is_train: True for random sampling, False for sequential

        Returns:
            DataLoader over (input_ids, attention_mask, label) tensors
        """
        # Extract texts and labels.
        texts = df[text_column].values
        labels = df[label_column].values

        # Encode each text to fixed-length id/mask tensors.
        input_ids = []
        attention_masks = []

        for text in tqdm(texts, desc="编码文本"):
            encoded_dict = self.tokenizer.encode_plus(
                text,
                add_special_tokens=True,
                max_length=self.max_seq_length,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt'
            )

            input_ids.append(encoded_dict['input_ids'])
            attention_masks.append(encoded_dict['attention_mask'])

        # Stack per-sample tensors into batch tensors.
        input_ids = torch.cat(input_ids, dim=0)
        attention_masks = torch.cat(attention_masks, dim=0)
        labels = torch.tensor(labels)

        # Build the dataset.
        dataset = TensorDataset(input_ids, attention_masks, labels)

        # Random order for training, deterministic order for evaluation.
        sampler = RandomSampler(dataset) if is_train else SequentialSampler(dataset)
        dataloader = DataLoader(
            dataset,
            sampler=sampler,
            batch_size=self.batch_size
        )

        return dataloader

    def train(self, train_dataloader, validation_dataloader=None):
        """
        Run the full training loop, checkpointing after each epoch.

        Args:
            train_dataloader: training DataLoader
            validation_dataloader: optional validation DataLoader

        Returns:
            dict with per-epoch 'train_loss', 'val_loss', 'val_accuracy', 'val_f1'
        """
        print("开始训练模型...")

        # Build optimizer.
        optimizer = self._prepare_optimizer()

        # Total optimization steps across all epochs.
        total_steps = len(train_dataloader) * self.num_epochs

        # Linear warmup + decay schedule.
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.warmup_steps,
            num_training_steps=total_steps
        )

        # Track training history.
        history = {
            'train_loss': [],
            'val_loss': [],
            'val_accuracy': [],
            'val_f1': []
        }

        print(f"开始训练，共 {self.num_epochs} 轮...")

        for epoch in range(self.num_epochs):
            print(f"轮次 {epoch + 1}/{self.num_epochs}")

            # One training epoch.
            epoch_loss = self._train_epoch(train_dataloader, optimizer, scheduler)
            history['train_loss'].append(epoch_loss)

            # Checkpoint the model for this epoch.
            epoch_output_dir = os.path.join(self.output_dir, f"{self.model_type}_finetuned_epoch_{epoch + 1}")
            os.makedirs(epoch_output_dir, exist_ok=True)

            print(f"保存模型到 {epoch_output_dir}")
            if hasattr(self.model, 'save_pretrained'):
                # Hugging Face models save config + weights + tokenizer files.
                self.model.save_pretrained(epoch_output_dir)
                self.tokenizer.save_pretrained(epoch_output_dir)
            else:
                # Custom models (e.g. LSTM): save the state dict...
                torch.save(self.model.state_dict(), os.path.join(epoch_output_dir, 'model.pt'))
                # ...and also the fully pickled model for convenient loading.
                torch.save(self.model, os.path.join(epoch_output_dir, 'model_full.pt'))

            # Evaluate on the validation set when provided.
            if validation_dataloader:
                val_loss, val_metrics = self._evaluate(validation_dataloader)
                history['val_loss'].append(val_loss)
                history['val_accuracy'].append(val_metrics['accuracy'])
                history['val_f1'].append(val_metrics['f1'])

                print(
                    f"验证集结果 - 损失: {val_loss:.4f}, 准确率: {val_metrics['accuracy']:.4f}, F1分数: {val_metrics['f1']:.4f}")

        print("训练完成")

        # Plot loss/accuracy/F1 curves.
        if validation_dataloader:
            self._visualize_training_history(history)

        return history

    def _prepare_optimizer(self):
        """
        Build an AdamW optimizer with weight decay disabled for bias/LayerNorm.

        Returns:
            AdamW optimizer over two parameter groups.
        """
        param_optimizer = list(self.model.named_parameters())
        # Standard BERT fine-tuning convention: no decay on these parameters.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': self.weight_decay
            },
            {
                'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            }
        ]

        optimizer = AdamW(
            optimizer_grouped_parameters,
            lr=self.learning_rate,
            eps=self.adam_epsilon
        )

        return optimizer

    def _train_epoch(self, train_dataloader, optimizer, scheduler):
        """
        Train for one epoch.

        Args:
            train_dataloader: training DataLoader
            optimizer: optimizer
            scheduler: learning-rate scheduler (stepped once per batch)

        Returns:
            Average training loss over the epoch.
        """
        self.model.train()
        total_loss = 0

        progress_bar = tqdm(train_dataloader, desc="训练中")

        for batch in progress_bar:
            # Move the batch to the compute device.
            b_input_ids = batch[0].to(self.device)
            b_attention_mask = batch[1].to(self.device)
            b_labels = batch[2].to(self.device)

            # Zero gradients.
            self.model.zero_grad()

            # Mixed-precision path uses autocast + gradient scaling.
            if self.mixed_precision:
                with torch.cuda.amp.autocast():
                    outputs = self.model(
                        b_input_ids,
                        token_type_ids=None,
                        attention_mask=b_attention_mask,
                        labels=b_labels
                    )
                    loss = outputs.loss

                # Scaled backward pass.
                self.scaler.scale(loss).backward()

                # Unscale before clipping so the clip threshold is in true units.
                self.scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)

                # Optimizer + scaler + LR schedule step.
                self.scaler.step(optimizer)
                self.scaler.update()
                scheduler.step()
            else:
                # Plain FP32 forward pass.
                outputs = self.model(
                    b_input_ids,
                    token_type_ids=None,
                    attention_mask=b_attention_mask,
                    labels=b_labels
                )

                loss = outputs.loss

                # Backward pass.
                loss.backward()

                # Clip gradients to stabilize training.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)

                # Optimizer + LR schedule step.
                optimizer.step()
                scheduler.step()

            total_loss += loss.item()

            # Show the running loss in the progress bar.
            progress_bar.set_postfix({'loss': f'{loss.item():.4f}'})

        avg_train_loss = total_loss / len(train_dataloader)
        print(f"平均训练损失: {avg_train_loss:.4f}")

        return avg_train_loss

    def _evaluate(self, validation_dataloader):
        """
        Evaluate the model on a validation DataLoader.

        Args:
            validation_dataloader: validation DataLoader

        Returns:
            avg_val_loss: average validation loss
            metrics: dict with 'accuracy', 'f1', 'report', 'confusion_matrix'
        """
        print("评估模型...")

        self.model.eval()

        val_loss = 0
        predictions = []
        true_labels = []

        for batch in tqdm(validation_dataloader, desc="评估中"):
            # Move the batch to the compute device.
            b_input_ids = batch[0].to(self.device)
            b_attention_mask = batch[1].to(self.device)
            b_labels = batch[2].to(self.device)

            # No gradients during evaluation.
            with torch.no_grad():
                outputs = self.model(
                    b_input_ids,
                    token_type_ids=None,
                    attention_mask=b_attention_mask,
                    labels=b_labels
                )

                loss = outputs.loss
                logits = outputs.logits

            val_loss += loss.item()

            # Collect predictions and gold labels on CPU.
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()

            predictions.extend(np.argmax(logits, axis=1))
            true_labels.extend(label_ids)

        # Aggregate metrics.
        avg_val_loss = val_loss / len(validation_dataloader)
        accuracy = accuracy_score(true_labels, predictions)
        f1 = f1_score(true_labels, predictions, average='weighted')

        # Use the default 3-way sentiment names only when they apply; otherwise
        # fall back to numeric class names so reports don't crash for other
        # label counts.
        if self.num_labels == 3:
            class_names = ['负面', '中性', '正面']
        else:
            class_names = [str(i) for i in range(self.num_labels)]

        report = classification_report(true_labels, predictions, target_names=class_names)
        print(f"分类报告:\n{report}")

        # Confusion matrix plot.
        cm = confusion_matrix(true_labels, predictions)
        self._plot_confusion_matrix(cm, class_names)

        metrics = {
            'accuracy': accuracy,
            'f1': f1,
            'report': report,
            'confusion_matrix': cm
        }

        return avg_val_loss, metrics

    def _plot_confusion_matrix(self, cm, classes):
        """
        Plot and save the confusion matrix as a PNG in the output directory.

        Args:
            cm: confusion matrix array
            classes: class names for the axes
        """
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=classes, yticklabels=classes)
        plt.title('混淆矩阵')
        plt.ylabel('真实标签')
        plt.xlabel('预测标签')
        plt.tight_layout()

        # Save the figure to disk.
        cm_path = os.path.join(self.output_dir, f"{self.model_type}_confusion_matrix.png")
        plt.savefig(cm_path)
        print(f"混淆矩阵已保存到 {cm_path}")

        plt.close()

    def _visualize_training_history(self, history):
        """
        Plot loss/accuracy/F1 curves over epochs and save them as a PNG.

        Args:
            history: dict produced by train()
        """
        plt.figure(figsize=(15, 5))

        # Loss curves.
        plt.subplot(1, 3, 1)
        plt.plot(history['train_loss'], 'b-o', label='训练损失')
        plt.plot(history['val_loss'], 'r-o', label='验证损失')
        plt.title('训练和验证损失')
        plt.xlabel('轮次')
        plt.ylabel('损失')
        plt.legend()

        # Validation accuracy.
        plt.subplot(1, 3, 2)
        plt.plot(history['val_accuracy'], 'g-o', label='验证准确率')
        plt.title('验证准确率')
        plt.xlabel('轮次')
        plt.ylabel('准确率')
        plt.ylim([0, 1])

        # Validation F1.
        plt.subplot(1, 3, 3)
        plt.plot(history['val_f1'], 'm-o', label='验证F1分数')
        plt.title('验证F1分数')
        plt.xlabel('轮次')
        plt.ylabel('F1分数')
        plt.ylim([0, 1])

        plt.tight_layout()

        # Save the figure to disk.
        history_path = os.path.join(self.output_dir, f"{self.model_type}_training_history.png")
        plt.savefig(history_path)
        print(f"训练历史已保存到 {history_path}")

        plt.close()

    def predict(self, texts, batch_size=None):
        """
        Predict sentiment for one text or a list of texts.

        Args:
            texts: a string or a list of strings
            batch_size: batch size; defaults to the trainer's batch size

        Returns:
            A single result dict for string input, else a list of result dicts.
        """
        if batch_size is None:
            batch_size = self.batch_size

        # Normalize single-string input to a list, remembering the original form.
        if isinstance(texts, str):
            texts = [texts]
            single_input = True
        else:
            single_input = False

        results = []

        # Predict batch by batch.
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            batch_results = self._predict_batch(batch_texts)
            results.extend(batch_results)

        # Unwrap for single-string input.
        return results[0] if single_input else results

    def _predict_batch(self, texts):
        """
        Run prediction for one batch of texts.

        NOTE(review): the result keys assume the default 3-way sentiment setup
        (negative/neutral/positive); with a different num_labels the per-class
        probability keys would not match — confirm before reusing.

        Args:
            texts: list of strings

        Returns:
            list of dicts with predicted sentiment, confidence, and class probabilities
        """
        self.model.eval()

        # Tokenize the whole batch at once with dynamic padding.
        inputs = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=self.max_seq_length,
            return_tensors="pt"
        )

        # Move inputs to the compute device.
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        # Forward pass without gradients; softmax to get class probabilities.
        with torch.no_grad():
            outputs = self.model(**inputs)
            logits = outputs.logits
            probabilities = torch.nn.functional.softmax(logits, dim=1)

        probs_numpy = probabilities.cpu().numpy()

        # Assemble one result dict per input text.
        results = []
        for i, text in enumerate(texts):
            probs = probs_numpy[i]
            sentiment_idx = np.argmax(probs)
            sentiment = ['负面', '中性', '正面'][sentiment_idx]
            confidence = probs[sentiment_idx]

            result = {
                'text': text,
                'predicted_sentiment': sentiment,
                'confidence': confidence,
                'negative_prob': probs[0],
                'neutral_prob': probs[1],
                'positive_prob': probs[2]
            }
            results.append(result)

        return results

    def save_model(self, path=None):
        """
        Save the model, tokenizer (when available), and a config.json.

        Args:
            path: target directory; defaults to <output_dir>/<model_type>_finetuned_final
        """
        if path is None:
            path = os.path.join(self.output_dir, f"{self.model_type}_finetuned_final")

        os.makedirs(path, exist_ok=True)

        print(f"保存模型到 {path}")

        if hasattr(self.model, 'save_pretrained'):
            # Hugging Face models save config + weights + tokenizer files.
            self.model.save_pretrained(path)
            self.tokenizer.save_pretrained(path)
        else:
            # Custom models: save the state dict...
            torch.save(self.model.state_dict(), os.path.join(path, 'model.pt'))
            # ...and also the fully pickled model for convenient loading.
            torch.save(self.model, os.path.join(path, 'model_full.pt'))

        # Persist the settings needed to reload this trainer.
        config = {
            'model_type': self.model_type,
            'model_name': self.model_name,
            'max_seq_length': self.max_seq_length,
            'num_labels': self.num_labels,
            'label_map': ['负面', '中性', '正面']
        }

        with open(os.path.join(path, 'config.json'), 'w') as f:
            json.dump(config, f)

    def load_model(self, path):
        """
        Load a model previously saved with save_model().

        Args:
            path: directory containing config.json and model files
        """
        print(f"从 {path} 加载模型...")

        try:
            # Restore the saved settings.
            with open(os.path.join(path, 'config.json'), 'r') as f:
                config = json.load(f)

            self.model_type = config.get('model_type', self.model_type)
            self.model_name = config.get('model_name', self.model_name)
            self.max_seq_length = config.get('max_seq_length', self.max_seq_length)
            self.num_labels = config.get('num_labels', self.num_labels)

            # Instantiate the right architecture for the saved checkpoint.
            if self.model_type == 'roberta':
                # Fix: RoBERTa checkpoints must be loaded with the RoBERTa
                # classes, not the BERT ones.
                self.model = RobertaForSequenceClassification.from_pretrained(path)
                self.tokenizer = RobertaTokenizer.from_pretrained(path)
            elif self.model_type in ['bert', 'finbert']:
                self.model = BertForSequenceClassification.from_pretrained(path)
                self.tokenizer = BertTokenizer.from_pretrained(path)
            elif self.model_type == 'lstm':
                # WARNING: torch.load of a fully pickled model executes arbitrary
                # code on load — only load checkpoints from trusted sources.
                self.model = torch.load(os.path.join(path, 'model_full.pt'))
                self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

            self.model.to(self.device)
            print("模型加载成功")

        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            raise


class FinBertTrainer(BaseTrainer):
    """Trainer for fine-tuning FinBERT on financial-text sentiment analysis."""

    def __init__(self, **kwargs):
        """
        Initialize the FinBERT trainer.

        Args:
            **kwargs: forwarded to BaseTrainer; model_type is forced to 'finbert'
        """
        super().__init__(**{**kwargs, 'model_type': 'finbert'})


class BertTrainer(BaseTrainer):
    """Trainer for fine-tuning BERT on text sentiment analysis."""

    def __init__(self, **kwargs):
        """
        Initialize the BERT trainer.

        Args:
            **kwargs: forwarded to BaseTrainer; model_type is forced to 'bert'
        """
        super().__init__(**{**kwargs, 'model_type': 'bert'})


class RobertaTrainer(BaseTrainer):
    """Trainer for fine-tuning RoBERTa on text sentiment analysis."""

    def __init__(self, **kwargs):
        """
        Initialize the RoBERTa trainer.

        Args:
            **kwargs: forwarded to BaseTrainer; model_type is forced to 'roberta'
        """
        super().__init__(**{**kwargs, 'model_type': 'roberta'})


class LSTMTrainer(BaseTrainer):
    """Trainer for the LSTM sentiment classifier."""

    def __init__(self, **kwargs):
        """
        Initialize the LSTM trainer.

        Args:
            **kwargs: BaseTrainer arguments. LSTM-specific options
                (embedding_dim, hidden_dim, num_layers, dropout, bidirectional)
                may be passed directly; they are folded into model_kwargs.
        """
        kwargs['model_type'] = 'lstm'

        # Copy so a caller-supplied dict is never mutated, and tolerate an
        # explicit model_kwargs=None (the original .get(..., {}) would crash
        # on .update in that case).
        model_kwargs = dict(kwargs.get('model_kwargs') or {})
        model_kwargs.update({
            'embedding_dim': kwargs.pop('embedding_dim', 300),
            'hidden_dim': kwargs.pop('hidden_dim', 256),
            'num_layers': kwargs.pop('num_layers', 2),
            'dropout': kwargs.pop('dropout', 0.2),
            'bidirectional': kwargs.pop('bidirectional', True)
        })
        kwargs['model_kwargs'] = model_kwargs

        super().__init__(**kwargs)


def create_trainer(model_type='finbert',
                   model_name=MODEL_NAME,
                   max_seq_length=MAX_LENGTH,
                   batch_size=BATCH_SIZE,
                   learning_rate=LEARNING_RATE,
                   num_epochs=EPOCHS,
                   output_dir=OUTPUT_DIR,
                   mixed_precision=False,
                   **kwargs):
    """
    Create a trainer for the requested model type.

    Args:
        model_type: one of 'finbert', 'bert', 'roberta', 'lstm'
        model_name: pretrained model name or path
        max_seq_length: maximum sequence length
        batch_size: batch size
        learning_rate: learning rate
        num_epochs: number of training epochs
        output_dir: checkpoint output directory
        mixed_precision: whether to use mixed-precision training
        **kwargs: extra trainer arguments (override the named ones on conflict)

    Returns:
        A trainer instance.

    Raises:
        ValueError: for an unrecognized model_type
    """
    kind = model_type.lower()

    # Shared constructor arguments; explicit kwargs win on key collisions,
    # matching the original {named..., **kwargs} merge order.
    params = {
        'model_name': model_name,
        'max_seq_length': max_seq_length,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'num_epochs': num_epochs,
        'output_dir': output_dir,
        'mixed_precision': mixed_precision,
    }
    params.update(kwargs)

    # Dispatch table instead of an if/elif chain.
    trainer_classes = {
        'finbert': FinBertTrainer,
        'bert': BertTrainer,
        'roberta': RobertaTrainer,
        'lstm': LSTMTrainer,
    }

    trainer_cls = trainer_classes.get(kind)
    if trainer_cls is None:
        raise ValueError(f"不支持的模型类型: {kind}")

    return trainer_cls(**params)


def train_multi_models(df, text_column, label_column, models_config, test_size=0.2, random_state=42):
    """
    Train several models on the same data and compare their performance.

    Args:
        df: DataFrame with texts and labels (not modified)
        text_column: name of the text column
        label_column: name of the label column
        models_config: list of dicts, each with 'model_type' and optional
            'model_name' plus extra trainer parameters (dicts are not modified)
        test_size: validation split fraction
        random_state: random seed for the split

    Returns:
        dict mapping "<model_type>_<model_name>" to
        {'trainer', 'history', 'metrics'} for each successfully trained model
    """
    print(f"开始训练 {len(models_config)} 个模型")

    # Work on a copy so the caller's DataFrame is not mutated by the
    # 'label_id' column added below.
    df = df.copy()

    # Build a shared label-to-id mapping.
    unique_labels = df[label_column].unique()
    label_dict = {label: i for i, label in enumerate(unique_labels)}

    # Encode labels.
    df['label_id'] = df[label_column].map(label_dict)

    # This split is only used to report sizes here; each trainer re-splits in
    # prepare_data with the same test_size/random_state, so partitions match.
    train_df, val_df = train_test_split(
        df,
        test_size=test_size,
        random_state=random_state,
        stratify=df['label_id']
    )

    print(f"训练集大小: {len(train_df)}, 验证集大小: {len(val_df)}")

    models_results = {}

    # Train each configured model independently.
    for config in models_config:
        # Copy before popping so the caller's config dicts stay intact and the
        # same configs can be reused for another run.
        config = dict(config)
        model_type = config.pop('model_type')
        model_name = config.pop('model_name', MODEL_NAME)

        print(f"训练模型: {model_type} - {model_name}")

        try:
            # Build the trainer.
            trainer = create_trainer(
                model_type=model_type,
                model_name=model_name,
                **config
            )

            # Prepare dataloaders.
            train_dataloader, validation_dataloader, _ = trainer.prepare_data(
                df, text_column, label_column, test_size, random_state
            )

            # Train.
            history = trainer.train(train_dataloader, validation_dataloader)

            # Final evaluation on the validation set.
            _, metrics = trainer._evaluate(validation_dataloader)

            models_results[f"{model_type}_{model_name}"] = {
                'trainer': trainer,
                'history': history,
                'metrics': metrics
            }

            print(
                f"模型 {model_type} - {model_name} 训练完成，准确率: {metrics['accuracy']:.4f}, F1分数: {metrics['f1']:.4f}")

        except Exception as e:
            # Best-effort: a failing model does not abort the remaining ones.
            print(f"模型 {model_type} - {model_name} 训练失败: {str(e)}")

    # Compare the models that trained successfully.
    if models_results:
        _compare_models_performance(models_results)

    return models_results


def _compare_models_performance(models_results):
    """
    Print a table and save bar charts comparing accuracy and F1 across models.

    Args:
        models_results: dict of results as built by train_multi_models
    """
    # Gather metrics per model, preserving dict order.
    model_names = list(models_results)
    accuracies = [models_results[name]['metrics']['accuracy'] for name in model_names]
    f1_scores = [models_results[name]['metrics']['f1'] for name in model_names]

    # Tabulate for console output.
    comparison_df = pd.DataFrame({
        'Model': model_names,
        'Accuracy': accuracies,
        'F1 Score': f1_scores
    })

    print("模型性能比较:")
    print(f"\n{comparison_df.to_string(index=False)}")

    # Side-by-side bar charts for the two metrics.
    plt.figure(figsize=(12, 6))

    panels = [
        (1, accuracies, 'blue', '模型准确率比较', '准确率'),
        (2, f1_scores, 'green', '模型F1分数比较', 'F1分数'),
    ]
    for position, values, bar_color, title, y_label in panels:
        plt.subplot(1, 2, position)
        plt.bar(model_names, values, color=bar_color)
        plt.title(title)
        plt.xlabel('模型')
        plt.ylabel(y_label)
        plt.xticks(rotation=45)

    plt.tight_layout()

    # Save the comparison figure.
    plt.savefig("models_comparison.png")
    print("模型比较图已保存到 models_comparison.png")

    plt.close()


def ensemble_predict(trainers, texts, weights=None):
    """Ensemble prediction across multiple models.

    Each model's per-class probabilities are combined as a weighted sum and
    the highest-probability class becomes the final sentiment.

    Args:
        trainers: list or dict of trainer instances; dict values may also be
            result dicts holding the trainer under the 'trainer' key.
        texts: list of texts to classify.
        weights: per-model weight list; if None, equal weights are used.

    Returns:
        List of result dicts, one per text, each with the predicted
        sentiment, its confidence, and the combined class probabilities.

    Raises:
        ValueError: if no trainers are supplied, or ``weights`` is given but
            its length does not match the number of trainers.
    """
    # Accept either a dict (e.g. from train_multi_models) or a plain list.
    if isinstance(trainers, dict):
        trainers = list(trainers.values())

    # Unwrap result dicts so every element is a trainer instance.
    trainers = [t['trainer'] if isinstance(t, dict) else t for t in trainers]

    # Validate up front instead of failing later with ZeroDivisionError
    # (empty trainers) or an opaque IndexError (mismatched weights).
    if not trainers:
        raise ValueError("ensemble_predict requires at least one trainer")
    if weights is None:
        weights = [1.0 / len(trainers)] * len(trainers)
    elif len(weights) != len(trainers):
        raise ValueError(
            f"weights length ({len(weights)}) must match number of trainers ({len(trainers)})")

    # Collect every model's predictions once, up front.
    all_predictions = [trainer.predict(texts) for trainer in trainers]

    # Combine per-text predictions.
    results = []
    for i in range(len(texts)):
        # Accumulated class probabilities: [negative, neutral, positive].
        probs = np.zeros(3)

        # Weighted sum of each model's probabilities.
        for model_idx, predictions in enumerate(all_predictions):
            # predict() may return a single dict instead of a list.
            pred = predictions[i] if isinstance(predictions, list) else predictions
            probs[0] += weights[model_idx] * pred['negative_prob']
            probs[1] += weights[model_idx] * pred['neutral_prob']
            probs[2] += weights[model_idx] * pred['positive_prob']

        # Final label is the argmax of the combined probabilities.
        sentiment_idx = np.argmax(probs)
        sentiment = ['负面', '中性', '正面'][sentiment_idx]
        confidence = probs[sentiment_idx]

        result = {
            'text': texts[i],
            'predicted_sentiment': sentiment,
            'confidence': confidence,
            'negative_prob': probs[0],
            'neutral_prob': probs[1],
            'positive_prob': probs[2]
        }
        results.append(result)

    return results


def save_ensemble_model(models_results, output_dir="./ensemble_model"):
    """Persist every trained model plus an ensemble configuration file.

    Args:
        models_results: dict mapping model name -> result dict holding a
            'trainer' instance.
        output_dir: directory that receives the models and the config.

    Returns:
        The ensemble configuration dict that was written to disk.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Save each individual model and remember where it went.
    model_paths = {}
    for name, result in models_results.items():
        destination = os.path.join(output_dir, name)
        result['trainer'].save_model(destination)
        model_paths[name] = destination

    # Equal default weights; record each model's type for later reloading.
    n_models = len(models_results)
    ensemble_config = {
        'model_paths': model_paths,
        'weights': [1.0 / n_models] * n_models,
        'model_types': {
            name: result['trainer'].model_type
            for name, result in models_results.items()
        },
    }

    with open(os.path.join(output_dir, 'ensemble_config.json'), 'w') as f:
        json.dump(ensemble_config, f)

    print(f"集成模型已保存到 {output_dir}")
    return ensemble_config


def load_ensemble_model(config_path):
    """Rebuild an ensemble from a saved configuration file.

    Args:
        config_path: path to the ensemble_config.json written by
            save_ensemble_model.

    Returns:
        A (trainers, weights) tuple: the successfully loaded trainer
        instances and the per-model weights from the config.
    """
    with open(config_path, 'r') as f:
        config = json.load(f)

    model_paths = config['model_paths']
    n_models = len(model_paths)
    # Fall back to equal weights if the config predates the 'weights' field.
    weights = config.get('weights', [1.0 / n_models] * n_models)
    model_types = config.get('model_types', {})

    trainers = []
    for name, path in model_paths.items():
        try:
            # Default to 'finbert' when the model type was not recorded.
            trainer = create_trainer(model_type=model_types.get(name, 'finbert'))
            trainer.load_model(path)
        except Exception as e:
            # Best-effort loading: skip models that fail, keep the rest.
            print(f"加载模型 {name} 失败: {str(e)}")
        else:
            trainers.append(trainer)

    return trainers, weights


def optimize_memory():
    """Free cached GPU memory and trigger Python garbage collection."""
    # Release cached CUDA allocations back to the driver, if a GPU exists.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # Reclaim unreachable Python objects.
    gc.collect()


def export_model_to_onnx(trainer, output_path):
    """Export the trainer's model to ONNX format for use on other platforms.

    Args:
        trainer: trainer instance exposing ``model``, ``device`` and
            ``max_seq_length``.
        output_path: destination .onnx file path.

    Returns:
        True on success, False if the export failed (the error is printed).
    """
    try:
        # Ensure the output directory exists. os.path.dirname() returns ''
        # for a bare filename, and os.makedirs('') raises FileNotFoundError
        # even with exist_ok=True, so only create a directory when there is one.
        output_dir = os.path.dirname(output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        # Dummy inputs shaped like a single padded sequence.
        dummy_input_ids = torch.ones(1, trainer.max_seq_length, dtype=torch.long).to(trainer.device)
        dummy_attention_mask = torch.ones(1, trainer.max_seq_length, dtype=torch.long).to(trainer.device)
        dummy_inputs = (dummy_input_ids, dummy_attention_mask)

        # Input/output tensor names for the exported graph.
        input_names = ["input_ids", "attention_mask"]
        output_names = ["logits"]

        # Export with a dynamic batch axis so any batch size works at inference.
        torch.onnx.export(
            trainer.model,
            dummy_inputs,
            output_path,
            export_params=True,
            opset_version=11,
            do_constant_folding=True,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes={
                "input_ids": {0: "batch_size"},
                "attention_mask": {0: "batch_size"},
                "logits": {0: "batch_size"}
            }
        )

        print(f"模型已导出为ONNX格式: {output_path}")
        return True
    except Exception as e:
        print(f"导出ONNX模型失败: {str(e)}")
        return False


def batch_predict_file(trainer, input_file, output_file, text_column, batch_size=32):
    """Run sentiment prediction over a CSV file and save the annotated copy.

    Args:
        trainer: trainer instance exposing ``predict``.
        input_file: path of the input CSV.
        output_file: path where the annotated CSV is written.
        text_column: name of the column holding the texts to classify.
        batch_size: number of texts predicted per call.

    Returns:
        The DataFrame with the prediction columns appended.
    """
    # Load the input file and pull out the texts to classify.
    df = pd.read_csv(input_file)
    texts = df[text_column].tolist()

    # Predict in fixed-size batches to bound per-call memory usage.
    results = []
    for start in range(0, len(texts), batch_size):
        results.extend(trainer.predict(texts[start:start + batch_size]))

    # Append one DataFrame column per prediction field.
    prediction_fields = ('predicted_sentiment', 'confidence',
                         'negative_prob', 'neutral_prob', 'positive_prob')
    for field in prediction_fields:
        df[field] = [r[field] for r in results]

    # Persist the annotated copy.
    df.to_csv(output_file, index=False)
    print(f"预测结果已保存到 {output_file}")

    return df


# 使用示例

# 示例1：创建并训练单个模型
def example_single_model():
    """Example 1: create, train and query a single FinBERT model."""
    print("示例1：创建并训练单个FinBERT模型")

    # Demo hyper-parameters for the FinBERT trainer.
    trainer_config = dict(
        model_type='finbert',
        model_name='GEEKLEO/FINBERT',
        max_seq_length=128,
        batch_size=16,
        learning_rate=2e-5,
        num_epochs=10,
        output_dir='./models/finbert',
    )
    trainer = create_trainer(**trainer_config)

    # Load the labelled demo dataset.
    df = pd.read_csv('./data/taged_data_by_llm_with_isvaild_combined_copy1.csv', encoding='utf-8-sig')

    # Build train/validation dataloaders from the title/sentiment columns.
    train_dataloader, validation_dataloader, label_dict = trainer.prepare_data(
        df, 'post_title', 'sentiment'
    )

    # Fine-tune the model.
    history = trainer.train(train_dataloader, validation_dataloader)

    # Single-text prediction demo.
    example_text = "公司业绩持续上升，市场表现良好"
    result = trainer.predict(example_text)
    print(f"预测结果: {result}")

    return trainer


# 示例2：训练多个模型并比较性能
def example_multi_models():
    """Example 2: train several model architectures and compare them."""
    print("示例2：训练多个模型并比较性能")

    # Load the labelled demo dataset.
    df = pd.read_csv('./data/taged_data_by_llm_with_isvaild_combined_copy1.csv')

    # Hyper-parameters shared by every model; each entry overrides/extends them.
    base = {'max_seq_length': 128, 'num_epochs': 10}
    models_config = [
        {**base,
         'model_type': 'finbert',
         'model_name': 'GEEKLEO/FINBERT',
         'batch_size': 32,
         'learning_rate': 1e-5,
         'output_dir': './models/finbert_multi'},
        {**base,
         'model_type': 'roberta',
         'model_name': 'hfl/chinese-roberta-wwm-ext',
         'batch_size': 32,
         'learning_rate': 1e-5,
         'output_dir': './models/roberta'},
        {**base,
         'model_type': 'bert',
         'model_name': 'bert-base-chinese',
         'batch_size': 16,
         'learning_rate': 3e-5,
         'output_dir': './models/bert'},
        {**base,
         'model_type': 'lstm',
         'model_name': 'bert-base-chinese',  # used for the tokenizer only
         'batch_size': 32,
         'learning_rate': 1e-4,
         'output_dir': './models/lstm',
         'model_kwargs': {
             'embedding_dim': 768,
             'hidden_dim': 256,
             'num_layers': 2,
             'dropout': 0.3,
             'bidirectional': True,
         }},
    ]

    # Train every configured model.
    models_results = train_multi_models(df, 'post_title', 'sentiment', models_config)

    # Persist the whole ensemble.
    ensemble_config = save_ensemble_model(models_results, './models/ensemble')

    # Run an ensemble prediction over a few demo texts.
    trainers = [result['trainer'] for result in models_results.values()]
    example_texts = [
        "公司业绩持续上升，市场表现良好",
        "公司业绩不及预期，投资者信心受挫",
        "公司公告称将继续加大研发投入，暂时没有分红计划"
    ]

    ensemble_results = ensemble_predict(trainers, example_texts)
    print("集成预测结果:")
    for result in ensemble_results:
        print(f"文本: {result['text']}")
        print(f"预测情感: {result['predicted_sentiment']}")
        print(f"置信度: {result['confidence']:.4f}")
        print()

    return models_results, ensemble_config


# 示例3：导出模型为ONNX格式
def example_export_onnx(trainer):
    """Example 3: export a trained model to ONNX format."""
    print("示例3：导出模型为ONNX格式")

    # Write the ONNX graph next to the saved FinBERT model.
    output_path = "./models/finbert/model.onnx"
    export_model_to_onnx(trainer, output_path)
    print(f"模型已导出为ONNX格式: {output_path}")


# 示例4：批量预测文件
def example_batch_predict(trainer):
    """Example 4: batch-predict every row of a CSV file."""
    print("示例4：批量预测文件")

    # Annotate test_data.csv and write the result alongside it.
    input_file = "test_data.csv"
    output_file = "test_data_predicted.csv"
    batch_predict_file(trainer, input_file, output_file, 'post_title')
    print(f"预测结果已保存到 {output_file}")


