import logging
import os
import random
import time
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, TensorDataset
from tqdm.notebook import tqdm
from transformers import BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup

# Configuration parameters (module-level constants)
DATA_PATH = "taged_data_by_llm.csv"  # path to the labelled training data
MODEL_NAME = "yiyanghkust/finbert-tone"  # pretrained model name on the HF hub
OUTPUT_DIR = "./finbert_finetuned"  # directory where fine-tuned checkpoints are saved
MAX_LENGTH = 128  # maximum token sequence length
BATCH_SIZE = 16  # training batch size
LEARNING_RATE = 2e-5  # learning rate
ADAM_EPSILON = 1e-8  # epsilon for the Adam optimizer
EPOCHS = 3  # number of training epochs
WARMUP_STEPS = 0  # learning-rate warmup steps
SEED = 42  # random seed
NUM_WORKERS = 0  # 0 is recommended in Jupyter to avoid multiprocessing issues

# Logging setup
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y/%m/%d %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# Seed every RNG used in this script so results are reproducible.
def set_seed(seed_value=SEED):
    """Apply *seed_value* to the python, numpy and torch (CPU + CUDA) RNGs."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed_value)

# Torch Dataset that tokenizes raw texts lazily, one sample at a time.
class FinancialDataset(Dataset):
    """Wraps parallel text/label arrays and encodes each text in __getitem__."""

    def __init__(self, texts, labels, tokenizer, max_length=MAX_LENGTH):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        """Return a dict of flattened encoder tensors plus the integer label."""
        encoded = self.tokenizer.encode_plus(
            str(self.texts[idx]),
            add_special_tokens=True,
            max_length=self.max_length,
            return_token_type_ids=True,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )
        # encode_plus returns (1, max_length) tensors; flatten drops the batch dim.
        sample = {field: encoded[field].flatten()
                  for field in ('input_ids', 'attention_mask', 'token_type_ids')}
        sample['labels'] = torch.tensor(self.labels[idx], dtype=torch.long)
        return sample

# Read the labelled CSV, map sentiments to class ids, split train/validation.
def load_data(data_path=DATA_PATH):
    """Load the dataset and return stratified train/validation arrays.

    Returns:
        (train_texts, val_texts, train_labels, val_labels)
    """
    logger.info(f"加载数据集: {data_path}")
    df = pd.read_csv(data_path)

    # Peek at the first rows for a sanity check.
    logger.info(f"数据样例:\n{df.head()}")

    # Map Chinese sentiment names to integer class ids: 负面=0, 中性=1, 正面=2.
    df['label'] = df['sentiment'].map({'负面': 0, '中性': 1, '正面': 2})

    # Drop rows missing either the text or a mapped label.
    df = df.dropna(subset=['post_title', 'label'])

    # Log the class distribution.
    label_counts = df['label'].value_counts()
    logger.info(f"标签分布:\n{label_counts}")

    # Stratify only when more than one class is actually present.
    stratify_on = df['label'] if len(df['label'].unique()) > 1 else None
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['post_title'].values,
        df['label'].values,
        test_size=0.1,
        random_state=SEED,
        stratify=stratify_on
    )

    logger.info(f"训练集大小: {len(train_texts)}, 验证集大小: {len(val_texts)}")

    return train_texts, val_texts, train_labels, val_labels

# Train and evaluate the model
def train_model():
    """Fine-tune FinBERT on the labelled CSV and checkpoint the best epoch.

    Returns:
        model: the model after the FINAL epoch (note: not reloaded from the
            best checkpoint — the best weights live under OUTPUT_DIR)
        tokenizer: the tokenizer used for encoding
        stats_df: DataFrame of per-epoch loss/accuracy/F1/timing statistics
    """
    # Seed all RNGs for reproducibility
    set_seed()
    
    # Select device (GPU when available)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"使用设备: {device}")
    
    # Load the FinBERT tokenizer
    tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
    logger.info(f"加载tokenizer: {MODEL_NAME}")
    
    # Load and split the data
    train_texts, val_texts, train_labels, val_labels = load_data()
    
    # Build datasets
    train_dataset = FinancialDataset(train_texts, train_labels, tokenizer)
    val_dataset = FinancialDataset(val_texts, val_labels, tokenizer)
    
    # Build data loaders (shuffle only the training set)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=NUM_WORKERS
    )
    
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=NUM_WORKERS
    )
    
    # Load the pretrained model with a 3-way classification head
    model = BertForSequenceClassification.from_pretrained(
        MODEL_NAME,
        num_labels=3,  # negative, neutral, positive
        output_attentions=False,
        output_hidden_states=False,
    )
    model.to(device)
    logger.info(f"加载模型: {MODEL_NAME}")
    
    # Set up the optimizer
    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE, eps=ADAM_EPSILON)
    
    # Total number of optimizer steps across all epochs
    total_steps = len(train_dataloader) * EPOCHS
    
    # Linear learning-rate schedule with warmup
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=WARMUP_STEPS,
        num_training_steps=total_steps
    )
    
    # Start training
    logger.info("开始训练...")
    best_val_f1 = 0.0
    
    # Collected per-epoch training/evaluation metrics
    training_stats = []
    
    for epoch in range(EPOCHS):
        logger.info(f"Epoch {epoch+1}/{EPOCHS}")
        
        # Epoch start time
        t0 = time.time()
        
        # --- Training phase ---
        model.train()
        train_loss = 0
        
        for step, batch in enumerate(train_dataloader):
            # Log progress every 100 steps (skip step 0)
            if step % 100 == 0 and not step == 0:
                elapsed = time.time() - t0
                logger.info(f'批次 {step}/{len(train_dataloader)}. 已用时间: {elapsed:.1f}秒')
                
            # Move batch tensors to the target device
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            labels = batch['labels'].to(device)
            
            # Reset gradients
            optimizer.zero_grad()
            
            # Forward pass (passing labels makes the model compute the loss)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=labels
            )
            
            loss = outputs.loss
            train_loss += loss.item()
            
            # Backward pass
            loss.backward()
            
            # Clip gradients to stabilize training
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            
            # Update parameters, then advance the LR schedule
            optimizer.step()
            scheduler.step()
        
        # Average training loss over the epoch
        avg_train_loss = train_loss / len(train_dataloader)
        
        # Training time for this epoch
        training_time = time.time() - t0
        logger.info(f"训练用时: {training_time:.1f}秒")
        logger.info(f"训练损失: {avg_train_loss:.4f}")
        
        # --- Evaluation phase ---
        t0 = time.time()
        model.eval()
        val_loss = 0
        predictions = []
        true_labels = []
        
        for batch in val_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            labels = batch['labels'].to(device)
            
            with torch.no_grad():
                outputs = model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    labels=labels
                )
            
            loss = outputs.loss
            val_loss += loss.item()
            
            # Collect class predictions (argmax over logits)
            logits = outputs.logits
            preds = torch.argmax(logits, dim=1).cpu().numpy()
            
            predictions.extend(preds)
            true_labels.extend(labels.cpu().numpy())
        
        # Compute evaluation metrics
        evaluation_time = time.time() - t0
        avg_val_loss = val_loss / len(val_dataloader)
        val_accuracy = accuracy_score(true_labels, predictions)
        val_f1 = f1_score(true_labels, predictions, average='weighted')
        
        # Log evaluation results
        logger.info(f"验证用时: {evaluation_time:.1f}秒")
        logger.info(f"验证损失: {avg_val_loss:.4f}")
        logger.info(f"验证准确率: {val_accuracy:.4f}")
        logger.info(f"验证F1分数: {val_f1:.4f}")
        
        # Per-class precision/recall/F1 report
        report = classification_report(true_labels, predictions, target_names=['负面', '中性', '正面'])
        logger.info(f"分类报告:\n{report}")
        
        # Record this epoch's statistics
        training_stats.append(
            {
                'epoch': epoch + 1,
                'train_loss': avg_train_loss,
                'val_loss': avg_val_loss,
                'val_accuracy': val_accuracy,
                'val_f1': val_f1,
                'training_time': training_time,
                'evaluation_time': evaluation_time
            }
        )
        
        # Checkpoint whenever validation F1 improves
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            logger.info(f"发现更好的模型，F1={val_f1:.4f}，保存模型...")
            
            if not os.path.exists(OUTPUT_DIR):
                os.makedirs(OUTPUT_DIR)
            
            model_path = os.path.join(OUTPUT_DIR, f"finbert_finetuned_epoch_{epoch+1}")
            model.save_pretrained(model_path)
            tokenizer.save_pretrained(model_path)
            
            logger.info(f"模型保存到: {model_path}")
    
    logger.info("训练完成！")
    
    # Show collected training statistics
    logger.info("训练统计信息:")
    stats_df = pd.DataFrame(training_stats)
    logger.info(stats_df)
    
    return model, tokenizer, stats_df

# Inference helper wrapping a fine-tuned FinBERT checkpoint.
class SentimentPredictor:
    def __init__(self, model_path):
        """
        Load the tokenizer and model from *model_path* and prepare for inference.

        Parameters:
            model_path: directory containing the fine-tuned model
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        logger.info(f"使用设备: {self.device}")

        # Load tokenizer and model, move to device, switch to eval mode.
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        self.model = BertForSequenceClassification.from_pretrained(model_path)
        self.model.to(self.device)
        self.model.eval()

        logger.info(f"模型加载完成: {model_path}")

        # Class id -> human-readable sentiment label.
        self.id2label = {0: '负面', 1: '中性', 2: '正面'}

    def predict(self, text, max_length=MAX_LENGTH):
        """
        Classify a single text.

        Parameters:
            text: input text
            max_length: maximum token sequence length

        Returns:
            (sentiment label, confidence, per-class probability vector)
        """
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=max_length,
            return_token_type_ids=True,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )

        # Move every encoder tensor onto the model's device.
        model_inputs = {
            name: encoding[name].to(self.device)
            for name in ('input_ids', 'attention_mask', 'token_type_ids')
        }

        # Inference only — no gradient tracking needed.
        with torch.no_grad():
            outputs = self.model(**model_inputs)

        # Softmax over logits gives class probabilities; take the max.
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=1)
        confidence, predicted_class = torch.max(probabilities, dim=1)

        return (
            self.id2label[predicted_class.item()],
            confidence.item(),
            probabilities.cpu().numpy()[0],
        )

def predict_file(predictor, input_path, output_path=None):
    """
    Run sentiment prediction over every row of a CSV file.

    Parameters:
        predictor: SentimentPredictor instance
        input_path: input CSV path (must contain a 'post_title' column)
        output_path: output CSV path; defaults to '<input stem>_predicted.csv'
            in the current working directory

    Returns:
        DataFrame with prediction columns appended, or None when the
        required 'post_title' column is missing.
    """
    logger.info(f"开始处理文件: {input_path}")

    df = pd.read_csv(input_path)

    # Bail out early when the expected text column is absent.
    if 'post_title' not in df.columns:
        logger.error("CSV文件中缺少'post_title'列")
        return

    logger.info(f"需要预测的行数: {len(df)}")

    # Pre-create the result columns.
    for column in ('predicted_sentiment', 'confidence',
                   'negative_prob', 'neutral_prob', 'positive_prob'):
        df[column] = None

    # Predict row by row.
    for idx, row in df.iterrows():
        sentiment, confidence, probs = predictor.predict(str(row['post_title']))

        df.at[idx, 'predicted_sentiment'] = sentiment
        df.at[idx, 'confidence'] = confidence
        df.at[idx, 'negative_prob'] = probs[0]
        df.at[idx, 'neutral_prob'] = probs[1]
        df.at[idx, 'positive_prob'] = probs[2]

        # Progress log every 100 rows (fires on row 0 too).
        if idx % 100 == 0:
            logger.info(f"已处理 {idx}/{len(df)} 行")

    # Derive a default output filename from the input's base name.
    if output_path is None:
        stem = os.path.splitext(os.path.basename(input_path))[0]
        output_path = f"{stem}_predicted.csv"

    # utf-8-sig keeps Excel happy with Chinese text.
    df.to_csv(output_path, index=False, encoding='utf-8-sig')
    logger.info(f"预测完成，结果已保存到: {output_path}")

    # Show the predicted sentiment distribution.
    sentiment_counts = df['predicted_sentiment'].value_counts()
    logger.info(f"预测情感分布:\n{sentiment_counts}")

    # When gold labels exist, report the agreement rate.
    if 'sentiment' in df.columns:
        match_count = (df['predicted_sentiment'] == df['sentiment']).sum()
        match_rate = match_count / len(df)
        logger.info(f"与真实标签一致数: {match_count}/{len(df)} ({match_rate:.2%})")

    return df

# Convenience wrapper that formats a single prediction as a dict.
def predict_text(predictor, text):
    """
    Predict the sentiment of one text and return a formatted result dict.

    Parameters:
        predictor: predictor instance exposing .predict(text)
        text: input text
    """
    sentiment, confidence, probs = predictor.predict(text)
    keys = ("文本", "预测情感", "置信度", "负面概率", "中性概率", "正面概率")
    values = (
        text,
        sentiment,
        f"{confidence:.4f}",
        f"{probs[0]:.4f}",
        f"{probs[1]:.4f}",
        f"{probs[2]:.4f}",
    )
    return dict(zip(keys, values))

# Run training directly (in Jupyter you can call train_model() instead)
if __name__ == "__main__":
    model, tokenizer, stats_df = train_model() 

class FinBertTrainer:
    """FinBERT trainer class: fine-tunes FinBERT for financial-text sentiment analysis."""
    
    def __init__(self, 
                 model_name=MODEL_NAME, 
                 max_seq_length=MAX_LENGTH,
                 batch_size=BATCH_SIZE,
                 learning_rate=LEARNING_RATE,
                 adam_epsilon=ADAM_EPSILON,
                 weight_decay=0.01,
                 warmup_steps=WARMUP_STEPS,
                 num_epochs=EPOCHS,
                 output_dir=OUTPUT_DIR):
        """
        Initialize the FinBERT trainer.
        
        Parameters:
            model_name: pretrained model name or path
            max_seq_length: maximum sequence length
            batch_size: batch size
            learning_rate: learning rate
            adam_epsilon: epsilon for the Adam optimizer
            weight_decay: weight decay (applied only to non-bias/LayerNorm params)
            warmup_steps: number of LR warmup steps
            num_epochs: number of training epochs
            output_dir: directory where checkpoints are saved
        """
        self.model_name = model_name
        self.max_seq_length = max_seq_length
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.adam_epsilon = adam_epsilon
        self.weight_decay = weight_decay
        self.warmup_steps = warmup_steps
        self.num_epochs = num_epochs
        self.output_dir = output_dir
        
        # Select device (GPU when available)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"使用设备: {self.device}")
        
        # Load tokenizer and pretrained model immediately
        self._load_tokenizer_and_model()
        
    def _load_tokenizer_and_model(self):
        """Load the tokenizer and pretrained classification model onto the device."""
        logger.info(f"从 {self.model_name} 加载分词器和模型...")
        
        try:
            self.tokenizer = BertTokenizer.from_pretrained(self.model_name)
            self.model = BertForSequenceClassification.from_pretrained(
                self.model_name,
                num_labels=3,  # negative, neutral, positive
                output_attentions=False,
                output_hidden_states=False
            )
            self.model.to(self.device)
            logger.info("分词器和模型加载成功")
        except Exception as e:
            # Log and re-raise: a missing model is unrecoverable here.
            logger.error(f"模型加载失败: {str(e)}")
            raise
    
    def prepare_data(self, df, text_column, label_column, test_size=0.2, random_state=42):
        """
        Prepare training data.
        
        Parameters:
            df: DataFrame containing texts and labels
            text_column: name of the text column
            label_column: name of the label column
            test_size: validation split fraction
            random_state: random seed
            
        Returns:
            train_dataloader: training DataLoader
            validation_dataloader: validation DataLoader
            label_dict: mapping from label value to integer id
        """
        logger.info("准备训练数据...")
        
        # Build the label -> id mapping from the labels actually present.
        # NOTE(review): the id order follows DataFrame order, so it may differ
        # from the 负面/中性/正面 = 0/1/2 convention used elsewhere in this file.
        unique_labels = df[label_column].unique()
        label_dict = {label: i for i, label in enumerate(unique_labels)}
        logger.info(f"标签映射: {label_dict}")
        
        # Encode labels as integer ids
        df['label_id'] = df[label_column].map(label_dict)
        
        # Stratified train/validation split
        train_df, val_df = train_test_split(
            df, 
            test_size=test_size, 
            random_state=random_state, 
            stratify=df['label_id']
        )
        
        logger.info(f"训练集大小: {len(train_df)}, 验证集大小: {len(val_df)}")
        
        # Build the data loaders (shuffle only for training)
        train_dataloader = self._create_dataloader(train_df, text_column, 'label_id', True)
        validation_dataloader = self._create_dataloader(val_df, text_column, 'label_id', False)
        
        logger.info("数据准备完成")
        return train_dataloader, validation_dataloader, label_dict
    
    def _create_dataloader(self, df, text_column, label_column, is_train):
        """
        Create a DataLoader by eagerly encoding all texts.
        
        Parameters:
            df: DataFrame
            text_column: name of the text column
            label_column: name of the label column
            is_train: whether this is training data (enables random sampling)
            
        Returns:
            dataloader: DataLoader over (input_ids, attention_mask, labels)
        """
        # Extract texts and labels
        texts = df[text_column].values
        labels = df[label_column].values
        
        # Encode all texts up front (unlike FinancialDataset, which is lazy)
        input_ids = []
        attention_masks = []
        
        for text in tqdm(texts, desc="编码文本"):
            encoded_dict = self.tokenizer.encode_plus(
                text,
                add_special_tokens=True,
                max_length=self.max_seq_length,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt'
            )
            
            input_ids.append(encoded_dict['input_ids'])
            attention_masks.append(encoded_dict['attention_mask'])
        
        # Stack the per-text (1, max_len) tensors into (N, max_len)
        input_ids = torch.cat(input_ids, dim=0)
        attention_masks = torch.cat(attention_masks, dim=0)
        labels = torch.tensor(labels)
        
        # Wrap everything in a TensorDataset
        dataset = TensorDataset(input_ids, attention_masks, labels)
        
        # Random order for training, sequential for evaluation
        sampler = RandomSampler(dataset) if is_train else SequentialSampler(dataset)
        dataloader = DataLoader(
            dataset,
            sampler=sampler,
            batch_size=self.batch_size
        )
        
        return dataloader
    
    def train(self, train_dataloader, validation_dataloader=None):
        """
        Train the model.
        
        Parameters:
            train_dataloader: training DataLoader
            validation_dataloader: optional validation DataLoader
            
        Returns:
            history dict with 'train_loss', 'val_loss', 'val_accuracy' lists
        """
        logger.info("开始训练模型...")
        
        # Create the output directory if needed
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        
        # Build the optimizer (with grouped weight decay)
        optimizer = self._prepare_optimizer()
        
        # Total number of optimizer steps across all epochs
        total_steps = len(train_dataloader) * self.num_epochs
        
        # Linear LR schedule with warmup
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.warmup_steps,
            num_training_steps=total_steps
        )
        
        # Track training history
        history = {
            'train_loss': [],
            'val_loss': [],
            'val_accuracy': []
        }
        
        # Training loop
        logger.info(f"开始训练，共 {self.num_epochs} 轮...")
        
        for epoch in range(self.num_epochs):
            logger.info(f"轮次 {epoch+1}/{self.num_epochs}")
            
            # Run one training epoch
            epoch_loss = self._train_epoch(train_dataloader, optimizer, scheduler)
            history['train_loss'].append(epoch_loss)
            
            # Save a checkpoint after EVERY epoch (not only the best one)
            epoch_output_dir = os.path.join(self.output_dir, f"finbert_finetuned_epoch_{epoch+1}")
            if not os.path.exists(epoch_output_dir):
                os.makedirs(epoch_output_dir)
            
            logger.info(f"保存模型到 {epoch_output_dir}")
            self.model.save_pretrained(epoch_output_dir)
            self.tokenizer.save_pretrained(epoch_output_dir)
            
            # Evaluate when a validation loader was provided
            if validation_dataloader:
                val_loss, val_accuracy = self._evaluate(validation_dataloader)
                history['val_loss'].append(val_loss)
                history['val_accuracy'].append(val_accuracy)
                logger.info(f"验证集结果 - 损失: {val_loss:.4f}, 准确率: {val_accuracy:.4f}")
        
        logger.info("训练完成")
        
        # Plot the training history
        if validation_dataloader:
            self._visualize_training_history(history)
        
        return history
    
    def _prepare_optimizer(self):
        """
        Prepare the optimizer.
        
        Returns:
            optimizer: AdamW optimizer with weight decay disabled for
            bias and LayerNorm parameters (standard BERT fine-tuning setup)
        """
        # Collect named parameters to split into decay / no-decay groups
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': self.weight_decay
            },
            {
                'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            }
        ]
        
        optimizer = AdamW(
            optimizer_grouped_parameters,
            lr=self.learning_rate,
            eps=self.adam_epsilon
        )
        
        return optimizer
    
    def _train_epoch(self, train_dataloader, optimizer, scheduler):
        """
        Train for one epoch.
        
        Parameters:
            train_dataloader: training DataLoader
            optimizer: optimizer
            scheduler: learning-rate scheduler
            
        Returns:
            average training loss over the epoch
        """
        self.model.train()
        total_loss = 0
        
        # Progress bar over batches
        progress_bar = tqdm(train_dataloader, desc="训练中")
        
        for batch in progress_bar:
            # Move batch tensors to the device
            b_input_ids = batch[0].to(self.device)
            b_attention_mask = batch[1].to(self.device)
            b_labels = batch[2].to(self.device)
            
            # Reset gradients
            self.model.zero_grad()
            
            # Forward pass (token_type_ids omitted — single-segment input)
            outputs = self.model(
                b_input_ids,
                token_type_ids=None,
                attention_mask=b_attention_mask,
                labels=b_labels
            )
            
            loss = outputs.loss
            total_loss += loss.item()
            
            # Backward pass
            loss.backward()
            
            # Clip gradients to stabilize training
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            
            # Update parameters, then advance the LR schedule
            optimizer.step()
            scheduler.step()
            
            # Show the running loss in the progress bar
            progress_bar.set_postfix({'loss': f'{loss.item():.4f}'})
        
        avg_train_loss = total_loss / len(train_dataloader)
        logger.info(f"平均训练损失: {avg_train_loss:.4f}")
        
        return avg_train_loss
    
    def _evaluate(self, validation_dataloader):
        """
        Evaluate the model.
        
        Parameters:
            validation_dataloader: validation DataLoader
            
        Returns:
            avg_val_loss: average validation loss
            accuracy: validation accuracy
        """
        logger.info("评估模型...")
        
        self.model.eval()
        
        val_loss = 0
        predictions = []
        true_labels = []
        
        for batch in tqdm(validation_dataloader, desc="评估中"):
            # Move batch tensors to the device
            b_input_ids = batch[0].to(self.device)
            b_attention_mask = batch[1].to(self.device)
            b_labels = batch[2].to(self.device)
            
            # No gradient tracking during evaluation
            with torch.no_grad():
                # Forward pass
                outputs = self.model(
                    b_input_ids,
                    token_type_ids=None,
                    attention_mask=b_attention_mask,
                    labels=b_labels
                )
                
                loss = outputs.loss
                logits = outputs.logits
                
            val_loss += loss.item()
            
            # Move logits and labels to the CPU
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()
            
            # Collect predictions and gold labels
            predictions.extend(np.argmax(logits, axis=1))
            true_labels.extend(label_ids)
        
        # Average loss and accuracy
        avg_val_loss = val_loss / len(validation_dataloader)
        accuracy = (np.array(predictions) == np.array(true_labels)).mean()
        
        # Per-class classification report
        report = classification_report(true_labels, predictions, target_names=['负面', '中性', '正面'])
        logger.info(f"分类报告:\n{report}")
        
        # Confusion matrix plot
        # NOTE(review): confusion_matrix is not imported by the original
        # top-of-file imports — it needs `from sklearn.metrics import confusion_matrix`
        # or this line raises NameError.
        cm = confusion_matrix(true_labels, predictions)
        self._plot_confusion_matrix(cm, ['负面', '中性', '正面'])
        
        return avg_val_loss, accuracy
    
    def _plot_confusion_matrix(self, cm, classes):
        """
        Plot a confusion matrix heatmap.
        
        Parameters:
            cm: confusion matrix array
            classes: class display names
        """
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=classes, yticklabels=classes)
        plt.title('混淆矩阵')
        plt.ylabel('真实标签')
        plt.xlabel('预测标签')
        plt.tight_layout()
        plt.show()
    
    def _visualize_training_history(self, history):
        """
        Plot the training history (loss and accuracy curves).
        
        Parameters:
            history: history dict produced by train()
        """
        plt.figure(figsize=(12, 4))
        
        # Loss curves
        plt.subplot(1, 2, 1)
        plt.plot(history['train_loss'], 'b-o', label='训练损失')
        plt.plot(history['val_loss'], 'r-o', label='验证损失')
        plt.title('训练和验证损失')
        plt.xlabel('轮次')
        plt.ylabel('损失')
        plt.legend()
        
        # Accuracy curve
        plt.subplot(1, 2, 2)
        plt.plot(history['val_accuracy'], 'g-o', label='验证准确率')
        plt.title('验证准确率')
        plt.xlabel('轮次')
        plt.ylabel('准确率')
        plt.ylim([0, 1])
        
        plt.tight_layout()
        plt.show()
    
    def predict(self, texts, batch_size=16):
        """
        Predict sentiment for text(s).
        
        Parameters:
            texts: a string, or a list of strings, to classify
            batch_size: batch size for prediction
            
        Returns:
            a result dict for a single string input, otherwise a list of dicts
        """
        # Normalize single-string input to a one-element list
        if isinstance(texts, str):
            texts = [texts]
            single_input = True
        else:
            single_input = False
        
        results = []
        
        # Predict in batches
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i+batch_size]
            batch_results = self._predict_batch(batch_texts)
            results.extend(batch_results)
        
        # Unwrap the list for single-string input
        return results[0] if single_input else results
    
    def _predict_batch(self, texts):
        """
        Predict sentiment for one batch of texts.
        
        Parameters:
            texts: list of strings to classify
            
        Returns:
            list of result dicts (text, predicted sentiment, probabilities)
        """
        self.model.eval()
        
        # Tokenize the whole batch at once (dynamic padding to the longest text)
        inputs = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=self.max_seq_length,
            return_tensors="pt"
        )
        
        # Move inputs to the device
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        
        # Inference without gradients
        with torch.no_grad():
            outputs = self.model(**inputs)
            logits = outputs.logits
            probabilities = torch.nn.functional.softmax(logits, dim=1)
        
        # Bring probabilities back to the CPU as numpy
        probs_numpy = probabilities.cpu().numpy()
        
        # Build one result dict per input text
        results = []
        for i, text in enumerate(texts):
            probs = probs_numpy[i]
            sentiment_idx = np.argmax(probs)
            sentiment = ['负面', '中性', '正面'][sentiment_idx]
            confidence = probs[sentiment_idx]
            
            result = {
                'text': text,
                'predicted_sentiment': sentiment,
                'confidence': confidence,
                'negative_prob': probs[0],
                'neutral_prob': probs[1],
                'positive_prob': probs[2]
            }
            results.append(result)
        
        return results

def create_trainer(model_name=MODEL_NAME,
                   max_seq_length=MAX_LENGTH,
                   batch_size=BATCH_SIZE,
                   learning_rate=LEARNING_RATE,
                   num_epochs=EPOCHS,
                   output_dir=OUTPUT_DIR):
    """
    Build a FinBertTrainer from the given configuration.

    Parameters:
        model_name: pretrained model name or path
        max_seq_length: maximum sequence length
        batch_size: batch size
        learning_rate: learning rate
        num_epochs: number of training epochs
        output_dir: directory where checkpoints are saved

    Returns:
        FinBertTrainer: a configured trainer instance
    """
    config = dict(
        model_name=model_name,
        max_seq_length=max_seq_length,
        batch_size=batch_size,
        learning_rate=learning_rate,
        num_epochs=num_epochs,
        output_dir=output_dir,
    )
    return FinBertTrainer(**config)