import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from transformers import get_linear_schedule_with_warmup
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, classification_report
import argparse
import os
import time
import logging
import random

# Configure root logging: timestamped INFO-level messages for the whole script.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y/%m/%d %H:%M:%S",
    level=logging.INFO,
)
# Module-level logger used by all functions below.
logger = logging.getLogger(__name__)

# Seed every RNG source so runs are reproducible.
def set_seed(seed_value=42):
    """Seed python, numpy, and torch (CPU + all GPUs) RNGs with *seed_value*."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed_value)

# Dataset wrapper for the labeled financial texts.
class FinancialDataset(Dataset):
    """Torch dataset pairing raw texts with integer labels.

    Each item is tokenized lazily in ``__getitem__`` and returned as a dict
    of flat 1-D tensors (``input_ids``, ``attention_mask``,
    ``token_type_ids``) plus a scalar ``labels`` tensor.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample_text = str(self.texts[idx])

        # Tokenize with padding/truncation to a fixed length; tensors come
        # back with a leading batch dimension of 1.
        encoded = self.tokenizer.encode_plus(
            sample_text,
            add_special_tokens=True,
            max_length=self.max_length,
            return_token_type_ids=True,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        # Drop the batch dimension so the DataLoader can stack items itself.
        item = {field: encoded[field].flatten()
                for field in ('input_ids', 'attention_mask', 'token_type_ids')}
        item['labels'] = torch.tensor(self.labels[idx], dtype=torch.long)
        return item

# Load and preprocess the labeled sentiment data.
def load_data(data_path, test_size=0.1, random_state=42):
    """Load the labeled CSV and split it into train/validation sets.

    Expects columns ``post_title`` (text) and ``sentiment`` (one of
    '负面'/'中性'/'正面'). Rows with a missing title or an unmapped
    sentiment value are dropped.

    Args:
        data_path: Path to the CSV file.
        test_size: Fraction of rows held out for validation (default 0.1,
            matching the previous hard-coded split).
        random_state: Seed for the train/validation split (default 42,
            matching the previous hard-coded value).

    Returns:
        (train_texts, val_texts, train_labels, val_labels) as numpy arrays,
        with labels as integer class ids.
    """
    logger.info(f"加载数据集: {data_path}")
    df = pd.read_csv(data_path)

    # Show a few rows for quick sanity-checking in the logs.
    logger.info(f"数据样例:\n{df.head()}")

    # Map sentiment strings to class ids (negative=0, neutral=1, positive=2).
    sentiment_map = {'负面': 0, '中性': 1, '正面': 2}
    df['label'] = df['sentiment'].map(sentiment_map)

    # Drop rows with a missing title or an unmapped sentiment (map -> NaN),
    # then cast labels back to int: Series.map produces float64 whenever NaN
    # can appear, and downstream code expects integer class ids.
    df = df.dropna(subset=['post_title', 'label'])
    df['label'] = df['label'].astype(int)

    # Log the class balance.
    label_counts = df['label'].value_counts()
    logger.info(f"标签分布:\n{label_counts}")

    # Stratify only when more than one class survives preprocessing;
    # train_test_split raises on single-class stratification.
    stratify = df['label'] if df['label'].nunique() > 1 else None
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['post_title'].values,
        df['label'].values,
        test_size=test_size,
        random_state=random_state,
        stratify=stratify
    )

    logger.info(f"训练集大小: {len(train_texts)}, 验证集大小: {len(val_texts)}")

    return train_texts, val_texts, train_labels, val_labels

# Train and evaluate the model.
def train_model(args):
    """Fine-tune a BERT sequence classifier on the sentiment dataset.

    Loads the tokenizer and pretrained weights named by ``args.model_name``,
    builds train/validation dataloaders from ``args.data_path``, trains for
    ``args.epochs`` epochs with linear LR warmup/decay, and saves a
    checkpoint to ``args.output_dir`` whenever validation weighted-F1
    improves.

    Args:
        args: argparse.Namespace carrying the hyper-parameters defined in
            ``main()`` (data_path, model_name, output_dir, max_length,
            batch_size, learning_rate, adam_epsilon, epochs, warmup_steps,
            seed, num_workers).

    Returns:
        (model, tokenizer) — the model in its final-epoch state (not
        necessarily the best checkpoint; best weights live on disk) and
        the tokenizer.
    """
    # Seed all RNGs for reproducibility.
    set_seed(args.seed)
    
    # Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"使用设备: {device}")
    
    # Load the FinBERT tokenizer.
    tokenizer = BertTokenizer.from_pretrained(args.model_name)
    logger.info(f"加载tokenizer: {args.model_name}")
    
    # Load and split the labeled data.
    train_texts, val_texts, train_labels, val_labels = load_data(args.data_path)
    
    # Wrap each split in a Dataset that tokenizes on the fly.
    train_dataset = FinancialDataset(train_texts, train_labels, tokenizer, args.max_length)
    val_dataset = FinancialDataset(val_texts, val_labels, tokenizer, args.max_length)
    
    # Build dataloaders; only the training loader shuffles.
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers
    )
    
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers
    )
    
    # Load the pretrained model with a 3-way classification head.
    model = BertForSequenceClassification.from_pretrained(
        args.model_name,
        num_labels=3,  # negative / neutral / positive
        output_attentions=False,
        output_hidden_states=False,
    )
    model.to(device)
    logger.info(f"加载模型: {args.model_name}")
    
    # Optimizer: AdamW (imported from transformers at the top of the file).
    optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=args.adam_epsilon)
    
    # Total optimizer steps across all epochs (one step per batch).
    total_steps = len(train_dataloader) * args.epochs
    
    # Linear warmup followed by linear decay of the learning rate.
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=total_steps
    )
    
    # Training loop: track the best validation weighted-F1 seen so far.
    logger.info("开始训练...")
    best_val_f1 = 0.0
    
    for epoch in range(args.epochs):
        logger.info(f"Epoch {epoch+1}/{args.epochs}")
        
        # --- Training phase ---
        model.train()
        train_loss = 0
        
        for batch in train_dataloader:
            # Move the batch to the target device.
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            labels = batch['labels'].to(device)
            
            # Reset gradients from the previous step.
            optimizer.zero_grad()
            
            # Forward pass; passing labels makes the model compute the loss.
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=labels
            )
            
            loss = outputs.loss
            train_loss += loss.item()
            
            # Backward pass.
            loss.backward()
            
            # Clip gradient norm to 1.0 to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            
            # Parameter update, then LR-schedule step.
            optimizer.step()
            scheduler.step()
        
        avg_train_loss = train_loss / len(train_dataloader)
        logger.info(f"训练损失: {avg_train_loss:.4f}")
        
        # --- Evaluation phase ---
        model.eval()
        val_loss = 0
        predictions = []
        true_labels = []
        
        for batch in val_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            labels = batch['labels'].to(device)
            
            # No gradients needed during evaluation.
            with torch.no_grad():
                outputs = model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    labels=labels
                )
            
            loss = outputs.loss
            val_loss += loss.item()
            
            # Hard predictions: argmax over the class logits.
            logits = outputs.logits
            preds = torch.argmax(logits, dim=1).cpu().numpy()
            
            predictions.extend(preds)
            true_labels.extend(labels.cpu().numpy())
        
        avg_val_loss = val_loss / len(val_dataloader)
        val_accuracy = accuracy_score(true_labels, predictions)
        val_f1 = f1_score(true_labels, predictions, average='weighted')
        
        logger.info(f"验证损失: {avg_val_loss:.4f}")
        logger.info(f"验证准确率: {val_accuracy:.4f}")
        logger.info(f"验证F1分数: {val_f1:.4f}")
        
        # Per-class precision/recall/F1 breakdown.
        report = classification_report(true_labels, predictions, target_names=['负面', '中性', '正面'])
        logger.info(f"分类报告:\n{report}")
        
        # Save a checkpoint whenever validation F1 improves.
        # NOTE(review): checkpoints are written to epoch-numbered
        # subdirectories, so multiple "best" checkpoints can accumulate
        # across a run — confirm this is intended vs. a single fixed path.
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            logger.info(f"发现更好的模型，F1={val_f1:.4f}，保存模型...")
            
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)
            
            model_path = os.path.join(args.output_dir, f"finbert_finetuned_epoch_{epoch+1}")
            model.save_pretrained(model_path)
            tokenizer.save_pretrained(model_path)
            
            logger.info(f"模型保存到: {model_path}")
    
    logger.info("训练完成！")
    return model, tokenizer

def main():
    """CLI entry point: parse hyper-parameters and launch fine-tuning."""
    parser = argparse.ArgumentParser(description="微调FinBERT用于情感分析")

    # (flag, type, default, help) spec for every supported option,
    # registered in the same order as before so --help output is unchanged.
    option_specs = [
        ("--data_path", str, "taged_data_by_llm.csv", "训练数据路径"),
        ("--model_name", str, "yiyanghkust/finbert-tone", "预训练模型名称或路径"),
        ("--output_dir", str, "./finbert_finetuned", "保存微调模型的目录"),
        ("--max_length", int, 128, "最大序列长度"),
        ("--batch_size", int, 16, "训练批次大小"),
        ("--learning_rate", float, 2e-5, "学习率"),
        ("--adam_epsilon", float, 1e-8, "Adam优化器的epsilon参数"),
        ("--epochs", int, 3, "训练轮数"),
        ("--warmup_steps", int, 0, "预热步数"),
        ("--seed", int, 42, "随机种子"),
        ("--num_workers", int, 4, "数据加载的工作线程数"),
    ]
    for flag, option_type, default, help_text in option_specs:
        parser.add_argument(flag, type=option_type, default=default, help=help_text)

    args = parser.parse_args()

    # Run fine-tuning with the parsed configuration.
    model, tokenizer = train_model(args)

# Standard script entry-point guard.
if __name__ == "__main__":
    main() 