import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from transformers import BertModel, AdamW, get_linear_schedule_with_warmup
from models import BertTextCNN
from dataset_preprocessing import ClassificationDataset
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
import argparse
import logging
import time
import gc

# Configure logging: emit INFO-level records to both a log file and stderr.
_log_handlers = [
    logging.FileHandler("training.log"),
    logging.StreamHandler(),
]
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
    handlers=_log_handlers,
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)

def _train_one_epoch(model, loader, criterion, optimizer, scheduler, device, args, epoch):
    """Run one optimization pass over *loader* and return the average batch loss."""
    model.train()
    total_loss = 0.0

    for batch_idx, batch in enumerate(loader):
        input_ids = batch['input_ids'].to(device)
        labels = batch['labels'].to(device)

        # Forward pass
        optimizer.zero_grad()
        outputs = model(input_ids)
        loss = criterion(outputs, labels)

        # Backward pass with gradient clipping; the scheduler is stepped
        # per batch because total_steps was computed in batches.
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
        optimizer.step()
        scheduler.step()

        total_loss += loss.item()

        if (batch_idx + 1) % args.log_interval == 0:
            logger.info(f"Epoch {epoch+1}/{args.epochs}, Batch {batch_idx+1}/{len(loader)}, Loss: {loss.item():.4f}")

    return total_loss / len(loader)


def _evaluate(model, loader, criterion, device):
    """Evaluate *model* on *loader*; return (avg_loss, accuracy, macro_f1)."""
    model.eval()
    total_loss = 0.0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in loader:
            input_ids = batch['input_ids'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(input_ids)
            total_loss += criterion(outputs, labels).item()

            preds = outputs.argmax(dim=1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    avg_loss = total_loss / len(loader)
    accuracy = accuracy_score(all_labels, all_preds)
    f1 = f1_score(all_labels, all_preds, average='macro')
    return avg_loss, accuracy, f1


def train(args):
    """Train a BertTextCNN text classifier and persist checkpoints.

    Args:
        args: Parsed command-line namespace (see the argparse setup in
            ``__main__``) carrying data paths, model hyper-parameters and
            optimization settings.

    Side effects:
        Saves ``best_model.pth`` (highest validation macro-F1) and
        ``final_model.pth`` into ``args.output_dir``; logs progress to the
        configured handlers.
    """
    # Auto-select the device. The original pinned "cpu" while still calling
    # torch.cuda.empty_cache(); now the two agree.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # Build datasets
    logger.info("加载训练集和验证集...")
    train_dataset = ClassificationDataset(args.train_file, max_length=args.max_length)
    valid_dataset = ClassificationDataset(args.valid_file, max_length=args.max_length)

    # Build data loaders (shuffle only the training split)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size)

    # Build the model
    logger.info(f"初始化模型: BertTextCNN...")
    model = BertTextCNN(
        bert_model_path=args.bert_model_path,
        num_labels=args.num_labels,
        filter_sizes=tuple(map(int, args.filter_sizes.split(','))),
        num_filters=args.num_filters
    )
    model.to(device)

    criterion = nn.CrossEntropyLoss()

    # Discriminative learning rates: the pretrained BERT encoder gets a 10x
    # smaller LR than the freshly initialized CNN/classifier parameters.
    bert_params = list(model.bert.parameters())
    other_params = [p for n, p in model.named_parameters() if not n.startswith('bert.')]

    # torch.optim.AdamW replaces transformers.AdamW, which is deprecated and
    # removed in recent transformers releases (HF recommends this migration).
    optimizer = optim.AdamW([
        {'params': bert_params, 'lr': args.learning_rate / 10},
        {'params': other_params, 'lr': args.learning_rate}
    ], weight_decay=args.weight_decay)

    # Linear warmup (10% of steps) then linear decay, stepped per batch.
    total_steps = len(train_loader) * args.epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(total_steps * 0.1),
        num_training_steps=total_steps
    )

    best_f1 = 0.0
    for epoch in range(args.epochs):
        logger.info(f"开始第 {epoch+1}/{args.epochs} 轮训练")
        start_time = time.time()

        avg_train_loss = _train_one_epoch(
            model, train_loader, criterion, optimizer, scheduler, device, args, epoch
        )
        logger.info(f"第 {epoch+1} 轮平均训练损失: {avg_train_loss:.4f}, 用时: {time.time() - start_time:.2f}秒")

        # Reclaim memory once per epoch. The original ran gc.collect() (and
        # torch.cuda.empty_cache()) after *every* batch, which dominates
        # runtime without freeing anything extra — loop locals are rebound
        # each iteration anyway.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        # Validation
        logger.info("开始验证...")
        avg_valid_loss, accuracy, f1 = _evaluate(model, valid_loader, criterion, device)
        logger.info(f"验证损失: {avg_valid_loss:.4f}, 准确率: {accuracy:.4f}, F1分数: {f1:.4f}")

        # Checkpoint on validation macro-F1 improvement
        if f1 > best_f1:
            best_f1 = f1
            # os.path.join instead of string "+" so the path is portable.
            torch.save(model.state_dict(), os.path.join(args.output_dir, "best_model.pth"))
            logger.info(f"保存最佳模型，F1分数: {f1:.4f}")

    # Always keep the last-epoch weights as well
    torch.save(model.state_dict(), os.path.join(args.output_dir, "final_model.pth"))
    logger.info("训练完成，已保存最终模型")

if __name__ == "__main__":
    # Command-line interface for training the BertTextCNN classifier.
    parser = argparse.ArgumentParser(description="训练BertTextCNN模型进行文本分类")
    parser.add_argument("--train_file", type=str, default="train.csv", help="训练数据文件路径")
    parser.add_argument("--valid_file", type=str, default="valid.csv", help="验证数据文件路径")
    parser.add_argument("--bert_model_path", type=str, required=True, help="预训练BERT模型路径")
    parser.add_argument("--output_dir", type=str, default="./saved_models", help="模型保存目录")
    parser.add_argument("--max_length", type=int, default=1024, help="最大序列长度")
    parser.add_argument("--batch_size", type=int, default=2, help="批量大小")
    parser.add_argument("--epochs", type=int, default=3, help="训练轮数")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="学习率")
    parser.add_argument("--weight_decay", type=float, default=0.01, help="权重衰减")
    parser.add_argument("--max_grad_norm", type=float, default=1.0, help="梯度裁剪最大范数")
    parser.add_argument("--num_labels", type=int, default=14, help="分类标签数量")
    parser.add_argument("--filter_sizes", type=str, default="2,3,4", help="卷积核大小，用逗号分隔")
    parser.add_argument("--num_filters", type=int, default=50, help="每种卷积核的数量")
    parser.add_argument("--log_interval", type=int, default=5, help="日志打印间隔（批次数）")

    args = parser.parse_args()

    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists(...) + os.makedirs(...) pair.
    os.makedirs(args.output_dir, exist_ok=True)

    train(args)