import torch
from torch import nn
from torch.optim import AdamW
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score

from src.bert.data_preprocess import config
from src.bert.my_dataloader import my_dataLoader
from src.bert.my_model import BertClassifier
from torch.optim.lr_scheduler import ReduceLROnPlateau

def bt_train():
    """Train the multi-task BertClassifier and evaluate it each epoch.

    Runs joint optimization of a category head (multi-class, CrossEntropy)
    and a label head (binary, BCEWithLogits) over ``config.epochs`` epochs,
    with gradient clipping, LR reduction on validation-F1 plateau, early
    stopping, and best-checkpoint saving.

    Side effects:
        - Prints running/epoch metrics to stdout.
        - Saves the best model weights to ``config.save_model + 'bt_model.pth'``
          whenever the mean validation F1 improves.

    Returns:
        None.
    """
    train_loader = my_dataLoader(config.train_path)
    # Validation loader — the test split doubles as the per-epoch validation set.
    val_loader = my_dataLoader(config.test_path)

    model = BertClassifier()
    model.to(config.device)

    optimizer = AdamW(model.parameters(), lr=config.lr)
    # Halve the LR when the mean validation F1 has not improved for 2 epochs.
    scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=2)

    cat_loss_fn = nn.CrossEntropyLoss()
    label_loss_fn = nn.BCEWithLogitsLoss()

    # Early-stopping state: stop after `patience_limit` epochs without improvement.
    best_val_f1 = 0.0
    patience_counter = 0
    patience_limit = 3

    for epoch in range(config.epochs):
        # ---------- training phase ----------
        model.train()
        total_loss = 0.0
        all_cat_preds = []
        all_cat_labels = []
        all_label_preds = []
        all_label_labels = []

        for batch_idx, (input_ids, attention_mask, labels, cat_labels) in enumerate(tqdm(train_loader)):
            input_ids = input_ids.to(config.device)
            attention_mask = attention_mask.to(config.device)
            labels = labels.float().to(config.device)
            cat_labels = cat_labels.long().to(config.device)

            out_cat, out_label = model(input_ids, attention_mask)

            # Joint loss: unweighted sum of the two task losses.
            cat_loss = cat_loss_fn(out_cat, cat_labels)
            label_loss = label_loss_fn(out_label, labels)
            loss = cat_loss + label_loss

            optimizer.zero_grad()
            loss.backward()
            # Gradient clipping stabilizes BERT fine-tuning.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            total_loss += loss.item()

            # .detach() instead of the deprecated .data attribute.
            predicted_cat = torch.argmax(out_cat.detach(), dim=1)
            # BUG FIX: the original .squeeze() collapsed a (1, 1) batch to a
            # 0-d tensor, which breaks list.extend() when the last batch has
            # size 1. view(-1) always yields a 1-d tensor.
            predicted_labels = (torch.sigmoid(out_label.detach()) > 0.5).float().view(-1)

            all_cat_preds.extend(predicted_cat.cpu().numpy())
            all_cat_labels.extend(cat_labels.cpu().numpy())
            all_label_preds.extend(predicted_labels.cpu().numpy())
            all_label_labels.extend(labels.cpu().numpy())

            # Periodic progress log with running (epoch-to-date) accuracies.
            if batch_idx % 1000 == 0:
                avg_loss = total_loss / (batch_idx + 1)
                cat_acc = accuracy_score(all_cat_labels, all_cat_preds)
                label_acc = accuracy_score(all_label_labels, all_label_preds)
                print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}, '
                      f'Avg Loss: {avg_loss:.4f}, Cat Acc: {cat_acc:.4f}, Label Acc: {label_acc:.4f}')

        # Training-set metrics for the finished epoch. NOTE: these are
        # accumulated while the weights were still changing, so they are a
        # running approximation, not a fixed-model evaluation.
        train_cat_acc = accuracy_score(all_cat_labels, all_cat_preds)
        train_label_acc = accuracy_score(all_label_labels, all_label_preds)
        train_cat_f1 = f1_score(all_cat_labels, all_cat_preds, average='weighted')
        train_label_f1 = f1_score(all_label_labels, all_label_preds, average='weighted')

        # ---------- validation phase ----------
        model.eval()
        val_cat_preds = []
        val_cat_labels = []
        val_label_preds = []
        val_label_labels = []
        val_total_loss = 0

        with torch.no_grad():
            for input_ids, attention_mask, labels, cat_labels in val_loader:
                input_ids = input_ids.to(config.device)
                attention_mask = attention_mask.to(config.device)
                labels = labels.float().to(config.device)
                cat_labels = cat_labels.long().to(config.device)

                out_cat, out_label = model(input_ids, attention_mask)

                cat_loss = cat_loss_fn(out_cat, cat_labels)
                label_loss = label_loss_fn(out_label, labels)
                loss = cat_loss + label_loss

                val_total_loss += loss.item()

                predicted_cat = torch.argmax(out_cat, dim=1)
                # Same 0-d-tensor fix as in the training loop.
                predicted_labels = (torch.sigmoid(out_label) > 0.5).float().view(-1)

                val_cat_preds.extend(predicted_cat.cpu().numpy())
                val_cat_labels.extend(cat_labels.cpu().numpy())
                val_label_preds.extend(predicted_labels.cpu().numpy())
                val_label_labels.extend(labels.cpu().numpy())

        # Validation-set metrics; the scheduler and early stopping both key
        # off the mean of the two weighted F1 scores.
        val_cat_acc = accuracy_score(val_cat_labels, val_cat_preds)
        val_label_acc = accuracy_score(val_label_labels, val_label_preds)
        val_cat_f1 = f1_score(val_cat_labels, val_cat_preds, average='weighted')
        val_label_f1 = f1_score(val_label_labels, val_label_preds, average='weighted')
        avg_val_f1 = (val_cat_f1 + val_label_f1) / 2

        print(f'Epoch {epoch} finished.')
        print(f'训练集 - Cat Acc: {train_cat_acc:.4f}, Label Acc: {train_label_acc:.4f}, Cat F1: {train_cat_f1:.4f}, Label F1: {train_label_f1:.4f}')
        print(f'测试集 - Cat Acc: {val_cat_acc:.4f}, Label Acc: {val_label_acc:.4f}, Cat F1: {val_cat_f1:.4f}, Label F1: {val_label_f1:.4f}')

        # LR schedule reacts to the monitored validation metric (mode='max').
        scheduler.step(avg_val_f1)

        # Early stopping + best-checkpoint saving.
        if avg_val_f1 > best_val_f1:
            best_val_f1 = avg_val_f1
            patience_counter = 0
            # Save weights only when validation performance improves.
            torch.save(model.state_dict(), config.save_model + 'bt_model.pth')
            print(f"新最佳模型已保存，平均 F1 值为: {avg_val_f1:.4f}")
        else:
            patience_counter += 1
            if patience_counter >= patience_limit:
                print(f"提前停止在 {epoch + 1} epochs 之后被触发")
                break

# Script entry point: run the full BERT multi-task training loop.
if __name__ == '__main__':
    bt_train()
