import os

import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from transformers import BertTokenizer, BertModel

from models.sentiment_model import SentimentClassifier
from utils.preprocess import WeiboDataset

# Training hyperparameters. The values after each '#' appear to be settings
# from an earlier experiment, kept for reference — TODO confirm with author.
BATCH_SIZE = 16  #32  (previous run)
EPOCHS = 5  #3  (previous run)
LEARNING_RATE = 1e-5  #2e-5  (previous run)
MAX_LEN = 128  #64  (previous run) — max token length per sample after BERT tokenization
# Train on GPU when one is available, otherwise fall back to CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train_model():
    """Fine-tune a BERT-based sentiment classifier on the Weibo dataset.

    Reads ``data/weibo_train.csv`` (expects ``review`` and ``label`` columns),
    splits it 90/10 into train/validation, trains for ``EPOCHS`` epochs while
    tracking the best validation accuracy, and saves the *best* checkpoint
    (model weights, tokenizer, and config) to
    ``models/sentiment_model/model.pth``.
    """
    # Load the full training CSV.
    df = pd.read_csv('data/weibo_train.csv', encoding='utf-8')

    # Hold out 10% of the data for validation; fixed seed for reproducibility.
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['review'].values, df['label'].values, test_size=0.1, random_state=42
    )

    # Initialize tokenizer and model (downloads weights on first run).
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    bert_model = BertModel.from_pretrained('bert-base-chinese')
    model = SentimentClassifier(n_classes=2, pretrained_model=bert_model)
    model = model.to(DEVICE)

    # Build data loaders; only the training set is shuffled.
    train_dataset = WeiboDataset(train_texts, train_labels, tokenizer, MAX_LEN)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

    val_dataset = WeiboDataset(val_texts, val_labels, tokenizer, MAX_LEN)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

    # Optimizer and loss function.
    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
    criterion = torch.nn.CrossEntropyLoss()

    # Training loop.
    print(f"\n开始训练，共 {EPOCHS} 轮...")
    best_val_acc = 0
    best_state = None  # CPU snapshot of the best model weights seen so far

    for epoch in range(EPOCHS):
        model.train()
        total_loss = 0
        batch_count = len(train_loader)
        print(f"\nEpoch {epoch + 1}/{EPOCHS}:")

        for batch_idx, batch in enumerate(train_loader, 1):
            input_ids = batch['input_ids'].to(DEVICE)
            attention_mask = batch['attention_mask'].to(DEVICE)
            labels = batch['label'].to(DEVICE)

            optimizer.zero_grad()
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            # Report progress every 100 batches.
            if batch_idx % 100 == 0:
                print(f"处理进度: {batch_idx}/{batch_count} 批次 "
                      f"({(batch_idx/batch_count)*100:.1f}%), "
                      f"当前批次损失: {loss.item():.4f}")

        # Validation pass (no gradient tracking).
        model.eval()
        val_acc = 0
        val_count = 0
        val_loss = 0

        with torch.no_grad():
            for batch in val_loader:
                input_ids = batch['input_ids'].to(DEVICE)
                attention_mask = batch['attention_mask'].to(DEVICE)
                labels = batch['label'].to(DEVICE)

                outputs = model(input_ids=input_ids, attention_mask=attention_mask)
                val_loss += criterion(outputs, labels).item()
                predictions = torch.argmax(outputs, dim=1)
                val_acc += (predictions == labels).sum().item()
                val_count += labels.size(0)

        current_val_acc = val_acc / val_count
        if current_val_acc > best_val_acc:
            best_val_acc = current_val_acc
            # Snapshot the best weights on CPU so the final checkpoint holds
            # the best epoch, not merely the last one.
            best_state = {k: v.detach().cpu().clone()
                          for k, v in model.state_dict().items()}
            print("【发现最佳模型】")

        print(f'\nEpoch {epoch + 1} 训练完成:')
        print(f'平均训练损失: {total_loss / batch_count:.4f}')
        print(f'平均验证损失: {val_loss / len(val_loader):.4f}')
        print(f'验证准确率: {current_val_acc:.4f}')
        print(f'最佳验证准确率: {best_val_acc:.4f}\n')
        print('-' * 50)

    print(f"\n训练完成！最终验证准确率: {current_val_acc:.4f}")

    # Save the best checkpoint. torch.save does not create parent
    # directories, so make sure the target directory exists first.
    save_path = 'models/sentiment_model'
    os.makedirs(save_path, exist_ok=True)
    torch.save({
        'model_state_dict': best_state if best_state is not None else model.state_dict(),
        'tokenizer': tokenizer,
        'config': {
            'max_len': MAX_LEN,
            'n_classes': 2
        }
    }, f'{save_path}/model.pth')

if __name__ == '__main__':
    # Report the compute environment before starting training.
    cuda_available = torch.cuda.is_available()
    print(f"使用设备: {DEVICE}")
    print(f"CUDA是否可用: {cuda_available}")
    if cuda_available:
        print(f"当前使用的GPU: {torch.cuda.get_device_name(0)}")
    train_model()