import os
import time
from datetime import datetime

import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertModel

from models.sentiment_model import SentimentClassifier
from utils.preprocess import WeiboDataset

# --- Training hyperparameters (values in trailing comments are earlier settings) ---
BATCH_SIZE = 16  # earlier setting: 32
EPOCHS = 20  # earlier setting: 3
LEARNING_RATE = 1e-5  # earlier setting: 2e-5
MAX_LEN = 64  # max token length per example passed to the tokenizer
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def format_time(seconds):
    """Format a duration in seconds as ``HH:MM:SS``.

    Unlike ``time.strftime('%H:%M:%S', time.gmtime(seconds))``, this does
    not wrap around at 24 hours, so elapsed/remaining times for long
    training runs are reported correctly (e.g. 25 hours -> ``25:00:00``).

    Args:
        seconds: non-negative duration in seconds (int or float).

    Returns:
        The duration as an ``HH:MM:SS`` string (hours may exceed 99).
    """
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f'{hours:02d}:{minutes:02d}:{secs:02d}'

def _train_one_epoch(model, loader, optimizer, criterion, epoch):
    """Run one training epoch over *loader*; return (avg_loss, accuracy)."""
    model.train()
    total_loss = 0
    correct = 0
    total = 0

    pbar = tqdm(loader, desc=f'Epoch {epoch + 1}/{EPOCHS} [训练]')
    for batch in pbar:
        input_ids = batch['input_ids'].to(DEVICE)
        attention_mask = batch['attention_mask'].to(DEVICE)
        labels = batch['label'].to(DEVICE)

        optimizer.zero_grad()
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Running training accuracy over all batches seen so far.
        predictions = torch.argmax(outputs, dim=1)
        correct += (predictions == labels).sum().item()
        total += labels.size(0)
        total_loss += loss.item()

        # pbar.n is the number of completed iterations; inside the body the
        # current batch is not yet counted, hence the +1.
        pbar.set_postfix({
            'loss': f'{total_loss / (pbar.n + 1):.4f}',
            'acc': f'{correct / total:.4f}'
        })

    return total_loss / len(loader), correct / total


def _validate(model, loader, criterion, epoch):
    """Evaluate *model* on *loader* without gradients; return (avg_loss, accuracy)."""
    model.eval()
    total_loss = 0
    correct = 0
    total = 0

    pbar = tqdm(loader, desc=f'Epoch {epoch + 1}/{EPOCHS} [验证]')
    with torch.no_grad():
        for batch in pbar:
            input_ids = batch['input_ids'].to(DEVICE)
            attention_mask = batch['attention_mask'].to(DEVICE)
            labels = batch['label'].to(DEVICE)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            total_loss += criterion(outputs, labels).item()
            predictions = torch.argmax(outputs, dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)

            pbar.set_postfix({
                'loss': f'{total_loss / (pbar.n + 1):.4f}',
                'acc': f'{correct / total:.4f}'
            })

    return total_loss / len(loader), correct / total


def train_model():
    """Fine-tune a BERT-based sentiment classifier on the Weibo dataset.

    Loads ``data/weibo_train.csv``, splits it 90/10 into train/validation,
    trains for ``EPOCHS`` epochs, and saves the checkpoint with the best
    validation accuracy to ``models/sentiment_model/best_model.pth``.
    """
    # Load the training data (expects 'review' text and 'label' columns).
    print("正在加载数据...")
    df = pd.read_csv('data/weibo_train.csv', encoding='utf-8')

    # Hold out 10% of the data for validation (fixed seed for reproducibility).
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['review'].values, df['label'].values, test_size=0.1, random_state=42
    )

    print("正在初始化模型...")
    # Initialize the tokenizer and the pretrained BERT backbone.
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    bert_model = BertModel.from_pretrained('bert-base-chinese')
    model = SentimentClassifier(n_classes=2, pretrained_model=bert_model)
    model = model.to(DEVICE)

    # Data loaders: shuffle only the training split.
    train_dataset = WeiboDataset(train_texts, train_labels, tokenizer, MAX_LEN)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

    val_dataset = WeiboDataset(val_texts, val_labels, tokenizer, MAX_LEN)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
    criterion = torch.nn.CrossEntropyLoss()

    print(f"\n开始训练，共 {EPOCHS} 轮...")
    best_val_acc = 0
    start_time = time.time()

    for epoch in range(EPOCHS):
        avg_train_loss, train_acc = _train_one_epoch(
            model, train_loader, optimizer, criterion, epoch
        )
        avg_val_loss, val_acc = _validate(model, val_loader, criterion, epoch)

        # Estimate remaining time from the average time per completed epoch.
        elapsed_time = time.time() - start_time
        remaining_time = (elapsed_time / (epoch + 1)) * (EPOCHS - (epoch + 1))

        print(f'\nEpoch {epoch + 1} 总结:')
        print(f'训练损失: {avg_train_loss:.4f} | 训练准确率: {train_acc:.4f}')
        print(f'验证损失: {avg_val_loss:.4f} | 验证准确率: {val_acc:.4f}')
        print(f'已用时间: {format_time(elapsed_time)} | 预计剩余时间: {format_time(remaining_time)}')

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            print("【发现最佳模型】")
            save_path = 'models/sentiment_model'
            # Bug fix: ensure the checkpoint directory exists before saving —
            # torch.save raises FileNotFoundError on a fresh checkout otherwise.
            os.makedirs(save_path, exist_ok=True)
            torch.save({
                'model_state_dict': model.state_dict(),
                # NOTE(review): pickling the tokenizer object into the checkpoint
                # is fragile across transformers versions; consider
                # tokenizer.save_pretrained(save_path) instead — confirm with
                # the loading code before changing.
                'tokenizer': tokenizer,
                'config': {
                    'max_len': MAX_LEN,
                    'n_classes': 2
                }
            }, f'{save_path}/best_model.pth')

        print('-' * 50)

    total_time = time.time() - start_time
    print(f"\n训练完成！")
    print(f'总训练时间: {format_time(total_time)}')
    print(f'最终验证准确率: {val_acc:.4f}')
    print(f'最佳验证准确率: {best_val_acc:.4f}')

if __name__ == '__main__':
    # Report the compute environment once, then kick off training.
    cuda_ok = torch.cuda.is_available()
    print(f"使用设备: {DEVICE}")
    print(f"CUDA是否可用: {cuda_ok}")
    if cuda_ok:
        print(f"当前使用的GPU: {torch.cuda.get_device_name(0)}")
    train_model()