import jieba
import torch
import pandas as pd
import math  # 添加math模块导入
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.optim import AdamW
import numpy as np
from config import Config
from data_processor import DataGenerator, TextDataset
from model import EnhancedModel


def prepare_data():
    """Generate the training corpus and build its vocabulary.

    Returns:
        tuple: (df, word2idx) where ``df`` is the generated DataFrame with a
        "text" column (also persisted to ``Config.DATA_PATH``) and
        ``word2idx`` maps each token to a row index, with "<PAD>" -> 0 and
        "<UNK>" -> 1 reserved.
    """
    # Generate the base dataset (50,000 samples)
    generator = DataGenerator()
    df = generator.generate(50000)

    # Collect the vocabulary from jieba-segmented text
    vocab = set()
    for text in df["text"]:
        vocab.update(jieba.lcut(text))

    # Sort before assigning indices: iterating a raw set is non-deterministic
    # across runs (hash randomization), which would make a rebuilt vocabulary
    # incompatible with previously saved model weights/embeddings.
    word2idx = {"<PAD>": 0, "<UNK>": 1}
    for word in sorted(vocab):
        word2idx[word] = len(word2idx)

    # Persist the generated data
    df.to_csv(Config.DATA_PATH, index=False)
    return df, word2idx


import os

def load_pretrained_embeddings(word2idx, embedding_dim, file_path):
    """Build an embedding matrix for the vocabulary from a word2vec-style
    text file (one token per line followed by its vector components).

    Words absent from the file keep a random N(0, 1) initialization; the
    "<PAD>" row is zeroed so padding tokens contribute nothing. Lines whose
    component count does not match ``embedding_dim`` (blank lines, word2vec
    header lines, mixed-dimension files) are skipped instead of raising on
    assignment.

    Args:
        word2idx: mapping from word to row index.
        embedding_dim: dimensionality of each vector in ``file_path``.
        file_path: path to the UTF-8 pre-trained vector file.

    Returns:
        torch.FloatTensor of shape (len(word2idx), embedding_dim).
    """
    embeddings = np.random.normal(size=(len(word2idx), embedding_dim))
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            values = line.rstrip().split()
            # Skip malformed lines rather than corrupting the matrix or
            # raising a shape error on the assignment below.
            if len(values) != embedding_dim + 1:
                continue
            word = values[0]
            if word in word2idx:
                embeddings[word2idx[word]] = np.asarray(values[1:], dtype='float32')
    # Padding embeddings are conventionally zero so <PAD> positions are inert.
    if "<PAD>" in word2idx:
        embeddings[word2idx["<PAD>"]] = 0.0
    return torch.tensor(embeddings, dtype=torch.float32)

def main():
    """Train EnhancedModel on generated data.

    Uses warmup + cosine LR scheduling, gradient accumulation, dynamic
    multi-task loss weighting, and early stopping on a blended validation
    metric (0.6 * subject-tag token accuracy + 0.4 * sentiment accuracy).
    Saves the best overall model to ``cfg.MODEL_PATH`` and the best
    subject-tagging model to a ``*_best_subj.pth`` sibling file.
    """
    cfg = Config()

    # Early-stopping / checkpoint tracking
    best_val_acc = 0.0
    best_subj_acc = 0.0
    patience = 0

    # ----- Data preparation -----
    df, word2idx = prepare_data()
    dataset = TextDataset(df, word2idx)
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
    train_loader = DataLoader(train_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=cfg.BATCH_SIZE)

    # Gradient accumulation: the optimizer (and scheduler) step once per
    # `accumulation_steps` batches, so the schedule length must be measured
    # in optimizer steps, not batches — otherwise the cosine decay would
    # cover only 1/accumulation_steps of training.
    accumulation_steps = 4
    steps_per_epoch = math.ceil(len(train_loader) / accumulation_steps)
    total_steps = steps_per_epoch * cfg.EPOCHS
    warmup_steps = int(total_steps * 0.2)

    def lr_lambda(current_step):
        # Linear warmup for the first 20% of steps, then cosine decay to 0.
        if current_step < warmup_steps:
            return float(current_step) / float(max(1, warmup_steps))
        progress = float(current_step - warmup_steps) / float(max(1, total_steps - warmup_steps))
        return 0.5 * (1.0 + math.cos(math.pi * progress))

    # ----- Pre-trained embeddings -----
    # Must match the dimensionality of the vectors in 100000-small.txt.
    embedding_dim = cfg.EMBED_DIM
    embedding_file_path = os.path.join(
        '/Users/tony/work/python_workspace/train_min_model/data', '100000-small.txt')
    pretrained_embeddings = load_pretrained_embeddings(word2idx, embedding_dim, embedding_file_path)

    model = EnhancedModel(vocab_size=len(word2idx)).to(cfg.DEVICE)
    model.embedding.weight.data.copy_(pretrained_embeddings)
    optimizer = AdamW(model.parameters(), lr=cfg.INITIAL_LR, weight_decay=0.02)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

    # Loss functions are batch-invariant — build them once instead of
    # allocating new weight tensors on every iteration.
    subj_criterion = nn.BCEWithLogitsLoss(
        pos_weight=torch.tensor([5.0], device=cfg.DEVICE)
    )
    sent_criterion = nn.CrossEntropyLoss(
        weight=torch.tensor([0.7, 1.3], device=cfg.DEVICE)
    )

    for epoch in range(cfg.EPOCHS):
        model.train()
        total_loss = 0.0
        subj_loss_total = 0.0
        sent_loss_total = 0.0

        # Anneal the subject-task weight from 0.7 down to 0.3 over training
        progress = epoch / cfg.EPOCHS
        subj_weight = 0.7 * (1 - progress) + 0.3 * progress

        # Zero once here; gradients must accumulate across batches. Calling
        # zero_grad() per batch (as before) silently discarded the gradients
        # of every batch except the last one in each accumulation group.
        optimizer.zero_grad()
        num_batches = len(train_loader)

        for batch_idx, batch in enumerate(train_loader):
            inputs = batch["input_ids"].to(cfg.DEVICE)
            subj_labels = batch["subject_tags"].to(cfg.DEVICE)
            sent_labels = batch["sentiment"].to(cfg.DEVICE)

            subj_pred, sent_pred = model(inputs)

            loss_subj = subj_criterion(subj_pred, subj_labels)
            loss_sent = sent_criterion(sent_pred, sent_labels)
            loss = subj_weight * loss_subj + (1 - subj_weight) * loss_sent

            # Scale only for backward so accumulated gradients average out;
            # keep `loss` unscaled so the logged train loss is comparable.
            (loss / accumulation_steps).backward()

            # Step on each accumulation boundary AND on the final (possibly
            # partial) group, so trailing gradients are never dropped.
            if (batch_idx + 1) % accumulation_steps == 0 or (batch_idx + 1) == num_batches:
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()

            total_loss += loss.item()
            subj_loss_total += loss_subj.item()
            sent_loss_total += loss_sent.item()

        # ----- Validation -----
        model.eval()
        val_subj_correct = 0
        val_sent_correct = 0
        val_total_samples = 0
        val_total_tokens = 0

        # NOTE(review): dropped the original torch.cuda.amp.autocast() here —
        # it is deprecated and unsafe when cfg.DEVICE is CPU; full-precision
        # inference under no_grad is sufficient for validation.
        with torch.no_grad():
            for batch in val_loader:
                inputs = batch["input_ids"].to(cfg.DEVICE)
                subj_labels = batch["subject_tags"].to(cfg.DEVICE)
                sent_labels = batch["sentiment"].to(cfg.DEVICE)

                subj_pred, sent_pred = model(inputs)

                # Token-level subject accuracy, ignoring padding (id 0)
                mask = (inputs != 0).float()
                val_total_tokens += mask.sum().item()

                subj_correct = ((torch.sigmoid(subj_pred) > 0.5) == subj_labels.bool()) * mask
                val_subj_correct += subj_correct.sum().item()

                # Sample-level sentiment accuracy
                val_sent_correct += (torch.argmax(sent_pred, 1) == sent_labels).sum().item()
                val_total_samples += inputs.size(0)

        val_subj_acc = val_subj_correct / val_total_tokens if val_total_tokens > 0 else 0
        val_sent_acc = val_sent_correct / val_total_samples if val_total_samples > 0 else 0
        current_val_acc = (val_subj_acc * 0.6 + val_sent_acc * 0.4)

        # Early stopping: save on meaningful improvement, otherwise burn patience
        if current_val_acc > best_val_acc + cfg.MIN_DELTA:
            best_val_acc = current_val_acc
            torch.save(model.state_dict(), cfg.MODEL_PATH)
            patience = 0
        else:
            patience += 1
            if patience >= cfg.PATIENCE:
                print(f"Early stopping at epoch {epoch + 1}")
                break

        print(f"Epoch {epoch + 1:03d}/{cfg.EPOCHS} | "
              f"Train Loss: {total_loss / len(train_loader):.4f} | "
              f"Subj Loss: {subj_loss_total / len(train_loader):.4f} | "
              f"Sent Loss: {sent_loss_total / len(train_loader):.4f} | "
              f"Subj Acc: {val_subj_acc:.2%} | Sent Acc: {val_sent_acc:.2%} | "
              f"LR: {scheduler.get_last_lr()[0]:.6f}")

        # Separately checkpoint the best subject-tagging weights
        if val_subj_acc > best_subj_acc:
            best_subj_acc = val_subj_acc
            torch.save(model.state_dict(), cfg.MODEL_PATH.replace('.pth', '_best_subj.pth'))

if __name__ == "__main__":
    main()