import jieba
import torch
import pandas as pd
import math
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.optim import AdamW
import numpy as np
from config import Config
from data_processor import DataGenerator, TextDataset
from model import EnhancedModel

def prepare_data():
    """Generate the synthetic dataset, build the vocabulary, and persist the data.

    Returns:
        tuple: (df, word2idx) where `df` is the generated DataFrame and
        `word2idx` maps word -> integer id, with reserved ids
        0 (`<PAD>`) and 1 (`<UNK>`).
    """
    generator = DataGenerator()
    df = generator.generate(50000)

    # Accumulate tokens into a set directly (no giant intermediate list).
    vocab = set()
    for text in df["text"]:
        vocab.update(jieba.lcut(text))

    # Sort before assigning ids: plain `set` iteration order varies between
    # runs (string hash randomization), which would make saved checkpoints
    # silently incompatible with a vocabulary rebuilt later.
    word2idx = {"<PAD>": 0, "<UNK>": 1}
    for word in sorted(vocab):
        word2idx[word] = len(word2idx)

    df.to_csv(Config.DATA_PATH, index=False)
    return df, word2idx

def load_pretrained_embeddings(word2idx, embedding_dim, file_path):
    """Load pretrained word vectors for the words present in `word2idx`.

    Words without a pretrained vector keep a random-normal initialization;
    the `<PAD>` row (index 0) is zeroed so padding contributes nothing.

    Args:
        word2idx: word -> row-index mapping (index 0 assumed to be <PAD>).
        embedding_dim: expected width of each vector.
        file_path: text file with `word v1 v2 ... vD` per line
            (GloVe / fastText `.vec` style).

    Returns:
        torch.FloatTensor of shape (len(word2idx), embedding_dim).
    """
    embeddings = np.random.normal(size=(len(word2idx), embedding_dim))
    embeddings[0] = 0.0  # <PAD> must not inject noise into padded positions
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            values = line.rstrip().split()
            # Skip blank lines, fastText-style "count dim" header lines, and
            # any malformed row whose vector width does not match — the
            # original code crashed on these when assigning into `embeddings`.
            if len(values) != embedding_dim + 1:
                continue
            word = values[0]
            # Only parse the vector for in-vocabulary words (avoids wasted
            # float conversion for the bulk of a large pretrained file).
            if word in word2idx:
                embeddings[word2idx[word]] = np.asarray(values[1:], dtype='float32')
    return torch.tensor(embeddings, dtype=torch.float32)

def main():
    """Train the multi-task model (subject tagging, sentiment, intent).

    Pipeline: generate data + vocab, split 80/20 into train/val, then train
    with AdamW under a warmup+cosine LR schedule, validating each epoch and
    early-stopping on a weighted mix of subject-token and sentiment accuracy.
    """
    cfg = Config()
    df, word2idx = prepare_data()

    # Quick data-distribution report before training starts.
    print("\nData Distribution:")
    print("Total samples:", len(df))
    print("Intent distribution:\n", df["intent"].value_counts())
    print("Sentiment distribution:\n", df["sentiment"].value_counts())

    # 80/20 random train/validation split.
    dataset = TextDataset(df, word2idx)
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
    train_loader = DataLoader(train_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=cfg.BATCH_SIZE)

    model = EnhancedModel(vocab_size=len(word2idx)).to(cfg.DEVICE)

    # Stronger regularization via a high weight decay (0.15).
    optimizer = AdamW(model.parameters(), lr=cfg.INITIAL_LR, weight_decay=0.15)

    # LR schedule: linear warmup for the first 15% of steps, then cosine decay.
    total_steps = len(train_loader) * cfg.EPOCHS
    warmup_steps = int(total_steps * 0.15)  # extended warmup phase

    def lr_lambda(current_step):
        # `current_step` here is the scheduler's internal counter, passed by LambdaLR.
        if current_step < warmup_steps:
            return float(current_step) / float(max(1, warmup_steps))
        progress = float(current_step - warmup_steps) / float(max(1, total_steps - warmup_steps))
        # Cosine decay with a raised floor of 0.05 * INITIAL_LR.
        return max(0.05, 0.5 * (1.0 + math.cos(math.pi * progress)))

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

    # Per-task losses. Subject tagging uses BCE with pos_weight=3 (positive
    # tokens presumably rare — TODO confirm against the data generator);
    # sentiment uses class weights [1.2, 1.0].
    intent_criterion = nn.CrossEntropyLoss()
    subj_criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([3.0], device=cfg.DEVICE))
    sent_criterion = nn.CrossEntropyLoss(weight=torch.tensor([1.2, 1.0], device=cfg.DEVICE))

    # NOTE(review): attaching a `.dropout` attribute to every nn.Linear only
    # has an effect if EnhancedModel.forward explicitly reads `m.dropout`;
    # nn.Linear itself never applies it. Likely a no-op — verify in model.py.
    model.train()
    model.apply(lambda m: setattr(m, 'dropout', nn.Dropout(p=0.4)) if isinstance(m, nn.Linear) else None)

    # Early-stopping state.
    best_val_acc = 0.0
    best_subj_acc = 0.0
    patience = 0
    current_step = 0  # NOTE(review): incremented below but never read — dead counter
    min_epochs = 20   # force a minimum number of training epochs
    max_patience = 15 # increased patience before stopping

    for epoch in range(cfg.EPOCHS):
        model.train()
        total_loss = 0.0
        subj_loss_total = 0.0
        sent_loss_total = 0.0
        intent_loss_total = 0.0

        for batch in train_loader:
            inputs = batch["input_ids"].to(cfg.DEVICE)
            subj_labels = batch["subject_tags"].to(cfg.DEVICE)
            sent_labels = batch["sentiment"].to(cfg.DEVICE)
            intent_labels = batch["intent"].to(cfg.DEVICE)

            subj_pred, sent_pred, intent_pred = model(inputs)

            loss_subj = subj_criterion(subj_pred, subj_labels)
            loss_sent = sent_criterion(sent_pred, sent_labels)
            loss_intent = intent_criterion(intent_pred, intent_labels)

            # Fixed multi-task weighting: subject 0.5, sentiment 0.3, intent 0.2.
            loss = 0.5 * loss_subj + 0.3 * loss_sent + 0.2 * loss_intent

            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()  # per-batch stepping so warmup/cosine track global steps
            current_step += 1  # Increment step counter

            total_loss += loss.item()
            subj_loss_total += loss_subj.item()
            sent_loss_total += loss_sent.item()
            intent_loss_total += loss_intent.item()

        # ---- validation ----
        model.eval()
        val_subj_correct = 0
        val_sent_correct = 0
        val_total_samples = 0
        val_total_tokens = 0

        # NOTE(review): torch.cuda.amp.autocast is deprecated (use
        # torch.amp.autocast('cuda')) and is CUDA-specific — confirm behavior
        # if cfg.DEVICE is CPU.
        with torch.no_grad(), torch.cuda.amp.autocast():
            for batch in val_loader:
                inputs = batch["input_ids"].to(cfg.DEVICE)
                subj_labels = batch["subject_tags"].to(cfg.DEVICE)
                sent_labels = batch["sentiment"].to(cfg.DEVICE)
                intent_labels = batch["intent"].to(cfg.DEVICE)

                subj_pred, sent_pred, intent_pred = model(inputs)

                # Mask out padding (token id 0) from subject-tag accuracy.
                mask = (inputs != 0).float()
                val_total_tokens += mask.sum().item()

                subj_correct = ((torch.sigmoid(subj_pred) > 0.5) == subj_labels.bool()) * mask
                val_subj_correct += subj_correct.sum().item()

                val_sent_correct += (torch.argmax(sent_pred, 1) == sent_labels).sum().item()
                val_total_samples += inputs.size(0)

        # Token-level subject accuracy and sample-level sentiment accuracy.
        val_subj_acc = val_subj_correct / val_total_tokens if val_total_tokens > 0 else 0
        val_sent_acc = val_sent_correct / val_total_samples if val_total_samples > 0 else 0
        current_val_acc = (val_subj_acc * 0.6 + val_sent_acc * 0.4)

        # Early stopping: disabled during the first `min_epochs` epochs, then
        # requires improvement of at least MIN_DELTA to reset patience.
        # NOTE(review): checkpoints are only saved once epoch >= min_epochs;
        # a best model reached earlier is never persisted — confirm intended.
        if epoch < min_epochs:
            patience = 0  # reset patience while inside the forced-minimum window
        else:
            if current_val_acc > best_val_acc + cfg.MIN_DELTA:
                best_val_acc = current_val_acc
                torch.save(model.state_dict(), cfg.MODEL_PATH)
                patience = 0
            else:
                patience += 1
                if patience >= max_patience:
                    print(f"Early stopping at epoch {epoch + 1}")
                    break

        print(f"Epoch {epoch + 1:03d}/{cfg.EPOCHS} | "
              f"Train Loss: {total_loss / len(train_loader):.4f} | "
              f"Subj Loss: {subj_loss_total / len(train_loader):.4f} | "
              f"Sent Loss: {sent_loss_total / len(train_loader):.4f} | "
              f"Subj Acc: {val_subj_acc:.2%} | Sent Acc: {val_sent_acc:.2%} | "
              f"LR: {optimizer.param_groups[0]['lr']:.6f}")  # Get actual LR from optimizer

        # Separately checkpoint the best subject-tagging model.
        if val_subj_acc > best_subj_acc:
            best_subj_acc = val_subj_acc
            torch.save(model.state_dict(), cfg.MODEL_PATH.replace('.pth', '_best_subj.pth'))

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()