import torch
import torch.nn as nn
from data.data_loader import create_data_loaders
from config import Config
from model.dnn import DNNModel
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau

class DNNTrainer:
    """Trainer for DNNModel on a binary (click/interaction) prediction task.

    Owns the model, BCE loss, Adam optimizer and an accuracy-driven
    ReduceLROnPlateau scheduler; ``train`` runs the full fit loop and
    checkpoints the best model by validation accuracy.
    """

    def __init__(self, config):
        """Build model, loss, optimizer and scheduler from ``config``.

        Args:
            config: project config object; must provide ``LEARNING_RATE``,
                ``EPOCHS`` and ``DNN_MODEL_SAVE_PATH`` (plus whatever
                ``DNNModel`` itself reads).
        """
        self.config = config
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = DNNModel(config).to(self.device)
        # BCELoss requires inputs already in [0, 1], so DNNModel is expected
        # to end in a sigmoid and emit probabilities — TODO confirm in
        # model.dnn (otherwise switch to nn.BCEWithLogitsLoss).
        self.criterion = nn.BCELoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LEARNING_RATE)
        # Halve the LR after 2 epochs without validation-accuracy improvement;
        # mode='max' because we step the scheduler with accuracy, not loss.
        self.scheduler = ReduceLROnPlateau(
            self.optimizer,
            mode='max',
            patience=2,
            factor=0.5,
        )

    def calculate_metrics(self, preds, labels):
        """Return ``{"accuracy": float}`` for probability predictions.

        Bug fix: the previous version applied ``torch.sigmoid`` to values
        that are already probabilities (``nn.BCELoss`` requires inputs in
        [0, 1]). Since sigmoid(p) > 0.5 for every p > 0, virtually every
        sample was classified positive and the metric was meaningless.
        Probabilities are now thresholded at 0.5 directly.

        Args:
            preds: tensor of predicted probabilities in [0, 1].
            labels: tensor of ground-truth 0/1 labels, same shape as preds.
        """
        pred_labels = (preds > 0.5).float()
        acc = (pred_labels == labels).float().mean()
        return {"accuracy": acc.item()}

    def save_model(self, path):
        """Serialize the model's ``state_dict`` to ``path``."""
        torch.save(self.model.state_dict(), path)

    def train_epoch(self, train_loader):
        """Run one optimization pass over ``train_loader``.

        Returns:
            (avg_loss, metrics): loss and metric dict averaged over batches.
        """
        self.model.train()
        total_loss = 0
        all_metrics = {"accuracy": 0}

        for batch in tqdm(train_loader, desc="Training"):
            user_ids = batch["user_id"].to(self.device)
            item_ids = batch["item_id"].to(self.device)
            labels = batch["label"].to(self.device)

            self.optimizer.zero_grad()
            outputs = self.model(user_ids, item_ids)
            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()
            # detach(): metric computation must not keep the autograd graph alive
            metrics = self.calculate_metrics(outputs.detach(), labels)
            for k, v in metrics.items():
                all_metrics[k] += v

        avg_loss = total_loss / len(train_loader)
        for k in all_metrics:
            all_metrics[k] /= len(train_loader)

        return avg_loss, all_metrics

    def evaluate(self, test_loader):
        """Evaluate on ``test_loader`` without gradient tracking.

        Returns:
            (avg_loss, metrics): loss and metric dict averaged over batches.
        """
        self.model.eval()
        total_loss = 0
        all_metrics = {"accuracy": 0}

        with torch.no_grad():
            for batch in tqdm(test_loader, desc="Evaluating"):
                user_ids = batch["user_id"].to(self.device)
                item_ids = batch["item_id"].to(self.device)
                labels = batch["label"].to(self.device)

                outputs = self.model(user_ids, item_ids)
                loss = self.criterion(outputs, labels)

                total_loss += loss.item()
                metrics = self.calculate_metrics(outputs, labels)
                for k, v in metrics.items():
                    all_metrics[k] += v

        avg_loss = total_loss / len(test_loader)
        for k in all_metrics:
            all_metrics[k] /= len(test_loader)

        return avg_loss, all_metrics

    def train(self, train_loader, test_loader):
        """Fit for ``config.EPOCHS`` epochs, checkpointing the best model.

        Returns:
            Best validation accuracy observed. (Previously returned None;
            callers ignoring the return value are unaffected.)
        """
        best_accuracy = 0

        for epoch in range(self.config.EPOCHS):
            print(f"\nEpoch {epoch+1}/{self.config.EPOCHS}")
            train_loss, train_metrics = self.train_epoch(train_loader)
            val_loss, val_metrics = self.evaluate(test_loader)

            print(f"Train Loss: {train_loss:.4f}, Accuracy: {train_metrics['accuracy']:.4f}")
            print(f"Val Loss: {val_loss:.4f}, Accuracy: {val_metrics['accuracy']:.4f}")

            # Learning-rate adjustment: scheduler watches validation accuracy.
            self.scheduler.step(val_metrics['accuracy'])

            # Checkpoint whenever validation accuracy improves.
            if val_metrics['accuracy'] > best_accuracy:
                best_accuracy = val_metrics['accuracy']
                self.save_model(self.config.DNN_MODEL_SAVE_PATH)
                print(f"New best model saved with accuracy {best_accuracy:.4f}")

        return best_accuracy