from engine.tuner import BaseTuner
import torch
import copy
import random
import numpy as np
from torch.optim.lr_scheduler import ReduceLROnPlateau


class HyperparameterTune(BaseTuner):
    """Random-search hyperparameter tuner built on top of BaseTuner.

    Samples configurations (learning rate, dropout, optimizer) from a fixed
    search space, trains briefly with each from the same initial weights,
    and keeps the configuration with the lowest validation loss.
    """

    def __init__(self, model, train_dataloader, val_dataloader, config):
        super().__init__(model, train_dataloader, val_dataloader, config)
        # The scheduler is tied to the *current* optimizer; it must be
        # rebuilt whenever the optimizer is replaced (see _rebuild_optimizer).
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.5, patience=3)
        self.search_space = {
            "learning_rate": [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2],
            "dropout": [0.1, 0.2, 0.3, 0.4, 0.5],
            "optimizer": ["AdamW", "SGD"]
        }

    def _rebuild_optimizer(self, optimizer_name, learning_rate):
        """Create a fresh optimizer and a scheduler bound to it.

        Previously the optimizer was replaced while the scheduler kept
        stepping the old, orphaned optimizer, so LR reductions never reached
        the optimizer actually used for training.
        """
        self.optimizer = self._get_optimizer(optimizer_name, learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.5, patience=3)

    def tune(self, optimizer_name: str = "AdamW", epochs: int = 10):
        """Run random-search hyperparameter optimization.

        Performs optimizer selection, then 5 rounds of random search over
        the search space, each trial training 3 epochs from the same
        initial weights and scored on the validation set.

        Args:
            optimizer_name: Optimizer used for the initial configuration.
            epochs: Kept for interface compatibility; each search trial
                trains for a fixed 3 epochs regardless of this value.

        Returns:
            The best hyperparameter dict found, or None if no trial improved
            on infinity (e.g. all trials produced non-finite loss).
        """
        print(f"Tuning with {optimizer_name} for {epochs} epochs...")

        # 1. Apply the requested optimizer and bind a matching scheduler.
        self.update_hyperparameters({"optimizer": optimizer_name})
        self._rebuild_optimizer(optimizer_name, self.config["learning_rate"])

        # Snapshot the starting weights so every trial begins from the same
        # state; otherwise later trials unfairly benefit from the training
        # done by earlier ones and the comparison is meaningless.
        initial_state = copy.deepcopy(self.model.state_dict())

        # 2. Random search for the best hyperparameters.
        best_config = None
        best_loss = float('inf')

        for _ in range(5):  # 5 search rounds
            sampled_hparams = {
                "learning_rate": random.choice(self.search_space["learning_rate"]),
                "dropout": random.choice(self.search_space["dropout"]),
                "optimizer": random.choice(self.search_space["optimizer"])
            }
            print(f"Testing hyperparameters: {sampled_hparams}")

            # Reset weights, then apply the sampled hyperparameters. The
            # live optimizer must be rebuilt as well: update_hyperparameters
            # alone does not swap it (cf. step 1 above).
            self.model.load_state_dict(initial_state)
            self.update_hyperparameters(sampled_hparams)
            self._rebuild_optimizer(sampled_hparams["optimizer"],
                                    sampled_hparams["learning_rate"])

            # Train 3 epochs and score on the validation set.
            val_loss = self._train_epochs(3)

            # Keep the configuration with the lowest validation loss.
            if val_loss < best_loss:
                best_loss = val_loss
                best_config = sampled_hparams
                print(f"New best config found: {best_config}, Loss: {best_loss:.4f}")

        # 3. Apply the best hyperparameters, with weights restored so a
        # subsequent full training run starts from a clean slate rather
        # than from whatever the last (possibly worst) trial left behind.
        if best_config:
            self.model.load_state_dict(initial_state)
            self.update_hyperparameters(best_config)
            self._rebuild_optimizer(best_config["optimizer"], best_config["learning_rate"])
            print(f"Using best config: {best_config}")
        return best_config

    def _train_epochs(self, num_epochs):
        """Train for num_epochs and return the average validation loss.

        Note: the previous implementation returned the *training* loss even
        though it was documented — and consumed by tune() — as a validation
        loss, which made the search favor configurations that overfit the
        training set. It now evaluates on val_dataloader after training.
        """
        # Hoisted: building a new CrossEntropyLoss per batch is wasteful.
        # ignore_index=0 — presumably 0 is the padding token id; TODO confirm
        # against the tokenizer/vocabulary used by the project.
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0)

        total_loss = 0.0
        self.model.train()
        for _ in range(num_epochs):
            for batch in self.train_dataloader:
                src, tgt = batch['src'].to(self.model.device), batch['tgt'].to(self.model.device)
                src_mask, tgt_mask = self.generate_masks(src, tgt)
                # Teacher forcing: feed tgt[:, :-1], predict tgt[:, 1:]
                # (masks sliced to match the shortened target length).
                output = self.model(src, tgt[:, :-1], src_mask, tgt_mask[:, :, :-1, :-1])

                loss = criterion(output.view(-1, output.size(-1)), tgt[:, 1:].reshape(-1))

                self.optimizer.zero_grad()
                loss.backward()
                # Clip gradients to stabilize training.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.optimizer.step()

                total_loss += loss.item()

        # Average training loss (kept for logging; guard against an empty
        # dataloader to avoid ZeroDivisionError).
        avg_train_loss = total_loss / max(num_epochs * len(self.train_dataloader), 1)
        print(f"Average training loss: {avg_train_loss:.4f}")

        # Score on the held-out validation set.
        val_loss = self._evaluate(criterion)

        # ReduceLROnPlateau steps on the monitored (validation) metric.
        self.scheduler.step(val_loss)
        # Read the LR from the optimizer itself: ReduceLROnPlateau does not
        # expose get_last_lr() on older torch versions.
        print(f"Learning rate adjusted to: {self.optimizer.param_groups[0]['lr']}")

        return val_loss

    def _evaluate(self, criterion):
        """Compute the average loss over val_dataloader with gradients off."""
        self.model.eval()
        total = 0.0
        with torch.no_grad():
            for batch in self.val_dataloader:
                src, tgt = batch['src'].to(self.model.device), batch['tgt'].to(self.model.device)
                src_mask, tgt_mask = self.generate_masks(src, tgt)
                output = self.model(src, tgt[:, :-1], src_mask, tgt_mask[:, :, :-1, :-1])
                loss = criterion(output.view(-1, output.size(-1)), tgt[:, 1:].reshape(-1))
                total += loss.item()
        self.model.train()  # restore training mode for subsequent epochs
        return total / max(len(self.val_dataloader), 1)
