import unittest
from unittest.mock import MagicMock, patch
import torch
from src.tune import HyperparameterTune


class TestHyperparameterTune(unittest.TestCase):
    """Unit tests for HyperparameterTune, driven by a mocked model and tiny in-memory loaders."""

    def setUp(self):
        # Stand-in for a Transformer model so no real network is constructed.
        self.fake_model = MagicMock()
        self.fake_model.device = torch.device("cpu")
        self.fake_model.parameters.return_value = [torch.nn.Parameter(torch.randn(2, 2))]

        # Single-batch lists acting as the train / validation data loaders.
        self.fake_train_loader = [
            {"src": torch.randint(1, 10, (2, 5)), "tgt": torch.randint(1, 10, (2, 5))}
        ]
        self.fake_val_loader = [
            {"src": torch.randint(1, 10, (2, 5)), "tgt": torch.randint(1, 10, (2, 5))}
        ]

        # Initial hyperparameter configuration handed to the tuner.
        self.config = {
            "learning_rate": 1e-3,
            "dropout": 0.1,
            "optimizer": "AdamW",
        }

        self.tuner = HyperparameterTune(
            self.fake_model,
            self.fake_train_loader,
            self.fake_val_loader,
            self.config,
        )

    @patch("torch.save")
    def test_save_best_model(self, save_spy):
        """A new best score should be recorded and the model persisted exactly once."""
        self.tuner.save_best_model(score=0.9, save_path="test_model.pth")
        save_spy.assert_called_once()
        self.assertEqual(self.tuner.best_score, 0.9)

    @patch("torch.load")
    def test_load_best_model(self, load_spy):
        """Loading should feed the deserialized state dict straight into the model."""
        load_spy.return_value = "mock_state_dict"
        self.tuner.load_best_model(load_path="test_model.pth")
        self.fake_model.load_state_dict.assert_called_once_with("mock_state_dict")

    def test_update_hyperparameters(self):
        """Partial config updates should overwrite only the supplied keys."""
        self.tuner.update_hyperparameters({"learning_rate": 5e-4, "dropout": 0.2})
        for key, expected in (("learning_rate", 5e-4), ("dropout", 0.2)):
            self.assertEqual(self.tuner.config[key], expected)

    def test_tune_method(self):
        """After tuning, the selected config values must come from the search space."""
        lr_grid = [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2]
        dropout_grid = [0.1, 0.2, 0.3, 0.4, 0.5]
        with patch.object(self.tuner, "_train_epochs", return_value=0.5):
            self.tuner.tune(optimizer_name="SGD", epochs=50)
            self.assertIn(self.tuner.config["optimizer"], ["AdamW", "SGD"])
            self.assertIn(self.tuner.config["learning_rate"], lr_grid)
            self.assertIn(self.tuner.config["dropout"], dropout_grid)

    def test_generate_masks(self):
        """Mask shapes: src padding mask (B,1,1,S) and tgt causal mask (B,1,T,T)."""
        source_ids = torch.tensor([[1, 2, 3], [4, 5, 0]])
        target_ids = torch.tensor([[1, 2, 3], [4, 0, 0]])
        source_mask, target_mask = self.tuner.generate_masks(source_ids, target_ids)
        self.assertEqual(source_mask.shape, (2, 1, 1, 3))
        self.assertEqual(target_mask.shape, (2, 1, 3, 3))

    def test_apply_hyperparameters(self):
        """Applying the config should push the learning rate into every optimizer group."""
        self.tuner._apply_hyperparameters()
        for group in self.tuner.optimizer.param_groups:
            self.assertEqual(group["lr"], self.config["learning_rate"])


# Allow running this test module directly: `python <file>` discovers and runs the tests.
if __name__ == "__main__":
    unittest.main()
