"""
加入网络冻结机制，显式分为特征提取与delta预测部分
之后可能会实现多Heads结构
"""

import torch
import torch.nn as nn
import torchvision.models as models

import os
import torch.optim as optim
from tqdm import tqdm


class TactileDeltaEstimator(nn.Module):
    """Estimate slip distance delta in [0, 1] from a 9-channel tactile image.

    The backbone is a ResNet18 whose stem convolution is replaced to accept
    9 input channels and whose classification head is removed; a small MLP
    head regresses delta from the 512-dim pooled feature vector.

    Args:
        dropout_rate: dropout probability used in the regression head.
        freeze_layers: kept for interface compatibility; actual freezing is
            handled externally (e.g. by ``LayerUnfreezer``) — unused here.
    """

    def __init__(self, dropout_rate=0.3, freeze_layers=True):
        super(TactileDeltaEstimator, self).__init__()

        # Load a pretrained ResNet18. The modern torchvision API takes the
        # keyword-only `weights=` argument (passing it positionally raises
        # TypeError); fall back to the legacy `pretrained=True` flag only on
        # old torchvision versions that lack ResNet18_Weights.
        try:
            from torchvision.models import ResNet18_Weights
            resnet = models.resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
        except ImportError:
            resnet = models.resnet18(pretrained=True)

        # Replace the stem conv: 9 input channels, 3x3/stride-1 kernel suited
        # to small tactile images (e.g. 20x20). This layer is freshly
        # initialized — the pretrained 3-channel weights do not transfer.
        # NOTE(review): the bias is redundant because conv1 is immediately
        # followed by BatchNorm, but it is kept for checkpoint compatibility.
        resnet.conv1 = nn.Conv2d(in_channels=9, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True)

        # Drop the original classification head so the backbone outputs the
        # raw 512-dim pooled features.
        resnet.fc = nn.Identity()

        # Keep the whole model (with named submodules) as the backbone:
        # contains conv1, bn1, layer1~4, etc.
        self.backbone = resnet

        # Regression head mapping 512-dim features to delta in [0, 1].
        self.head = nn.Sequential(
            nn.Dropout(dropout_rate),
            nn.Linear(512, 128),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(128, 1),
            nn.Sigmoid(),  # squash the output into [0, 1]
        )

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, 9, 20, 20).

        Returns:
            delta: Tensor of shape (B, 1), values in [0, 1].
        """
        features = self.backbone(x)  # (B, 512) because fc is Identity
        delta = self.head(features)  # (B, 1)
        return delta


class LayerUnfreezer:
    """Progressively unfreeze the ``backbone`` of a model, layer by layer.

    The unfreeze order is derived from the backbone's named parameters:
    ``conv1`` (the freshly initialized stem) comes first, followed by the
    remaining layers from the highest (closest to the head) down to the
    lowest. The original ``fc`` head parameters are excluded. All backbone
    parameters are frozen on construction.
    """

    def __init__(self, model: nn.Module, current_index=0):
        """
        Args:
            model: module exposing a ``backbone`` attribute whose internal
                structure is ResNet-like (named ``conv1``, ``layer1``, ...).
            current_index: starting position in the unfreeze order; layers
                before it are treated as already unfrozen.
        """
        # Generalized from `isinstance(model, TactileDeltaEstimator)`: any
        # model with a `backbone` nn.Module is acceptable.
        assert hasattr(model, "backbone") and isinstance(model.backbone, nn.Module), \
            "Model must expose a `backbone` nn.Module"
        self.model = model

        # Build the unfreeze order. Parameters are visited front to back;
        # `fc` and `conv1` entries are skipped, `conv1` is appended last, and
        # the list is reversed so the final order is:
        #   conv1 first (new, untrained stem), then high layers down to low.
        self.unfreeze_order = []
        for name, _ in self.model.backbone.named_parameters():
            if name in ("fc.weight", "fc.bias") or name.startswith("conv1."):
                continue
            layer_name = name.rsplit(".", 1)[0]  # strip trailing ".weight"/".bias"
            if layer_name not in self.unfreeze_order:
                self.unfreeze_order.append(layer_name)
        self.unfreeze_order.append("conv1")
        self.unfreeze_order = self.unfreeze_order[::-1]
        self._current_index = current_index

        # Start with the whole backbone frozen (the head stays trainable).
        self._freeze_all_backbone()

    def _freeze_all_backbone(self):
        """Freeze every parameter in the backbone."""
        # A single pass suffices; the previous version redundantly froze all
        # parameters once per child module.
        for param in self.model.backbone.parameters():
            param.requires_grad = False

    def unfreeze_n_layers(self, n):
        """Unfreeze the next ``n`` layers in the unfreeze order.

        Raises:
            AssertionError: if fewer than ``n`` layers remain.
            ValueError: if a layer name cannot be found in the backbone.
        """
        assert self._current_index + n <= len(self.unfreeze_order), "Cannot unfreeze more layers than available"
        for i in range(self._current_index, self._current_index + n):
            layer_name = self.unfreeze_order[i]
            if not self._unfreeze_layer_by_name(layer_name):
                raise ValueError(f"Failed to unfreeze layer: {layer_name}")
            print(f"[LayerUnfreezer] 解冻层: {layer_name}")
        self._current_index += n

    def unfreeze_next(self):
        """Unfreeze the next layer.

        Returns:
            True on success; False when no layers remain or the layer
            could not be found.
        """
        if self._current_index >= len(self.unfreeze_order):
            print("[LayerUnfreezer] 已经解冻所有层")
            self._current_index = len(self.unfreeze_order)  # clamp the index
            return False  # nothing left to unfreeze

        layer_name = self.unfreeze_order[self._current_index]
        self._current_index += 1

        # Unfreeze that layer.
        if not self._unfreeze_layer_by_name(layer_name):
            return False
        print(f"[LayerUnfreezer] 解冻层: {layer_name}")
        return True

    def _unfreeze_layer_by_name(self, goal_name) -> bool:
        """Set ``requires_grad=True`` on every parameter of ``goal_name``
        inside ``model.backbone``; return False if none matched."""
        has_found = False
        for name, param in self.model.backbone.named_parameters():
            # Match the layer itself or its children; the "." guard prevents
            # prefix collisions (e.g. "layer1" also matching "layer10").
            if name == goal_name or name.startswith(goal_name + "."):
                param.requires_grad = True
                has_found = True
        if has_found:
            return True
        print(f"[LayerUnfreezer] 未找到层: {goal_name}")
        return False


class TactileDeltaTrainer:
    """Trainer for ``TactileDeltaEstimator``-style regression models.

    Supports pluggable loss functions, optional learning-rate scheduling
    ("plateau" / "step"), early stopping, and checkpointing.
    """

    def __init__(
        self,
        model,
        device=None,
        lr=1e-3,
        weight_decay=1e-5,
        loss_fn=None,
        scheduler_type="plateau",
        scheduler_patience=5,
        scheduler_factor=0.5,
    ):
        """
        Args:
            model: nn.Module mapping batched inputs to (B, 1) predictions.
            device: torch device; defaults to CUDA when available, else CPU.
            lr: Adam learning rate.
            weight_decay: Adam weight decay.
            loss_fn: loss callable; defaults to ``nn.MSELoss()``.
            scheduler_type: "plateau", "step", or any other value for none.
            scheduler_patience: patience for ReduceLROnPlateau.
            scheduler_factor: LR decay factor for ReduceLROnPlateau.
        """
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)

        # NOTE(review): only parameters that are trainable *now* are handed
        # to the optimizer. If layers are unfrozen later (e.g. via
        # LayerUnfreezer), recreate the trainer/optimizer so the newly
        # trainable parameters are actually updated.
        self.optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, self.model.parameters()), lr=lr, weight_decay=weight_decay
        )

        self.loss_fn = loss_fn or nn.MSELoss()

        # Learning-rate scheduler (None when scheduler_type is unrecognized).
        if scheduler_type == "plateau":
            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, mode="min", factor=scheduler_factor, patience=scheduler_patience
            )
        elif scheduler_type == "step":
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.5)
        else:
            self.scheduler = None

        self.working_dir = None  # directory used to resolve checkpoint paths
        self.dict_train_history = None  # per-epoch history, filled by train()

    def train_epoch(self, train_loader):
        """Run one training epoch.

        Returns:
            (avg_loss, avg_mae): per-batch averages over the epoch.
        """
        self.model.train()
        total_loss = 0.0
        total_abs_error = 0.0
        progress = tqdm(train_loader, desc="[Train]", leave=False)

        for inputs, targets in progress:
            inputs, targets = inputs.to(self.device), targets.to(self.device)

            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            if targets.dim() == 1:
                targets = targets.unsqueeze(1)  # ensure targets have shape [batch, 1]
            loss = self.loss_fn(outputs, targets)
            loss.backward()
            self.optimizer.step()

            batch_loss = loss.item()  # hoisted: .item() syncs, call it once
            total_loss += batch_loss
            total_abs_error += torch.mean(torch.abs(outputs - targets)).item()
            progress.set_postfix(loss=batch_loss)

        # Per-batch averages; slightly biased if the last batch is smaller.
        avg_loss = total_loss / len(train_loader)
        avg_abs_error = total_abs_error / len(train_loader)
        return avg_loss, avg_abs_error

    def validate(self, val_loader):
        """Evaluate on ``val_loader`` without gradient tracking.

        Returns:
            (avg_loss, avg_mae): per-batch averages over the loader.
        """
        self.model.eval()
        total_loss = 0.0
        total_abs_error = 0.0
        progress = tqdm(val_loader, desc="[Val]", leave=False)

        with torch.no_grad():
            for inputs, targets in progress:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.model(inputs)
                if targets.dim() == 1:
                    targets = targets.unsqueeze(1)  # ensure targets have shape [batch, 1]
                loss = self.loss_fn(outputs, targets)
                batch_loss = loss.item()
                total_loss += batch_loss
                total_abs_error += torch.mean(torch.abs(outputs - targets)).item()
                progress.set_postfix(loss=batch_loss)

        avg_loss = total_loss / len(val_loader)
        avg_abs_error = total_abs_error / len(val_loader)
        return avg_loss, avg_abs_error

    def train(
        self, train_loader, val_loader, num_epochs=100, early_stopping_patience=10, model_save_path="best_model.pth"
    ):
        """Full training loop with early stopping on validation loss.

        The best checkpoint (lowest validation loss) is saved to
        ``model_save_path`` (resolved against ``working_dir`` when set) and
        reloaded at the end, when one exists.

        Returns:
            dict with per-epoch "train_loss", "val_loss", "train_mae",
            "val_mae" lists.
        """
        model_save_path = os.path.join(self.working_dir, model_save_path) if self.working_dir else model_save_path
        best_val_loss = float("inf")
        epochs_no_improve = 0
        self.dict_train_history = {"train_loss": [], "val_loss": [], "train_mae": [], "val_mae": []}

        for epoch in range(1, num_epochs + 1):
            print(f"Epoch {epoch}/{num_epochs}")

            train_loss, train_mae = self.train_epoch(train_loader)
            val_loss, val_mae = self.validate(val_loader)

            print(f"Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}")
            print(f"Train MAE: {train_mae:.6f}, Val MAE: {val_mae:.6f}")
            self.dict_train_history["train_loss"].append(train_loss)
            self.dict_train_history["val_loss"].append(val_loss)
            self.dict_train_history["train_mae"].append(train_mae)
            self.dict_train_history["val_mae"].append(val_mae)

            if self.scheduler:
                # ReduceLROnPlateau needs the monitored metric; others don't.
                if isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(val_loss)
                else:
                    self.scheduler.step()

            # Early stopping
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                epochs_no_improve = 0
                self.save_model(model_save_path)
                print("  → New best model saved.")
            else:
                epochs_no_improve += 1
                print(f"  → No improvement for {epochs_no_improve} epoch(s).")

                if epochs_no_improve >= early_stopping_patience:
                    print("  → Early stopping triggered.")
                    break

        # Reload the best checkpoint. Guarded because no checkpoint exists
        # when num_epochs == 0 or when val_loss was NaN every epoch
        # (NaN < inf is False, so nothing was ever saved).
        if os.path.exists(model_save_path):
            self.load_model(model_save_path)
        print(f"Training complete. Best val loss: {best_val_loss:.6f}")
        return self.dict_train_history

    def save_model(self, path, extra_info=None):
        """Save model + optimizer state (and optional ``extra_info``) to ``path``."""
        state = {
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
        }
        if extra_info is not None:
            state["extra_info"] = extra_info
        torch.save(state, path)
        print(f"[Save] Model saved to {path}")

    def load_model(self, path):
        """Load model + optimizer state from ``path``.

        Returns:
            The checkpoint's "extra_info" payload, or None when absent.

        Raises:
            FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError(f"Checkpoint not found: {path}")

        # Checkpoints are assumed to be locally produced, trusted files.
        state = torch.load(path, map_location=self.device)
        self.model.load_state_dict(state["model_state_dict"])
        self.optimizer.load_state_dict(state["optimizer_state_dict"])
        print(f"[Load] Model loaded from {path}")
        return state.get("extra_info", None)

    def set_working_dir(self, working_dir):
        """Set (and create if needed) the directory used for checkpoints.

        Args:
            working_dir: working directory path.
        """
        if not os.path.exists(working_dir):
            os.makedirs(working_dir)
        self.working_dir = working_dir
        print(f"[Set Working Dir] Working directory set to {working_dir}")

    def get_model(self):
        return self.model

    def get_optimizer(self):
        return self.optimizer


if __name__ == "__main__":
    # 这里可以添加一些简单的测试代码
    model = TactileDeltaEstimator()
    unfreezer = LayerUnfreezer(model)

    print(unfreezer.unfreeze_order)
    
    # # 输出未冻结的参数量
    # total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # print(f"Total trainable parameters: {total_params}")
    # unfreezer.unfreeze_n_layers(1)  # 解冻前两层

    # total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # print(f"Total trainable parameters: {total_params}")    

