# models/dynamic_builder.py

import torch.nn as nn
from models.blocks import *

class DynamicModelBuilder:
    """Builds individual network layers from declarative config dicts.

    Each layer config has the shape::

        {"type": "<LayerType>", "params": {...}, "activation": "<nn activation name>"}

    where ``params`` and ``activation`` are optional.
    """

    @staticmethod
    def build_layer(layer_config, input_dim):
        """Instantiate one layer (plus optional activation) from ``layer_config``.

        Args:
            layer_config (dict): Layer spec with keys ``type`` (required),
                ``params`` (optional dict of constructor kwargs) and
                ``activation`` (optional name of a class in ``torch.nn``,
                e.g. ``"ReLU"``).
            input_dim (int): Feature dimension entering this layer.

        Returns:
            tuple: ``(layer, act, output_dim)`` where ``act`` is ``None``
            when no activation was requested, and ``output_dim`` is the
            feature dimension leaving the layer (used as ``input_dim`` for
            the next layer in the stack).

        Raises:
            ValueError: If ``type`` is not a supported layer type, or
                ``activation`` does not name a class in ``torch.nn``.
        """
        layer_type = layer_config["type"]
        params = layer_config.get("params", {})
        activation = layer_config.get("activation")

        if layer_type == "Linear":
            layer = nn.Linear(in_features=input_dim, out_features=params["out_features"])
            output_dim = params["out_features"]
        elif layer_type == "Dropout":
            layer = nn.Dropout(**params)
            output_dim = input_dim
        elif layer_type == "BatchNorm1d":
            layer = nn.BatchNorm1d(input_dim)
            output_dim = input_dim
        elif layer_type == "ResidualBlock":
            layer = ResidualBlock(input_dim, params["hidden_dim"])
            output_dim = input_dim
        elif layer_type == "TemporalConvBlock":
            layer = TemporalConvBlock(input_dim, params["out_channels"], kernel_size=params.get("kernel_size", 3))
            output_dim = params["out_channels"]
        elif layer_type == "FeatureAttentionBlock":
            # The block needs to know how many tokens it attends over;
            # the incoming feature dimension is used as num_tokens.
            # Hoist d_model so the constructor arg and output_dim can't drift apart.
            d_model = params.get("d_model", 64)
            layer = FeatureAttentionBlock(
                num_tokens=input_dim,
                d_model=d_model,
                n_heads=params.get("n_heads", 4),
                n_layers=params.get("n_layers", 2),
            )
            output_dim = d_model
        elif layer_type == "ResidualBlockGELU":
            layer = ResidualBlockGELU(input_dim, params["hidden_dim"])
            output_dim = input_dim
        elif layer_type == "LayerNorm":
            layer = nn.LayerNorm(input_dim)
            output_dim = input_dim
        elif layer_type == "XGBoostBlock":
            layer = XGBoostBlock(input_dim)
            output_dim = 1
        elif layer_type == "RandomForestBlock":
            layer = RandomForestBlock(input_dim)
            output_dim = 1
        elif layer_type == "BottleneckBlock":
            layer = BottleneckBlock(input_dim, expansion=params.get("expansion", 4))
            output_dim = input_dim
        elif layer_type == "FeedForwardBlock":
            layer = FeedForwardBlock(input_dim, params["hidden_dim"])
            output_dim = input_dim
        elif layer_type == "SqueezeExcitationBlock":
            layer = SqueezeExcitationBlock(input_dim, reduction=params.get("reduction", 16))
            output_dim = input_dim
        elif layer_type == "ConvMLPBlock":
            layer = ConvMLPBlock(
                input_dim,
                params["hidden_dim"],
                kernel_size=params.get("kernel_size", 3),
            )
            output_dim = input_dim
        elif layer_type == "GatedLinearUnit":
            layer = GatedLinearUnit(input_dim, params["hidden_dim"])
            output_dim = input_dim
        elif layer_type == "KNNBlock":
            layer = KNNBlock(input_dim)
            output_dim = 1
        else:
            raise ValueError(f"Unsupported layer type: {layer_type}")

        act = None
        if activation:
            # Validate the activation name explicitly so a typo raises a
            # descriptive ValueError (consistent with the layer-type check)
            # instead of a bare AttributeError from getattr.
            act_cls = getattr(nn, activation, None)
            if act_cls is None:
                raise ValueError(f"Unsupported activation: {activation}")
            act = act_cls()

        return layer, act, output_dim
