import torch
torch.set_num_threads(1)
import torch.nn as nn
import torch.nn.functional as F
from src.models.layer.embedding_layer import TabularEmbedding

class BasicBlock1D(nn.Module):
    """Basic residual block for a 1D ResNet: two 3-wide convs plus a skip path."""

    # Channel multiplier of the block's output relative to `out_channels`
    # (always 1 for basic blocks; bottleneck variants would use 4).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super().__init__()
        # First conv may shrink the temporal resolution via `stride`.
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # Second conv keeps resolution and channel count fixed.
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(out_channels)
        # Optional projection applied to the skip path when the main path
        # changes shape (stride > 1 or channel count differs).
        self.downsample = downsample

    def forward(self, x):
        """conv-bn-relu, conv-bn, add the (possibly projected) input, relu."""
        residual = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + residual)

class ResNetClassifier(nn.Module):
    """1D ResNet classifier fed by a TabularEmbedding feature extractor.

    With ``fuse=True`` the extractor's fused feature vector is treated as a
    single-channel sequence for the convolutional backbone. With
    ``fuse=False`` the categorical embedding map goes through the backbone
    and the projected continuous features are concatenated before the
    classification head.

    Args:
        num_cat_features: number of categorical input columns.
        num_cont_features: number of continuous input columns.
        num_classes: size of the output logit vector.
        embedding_dim: per-category embedding width (backbone input channels
            when ``fuse=False``).
        hidden_dim: hidden width of the extractor's continuous projection;
            also its output width concatenated in the non-fused head.
        dropout: dropout probability in the classification head.
        fuse: whether the extractor fuses categorical and continuous
            features into one vector.
    """

    def __init__(self, num_cat_features, num_cont_features, num_classes=2,
                 embedding_dim=16, hidden_dim=128, dropout=0.5, fuse=True):
        super().__init__()

        self.fuse = fuse

        # Categorical/continuous feature embedding (project-local module).
        self.feature_extractor = TabularEmbedding(num_cat_features, num_cont_features,
                                                  embedding_dim=embedding_dim, cont_hidden_dim=hidden_dim,
                                                  fuse=fuse)

        # Fused mode feeds one flat vector as a 1-channel "sequence";
        # non-fused mode feeds the embedding map with embedding_dim channels.
        in_channels = 1 if self.fuse else embedding_dim

        # ResNet stem: 7-wide strided conv + max-pool (4x total downsampling).
        self.stem = nn.Sequential(
            nn.Conv1d(in_channels, 32, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        )
        self.layer1 = self._make_layer(32, 32, blocks=1)
        self.layer2 = self._make_layer(32, 64, blocks=1, stride=2)

        # Global average pooling collapses the temporal axis to length 1.
        self.avgpool = nn.AdaptiveAvgPool1d(1)

        # Classification head. In non-fused mode the backbone's 64 pooled
        # channels are concatenated with the hidden_dim continuous projection.
        if self.fuse:
            classifier_input_dim = 64
        else:
            classifier_input_dim = 64 + hidden_dim
        self.classifier = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(classifier_input_dim, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(128, num_classes)
        )

    def _make_layer(self, in_channels, out_channels, blocks, stride=1):
        """Stack `blocks` BasicBlock1D units; project the skip path when
        the first block changes stride or channel count."""
        downsample = None
        if stride != 1 or in_channels != out_channels:
            downsample = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(out_channels),
            )
        layers = [BasicBlock1D(in_channels, out_channels, stride, downsample)]
        for _ in range(1, blocks):
            layers.append(BasicBlock1D(out_channels, out_channels))
        return nn.Sequential(*layers)

    def _backbone_features(self, x):
        """Run stem + residual stages and global-pool to a [batch, 64] vector."""
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.avgpool(x)          # [batch, 64, 1]
        return torch.flatten(x, 1)   # [batch, 64]

    def forward(self, batch):
        """Classify a (x_cat, x_cont, _) batch tuple; returns [batch, num_classes] logits."""
        x_cat, x_cont, _ = batch

        if self.fuse:
            # Extractor returns (fused, cat, cont); only the fused vector is
            # used here. fused is [batch, cat_output_dim + cont_output_dim]
            # per the extractor's contract — TODO confirm against TabularEmbedding.
            x_combined, _, _ = self.feature_extractor(x_cat, x_cont)
            # Treat the flat feature vector as a 1-channel sequence.
            feats = self._backbone_features(x_combined.unsqueeze(1))
        else:
            # Assumes embeddings arrive as [batch, embedding_dim, num_cat_features]
            # — verify against TabularEmbedding's non-fused output.
            x_cat_embedded, x_cont_proj = self.feature_extractor(x_cat, x_cont)
            feats = self._backbone_features(x_cat_embedded)
            # Re-attach the continuous projection skipped by the conv backbone.
            feats = torch.cat([feats, x_cont_proj], dim=1)

        return self.classifier(feats)
