import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as functional
from resnet1d import ResidualUBlock, Transition, Bottleneck, SingleLayer


class T_Dist(nn.Module):
    """Student's t-distribution classification head.

    Models each class as a t-distribution in L2-normalized feature space
    with a learnable mean and a learnable (per-class) degree of freedom.
    The forward pass returns per-class density-derived logits plus a
    center loss pulling features toward their class means.
    """

    def __init__(self, num_classes: int, feat_dim: int, device, alpha: float = 0.1, lambda_: float = 0.01):
        """
        Args:
            num_classes: number of classes (one t-distribution each).
            feat_dim: dimensionality of the input feature vectors.
            device: device string/object; used to place constant tensors
                and compared against 'cuda' in forward().
            alpha: extra weight added to the labelled-class distance term
                during training (see the scatter_ branch in forward()).
            lambda_: weight of the auxiliary center loss.
        """
        super(T_Dist, self).__init__()
        self.device = device
        self.num_classes = num_classes
        self.alpha = alpha
        self.lambda_ = lambda_

        # Learnable class means and unconstrained degrees of freedom
        # (made positive in forward() via square + 0.1).
        self.means = nn.Parameter(torch.randn(num_classes, feat_dim))
        self.raw_freedom_degrees = nn.Parameter(torch.randn(num_classes, 1))
        # Fixed unit variances — a plain tensor, not a Parameter, so it is
        # never trained and does not appear in state_dict.
        self.var = torch.ones((num_classes, feat_dim)).to(device)

        nn.init.xavier_uniform_(self.means)
        nn.init.xavier_uniform_(self.raw_freedom_degrees)

        print('alpha={}, lambda_={}'.format(self.alpha, self.lambda_))

    def forward(self, feat, labels=None):
        """Compute t-distribution logits and the center loss.

        Args:
            feat: (batch, feat_dim) feature batch; L2-normalized internally.
            labels: optional label tensor.  NOTE(review): the two label
                paths disagree on format — the scatter_ branch treats
                ``labels`` as integer class indices, while the center-loss
                branch argmaxes over dim 1 as if one-hot.  Confirm the
                caller's label encoding.

        Returns:
            tuple of (logit, center_loss, normalized means, alpha, lambda_).
        """
        # Work on the unit hypersphere: normalize both features and means.
        feat = functional.normalize(feat, dim=1)
        means = functional.normalize(self.means, dim=1)
        # Squaring keeps the degrees of freedom positive; +0.1 bounds them
        # away from zero so the divisions below stay finite.
        freedom_degrees = self.raw_freedom_degrees.squeeze() ** 2 + 0.1

        batch_size = feat.size()[0]
        feat_len = torch.tensor(feat.size()[1], dtype=torch.float32).to(self.device)

        real_alpha = self.alpha
        real_lambda = self.lambda_

        # Broadcast shapes: var/means -> (C, 1, D), feat -> (1, B, D).
        reshape_var = torch.unsqueeze(self.var, dim=1)
        reshape_mean = torch.unsqueeze(means, dim=1)
        expand_feat = torch.unsqueeze(feat, dim=0)

        # (C, B, D) differences, then a (C, B, B) matmul of which only the
        # diagonal is kept — NOTE(review): this is O(B^2) memory/compute
        # for a per-sample distance; kept as-is to preserve behavior.
        data_mins_mean = expand_feat - reshape_mean
        pair_m_distance = torch.matmul(data_mins_mean / (reshape_var + 1e-8), torch.transpose(data_mins_mean, 1, 2)) / 2
        index = torch.tensor([i for i in range(batch_size)])
        # Diagonal of each (B, B) slice, transposed to (B, C): halved
        # variance-scaled squared distance from each sample to each mean.
        real_neg_sqr_dist = pair_m_distance[:, index, index].T

        # Product of per-class variances; all ones given self.var above.
        det = torch.prod(self.var, 1)

        if labels is None:
            sqr_dist = real_neg_sqr_dist

        else:
            labels_reshped = labels.view(labels.size()[0], -1)

            # Scale the labelled entries of the distance matrix by
            # (1 + alpha); the cuda/cpu split only differs in placement.
            if self.device == 'cuda':
                ALPHA = torch.zeros(batch_size, self.num_classes).to(self.device).scatter_(1, labels_reshped,
                                                                                           real_alpha)
                K = ALPHA + torch.ones([batch_size, self.num_classes]).to(self.device)
            else:
                ALPHA = torch.zeros(batch_size, self.num_classes).scatter_(1, labels_reshped, real_alpha)
                K = ALPHA + torch.ones([batch_size, self.num_classes])

            sqr_dist = torch.multiply(K, real_neg_sqr_dist)

        # Tile degrees of freedom to (B, C) so everything below broadcasts.
        fd = torch.tile(torch.unsqueeze(freedom_degrees, dim=0), (batch_size, 1))

        # Exponent of the t-density kernel; feat_len enters via log.
        exp = (fd + torch.log(feat_len)) / 2.0

        # t-distribution kernel: (1 + d^2/nu)^(-exp); 1e-8 guards pow(0, .).
        neg_sqr_dist = 1. + sqr_dist / fd
        all_neg_sqr_dist = torch.pow(neg_sqr_dist + 1e-8, -exp)

        # Normalizing constant via lgamma (clamped for numeric safety).
        c_up = torch.exp(torch.lgamma(torch.clamp(exp, 1e-8, 1e8)))
        c_down = torch.exp(torch.lgamma(torch.clamp(fd / 2., 1e-8, 1e8))) * torch.pow(fd, torch.log(feat_len) / 2.)

        logit = c_up / c_down * torch.sqrt(det) * all_neg_sqr_dist

        # L2-normalize logits across classes.
        logit = functional.normalize(logit, dim=1)

        if labels is None:
            # Inference: use pseudo-labels from the logits for the loss.
            psudo_labels = torch.argmax(logit, dim=1)
            means_batch = torch.index_select(means, dim=0, index=psudo_labels)
            center_loss = real_lambda * (torch.sum((feat - means_batch) ** 2) / 2) * (1. / batch_size)  # center_loss
        else:
            # Training: argmax over dim 1 (one-hot-style labels assumed here).
            index_labels = torch.argmax(labels, dim=1)
            means_batch = torch.index_select(means, dim=0, index=index_labels)
            center_loss = real_lambda * (torch.sum((feat - means_batch) ** 2) / 2) * (1. / batch_size)  # center_loss
        return logit, center_loss, means, real_alpha, real_lambda

class MetaResNet1d(nn.Module):
    """1-D ResNet/DenseNet hybrid with metadata fusion.

    A strided conv stem feeds four residual U-blocks, two dense blocks
    with transitions, multi-head self-attention over time, and adaptive
    max pooling.  The pooled signal embedding is concatenated with a
    projection of per-sample metadata and either passed to a T_Dist head
    (``if_t_dist=True``) or to a sigmoid classifier.
    """

    def __init__(self, nOUT, in_ch=12, out_ch=256, mid_ch=64, if_t_dist=True, al=0., la=0., device='cuda'):
        super(MetaResNet1d, self).__init__()
        self.if_t_dist = if_t_dist

        # Stem: stride-2 conv halves the time axis; BN handles the bias.
        self.conv = nn.Conv1d(in_channels=in_ch, out_channels=out_ch, kernel_size=15, padding=7, stride=2, bias=False)
        self.bn = nn.BatchNorm1d(out_ch)

        # Residual U-blocks of decreasing depth, registered in order.
        for idx, depth in enumerate((8, 7, 6, 5)):
            setattr(self, 'rub_{}'.format(idx), ResidualUBlock(out_ch=out_ch, mid_ch=mid_ch, layers=depth))

        # t-distribution head used when if_t_dist is set.
        self.student = T_Dist(num_classes=nOUT, feat_dim=out_ch, device=device, alpha=al, lambda_=la)

        growth_rate = 12
        reduction = 0.5
        n_channels = out_ch
        n_dense_blocks = 16

        # Dense block 1 + compressing transition.
        self.dense1 = self._make_dense(n_channels, growthRate=12, nDenseBlocks=n_dense_blocks, bottleneck=True)
        n_channels += n_dense_blocks * growth_rate
        n_out_channels = int(math.floor(n_channels * reduction))
        self.trans1 = Transition(n_channels, n_out_channels)

        # Dense block 2 + transition back to out_ch channels.
        n_channels = n_out_channels
        self.dense2 = self._make_dense(n_channels, growthRate=12, nDenseBlocks=n_dense_blocks, bottleneck=True)
        n_channels += n_dense_blocks * growth_rate
        self.trans2 = Transition(n_channels, out_ch)

        # Temporal self-attention + global max pooling.
        self.mha = nn.MultiheadAttention(out_ch, 8)
        self.pool = nn.AdaptiveMaxPool1d(output_size=1)

        # Meta features (gender, age) -> projection to match out_ch
        self.meta_fc = nn.Sequential(
            nn.Linear(7, out_ch),
            nn.ReLU(),
            nn.Dropout(0.3)
        )

        # Final fusion and prediction
        self.fc_0 = nn.Linear(out_ch * 2, out_ch)
        self.fc_1 = nn.Linear(out_ch, nOUT)
        self.sigmoid = nn.Sigmoid()

    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
        """Stack nDenseBlocks dense layers, each adding growthRate channels."""
        layer_cls = Bottleneck if bottleneck else SingleLayer
        blocks = [layer_cls(nChannels + i * growthRate, growthRate)
                  for i in range(int(nDenseBlocks))]
        return nn.Sequential(*blocks)

    def forward(self, x, meta, y=None):
        """
        x: (B, C, T)
        meta: (B, 7)  # [gender, age]
        """
        h = functional.leaky_relu(self.bn(self.conv(x)))

        for rub in (self.rub_0, self.rub_1, self.rub_2, self.rub_3):
            h = rub(h)

        h = self.trans1(self.dense1(h))
        h = self.trans2(self.dense2(h))

        h = functional.dropout(h, p=0.5, training=self.training)

        # MultiheadAttention expects (T, B, C); swap back afterwards.
        h = h.permute(2, 0, 1)
        h, _ = self.mha(h, h, h)
        h = h.permute(1, 2, 0)

        h = self.pool(h).squeeze(2)  # (B, out_ch)

        meta_emb = self.meta_fc(meta)  # (B, out_ch)
        feat = self.fc_0(torch.cat([h, meta_emb], dim=1))

        if not self.if_t_dist:
            logit = self.sigmoid(self.fc_1(feat))
            return logit, feat

        logit, center_loss, means, ra, rl = self.student(feat, labels=y)
        return logit, feat, center_loss, means, ra, rl

if __name__ == "__main__":
    # Smoke test: build the model on CPU and check the output shape.
    torch.manual_seed(0)  # deterministic weights and inputs for the test

    # Model parameters
    num_classes = 2      # Number of output classes
    input_channels = 12  # Number of input channels
    seq_length = 2048    # Sequence length
    batch_size = 50      # Batch size

    meta_input = torch.randn(batch_size, 7)  # Meta features
    # Random integer labels (values are valid class indices for scatter_).
    y = torch.randint(0, num_classes, (batch_size, num_classes))

    # Instantiate the ResNet model
    model = MetaResNet1d(nOUT=num_classes, in_ch=input_channels, out_ch=256, mid_ch=64, al=0., la=0., device='cpu', if_t_dist=True)
    print("ResNet model initialized successfully.\n")

    # Create a sample input tensor (batch_size, channels, seq_length)
    input_tensor = torch.randn(batch_size, input_channels, seq_length)
    print(f"Sample input tensor shape: {input_tensor.shape}\n")

    # Forward pass (fixed: result variable was misspelled `meas`).
    logit, feat, center_loss, means, ra, rl = model(input_tensor, meta_input, y)
    print(f"Model output shape: {logit.shape}\n")

    # Validate output shape
    expected_shape = (batch_size, num_classes)
    assert logit.shape == expected_shape, f"Error: Expected output shape {expected_shape}, but got {logit.shape}"
    print("Test passed! The ResNet model works as expected.")