import sys
import torch
import torch.nn as nn
from torch.nn.modules import conv
from torch.utils.data import DataLoader
import torch.nn.functional as F

import math
from DCNv4 import DCNv4

from torchinfo import summary
import numpy as np

from dataset import ZhCharDataset
from dataset_96 import ZhCharDataset_96
from loss_func.SubCenterArcFace import SubCenterArcFace, SubCenterArcFace_diversity 
from .GroupConvBN import GroupResidualBlock_Up, ResidualBlock, GroupConvBN, GroupDeformConvBN
from .MDW_Block import MDW_Block
from .MDW_Net import build_blocks
from third.torch_receptive_field import receptive_field, receptive_field_for_unit
from third.mobilenetv4 import mobilenetv4
from utils import set_clsfier

import pdb
from IPython import embed

def make_divisible(value, divisor, min_value=None, round_down_protect=True):
    """Round ``value`` to the nearest multiple of ``divisor``.

    The result is never below ``min_value`` (defaults to ``divisor``).  With
    ``round_down_protect`` enabled, a result that fell more than 10% below the
    original value is bumped up by one ``divisor`` step — the usual MobileNet
    channel-rounding convention.
    """
    floor = divisor if min_value is None else min_value
    rounded = int(value + divisor / 2) // divisor * divisor
    result = max(floor, rounded)
    # Make sure that rounding down does not lose more than 10% of `value`.
    if round_down_protect and result < 0.9 * value:
        result = result + divisor
    return result


class MobileNetV4_Font(nn.Module):
    """Config-driven MobileNetV4-style network for character/font classification.

    The whole architecture — stem, feature stages, optional fc blocks and the
    classifier head — is assembled from the ``m_conf`` dict via ``build_blocks``;
    this class mainly orchestrates the config and handles margin scheduling for
    ArcFace-style heads (margin applied in train mode, zeroed in eval mode).
    """

    def __init__(self, m_conf=None, num_experts=4) -> None:
        # NOTE(review): `num_experts` is not used in this constructor; the
        # expert count for MOE heads appears to come from the classifier
        # config instead — confirm before removing.
        super(MobileNetV4_Font, self).__init__()
        self.name = 'MobileNetV4_Font'
        self.model_conf = m_conf

        self.stem = build_blocks(m_conf['stem'])
        self.features = build_blocks(m_conf['features'])

        # Optional extra blocks between the feature extractor and pooling.
        if m_conf.get('fc'):
            self.fc = build_blocks(m_conf['fc'])

        self.gap = nn.AdaptiveAvgPool2d(output_size=(1,1))

        # The last classifier entry names the head type; its `block_specs`
        # carry the head hyper-parameters (index 0 = embedding size,
        # index 3 = angular margin for SubCenterArcFace heads).
        clsfier_Ins_id = len(m_conf['classifier']) - 1
        self.classifier_name = m_conf['classifier'][clsfier_Ins_id]["block_name"]
        self.classifier = build_blocks(m_conf['classifier'])

        if self.classifier_name == "SubCenterArcFace":
            self.margin = m_conf['classifier'][clsfier_Ins_id]['block_specs'][3]
        self.embedding_size = m_conf['classifier'][clsfier_Ins_id]['block_specs'][0]

        self._initialize_weights()

    def forward(self, x, labels=None):
        """Run backbone + classifier head.

        @param x: input image batch.
        @param labels: optional ground-truth labels; needed by margin-based
            heads and used to compute the cross-entropy loss.
        @return (logits, loss) — ``loss`` is None when ``labels`` is None.
        """
        x = self.stem(x)
        x = self.features(x)
        
        x = self.gap(x)
        # Flatten (B, C, 1, 1) -> (B, C); assumes the final feature channel
        # count equals `embedding_size` — TODO confirm against the config.
        x = x.view(-1, self.embedding_size)
        
        # Margin-based heads need labels to apply the angular margin.
        if self.classifier_name == 'SubCenterArcFace':
            logits = self.classifier(x, labels)
        else:
            logits = self.classifier(x)

        loss =None
        if labels is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
        return logits, loss

    def _initialize_weights(self):
        """Kaiming-init conv/linear weights; unit-scale init for norm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)


    def load(self, model_path: str):
        """ Load the model from a file.
        @param model_path (str): path to model
        """
        params = torch.load(model_path, map_location=lambda storage, loc: storage)
        self.load_state_dict(params['state_dict'])
        print('load model parameters from [%s]' % model_path, file=sys.stderr)

    def save(self, model_path: str):
        """ Save the model to a file.
        @param path (str): path to the model
        """
        print('save model parameters to [%s]' % model_path, file=sys.stderr)
        params = {
            'state_dict': self.state_dict()
        }
        torch.save(params, model_path)

    def train(self, mode: bool = True):
        """Switch train/eval mode and sync the head's angular margin:
        the margin is applied only while training, zeroed otherwise."""
        super().train(mode)
        if self.classifier_name == 'SubCenterArcFace':
            if mode:
                self.classifier.update(self.margin)
            else:
                self.classifier.update(0)
        if self.classifier_name == 'MOE_SubArc':
            # Each expert wraps its own margin-based head; update all of them.
            if mode:
                for _model in self.classifier.experts:
                    _model.expert.update(self.margin)
            else:
                for _model in self.classifier.experts:
                    _model.expert.update(0)
        return self

    def eval(self):
        """Switch to eval mode and zero the margin on margin-based heads."""
        super().eval()
        if self.classifier_name == 'SubCenterArcFace':
            self.classifier.update(0)
        if self.classifier_name == 'MOE_SubArc':
            for _model in self.classifier.experts:
                _model.expert.update(0)
        return self

class CNNExpert(nn.Module):
    """One mixture-of-experts branch: grouped 3x3 conv -> BN -> ReLU, global
    average pooling, then a linear classifier producing per-class logits.

    NOTE(review): an earlier comment described this as a 1x1-kernel CNN, but
    the conv uses kernel_size=3 — the 3x3 behavior is what is kept here.
    """

    def __init__(self, in_channels, hidden_channels, num_classes):
        super().__init__()
        # The grouped conv needs both channel counts divisible by
        # `hidden_channels`; enforce the 2:1 ratio assumed by callers.
        assert hidden_channels == in_channels // 2
        self.hidden_channels = hidden_channels
        self.block = nn.Conv2d(
            in_channels,
            hidden_channels,
            kernel_size=3,
            padding=1,
            groups=hidden_channels,
            bias=False,
        )
        self.bn = nn.BatchNorm2d(hidden_channels)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(hidden_channels, num_classes)

    def forward(self, x):
        """Return logits of shape (batch, num_classes)."""
        feat = self.relu(self.bn(self.block(x)))
        pooled = self.avgpool(feat).view(-1, self.hidden_channels)
        return self.fc(pooled)

class MOE_CNN(nn.Module):
    """Mixture-of-experts classifier head: an SE-style gate scores each CNN
    expert, and the experts' logits are blended by the softmaxed scores."""

    def __init__(self, in_channels, hidden_channels, num_classes, num_experts=4):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = hidden_channels
        self.name = 'MOE_CNN'
        self.gate = SE_Gate(in_channels, num_experts)
        self.experts = nn.ModuleList(
            CNNExpert(in_channels, hidden_channels, num_classes)
            for _ in range(num_experts)
        )

    def forward(self, x):
        # Gate scores normalized over experts -> (b, 1, num_experts).
        weights = F.softmax(self.gate(x), dim=1).unsqueeze(1)

        # Stack every expert's logits -> (b, num_experts, num_classes).
        stacked = torch.stack([expert(x) for expert in self.experts], dim=1)

        # Weighted sum over experts -> (b, num_classes).
        return torch.matmul(weights, stacked).squeeze(1)

class SE_Gate(nn.Module):
    """Squeeze-and-excitation style gate: global average pooling followed by a
    single Linear + ReLU6, yielding one non-negative score per expert."""

    def __init__(self, channel, num_experts=4, reduction=4):
        super().__init__()
        # `reduction` is kept for interface compatibility; the current gate
        # maps channels straight to expert scores without a bottleneck layer.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, num_experts),
            nn.ReLU6(inplace=True),
        )

    def forward(self, x):
        """Map (b, c, h, w) features to (b, num_experts) gate scores."""
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        return self.fc(squeezed)




if __name__ == '__main__':
    # Smoke test: pull one batch from the dataset, build the model from its
    # config, and report a summary plus the network's receptive field.
    trainset = ZhCharDataset_96('./dataset/nankai_black/', mod='train', transform=True)
    num_classes = len(trainset.label2i)
    trainloader = DataLoader(trainset, batch_size=1, shuffle=True)
    imgs, labels, idx = next(iter(trainloader))
    print(f"imgs.shape:{imgs.shape}")
    print(f"labels.shape:{labels.shape}")

    device = 'cuda:0'
    imgs = imgs.to(device)
    labels = labels.to(device)
    idx = idx.to(device)

    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS["MNV4_FONT_DEPTH_FC_DOWNSAMPLE2"], num_classes)
    print(model_conf)
    model = MobileNetV4_Font(model_conf)
    model.cuda()

    summary(model, input_data=[imgs, labels])
    rf_info = receptive_field(model, (1, 96, 96))
    receptive_field_for_unit(rf_info, "1", (1, 1))
