import sys
import torch
import torch.nn as nn
from torch.nn.modules import conv
from torch.utils.data import DataLoader
import torch.nn.functional as F

import math
from DCNv4 import DCNv4

from torchinfo import summary
import numpy as np

from  dataset import ZhCharDataset
from  dataset_96 import ZhCharDataset_96
from  loss_func.SubCenterArcFace import SubCenterArcFace, SubCenterArcFace_diversity 
from . GroupConvBN import GroupResidualBlock_Up, ResidualBlock, GroupConvBN, GroupDeformConvBN
from . MDW_Block import MDW_Block
from third.mobilenetv4 import mobilenetv4
from third.torch_receptive_field import receptive_field, receptive_field_for_unit
from utils import set_clsfier

import pdb
from IPython import embed

def make_divisible(value, divisor, min_value=None, round_down_protect=True):
    """Round *value* to the nearest multiple of *divisor*.

    @param value: the raw channel count (or similar quantity) to round
    @param divisor: the required divisor of the result
    @param min_value: lower bound for the result (defaults to *divisor*)
    @param round_down_protect: if True, never return less than 90% of *value*
    @return: an int multiple of *divisor*
    """
    floor = divisor if min_value is None else min_value
    candidate = int(value + divisor / 2) // divisor * divisor
    candidate = max(floor, candidate)
    # Make sure that rounding down does not lose more than 10% of value.
    if round_down_protect and candidate < 0.9 * value:
        candidate += divisor
    return candidate

def build_blocks(layer_spec_list):
    """Assemble a module from a declarative list of layer specs.

    Each entry of *layer_spec_list* is a dict with keys ``block_name``,
    ``num_blocks`` and ``block_specs``; ``block_specs`` holds one positional
    argument list per block, zipped against the per-type schema below.
    Most block types are appended to an ``nn.Sequential``; the classifier
    types ("FC", "SubCenterArcFace", "FC_classifier") replace the whole
    result and therefore must be the only entry in the list.

    @param layer_spec_list (list[dict]): layer specifications
    @return (nn.Module): the assembled layers
    @raises NotImplementedError: on an unknown ``block_name``
    """
    layers = nn.Sequential()
    for i, layer in enumerate(layer_spec_list):
        block_name = layer['block_name']
        if block_name == "GroupConvBN":
            schema_ = ['inp', 'oup', 'ks', 'stride', 'dilation', 'groups', 'bn', 'act']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                layers.add_module(f"layer{i}_convbn{j}", GroupConvBN(**args))
        elif block_name == "MDW_Block":
            # inp, hidden_sz, oup, ks=3, dw_num=4, stride=1, dilation=1, e_bn=False, e_act=None, proj=False, proj_bn=True, res=False, dw_act='gelu'
            schema_ = ['inp', 'oup', 'hidden_sz', 'ks', 'dw_num', 'stride', 'dilation', 'e_bn', 'e_act', 'proj', 'proj_bn', 'res', 'dw_act']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                layers.add_module(f"layer{i}_dw_block{j}", MDW_Block(**args))
        elif block_name == "AvgPool2d":
            # kernel_size, stride
            schema_ = ['kernel_size', 'stride']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                layers.add_module(f"layer{i}_AvgPool2d_{j}", nn.AvgPool2d(**args))
        elif block_name == 'convbn':
            schema_ = ['inp', 'oup', 'kernel_size', 'stride']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                layers.add_module(f"layer{i}_mnv4conv2d_{j}", mobilenetv4.conv_2d(**args))
        elif block_name == "uib":
            schema_ = ['inp', 'oup', 'start_dw_kernel_size', 'middle_dw_kernel_size', 'middle_dw_downsample', 'stride', 'expand_ratio', 'mhsa']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                # 'mhsa' is optional in the spec (zip truncates), so pop with a default.
                mhsa = args.pop("mhsa", 0)
                layers.add_module(f"layer{i}_uib_{j}", mobilenetv4.UniversalInvertedBottleneckBlock(**args))
                if mhsa:
                    mhsa_schema_ = [
                        "inp", "num_heads", "key_dim", "value_dim", "query_h_strides", "query_w_strides", "kv_strides",
                        "use_layer_scale", "use_multi_query", "use_residual"
                    ]
                    # The attention block consumes the UIB output channels as 'inp';
                    # list(mhsa) also accepts a tuple spec (plain `+ mhsa` would not).
                    args = dict(zip(mhsa_schema_, [args['oup']] + list(mhsa)))
                    layers.add_module(f"layer{i}_mhsa_{j}", mobilenetv4.MultiHeadSelfAttentionBlock(**args))
        elif block_name == "fused_ib":
            schema_ = ['inp', 'oup', 'stride', 'expand_ratio', 'act']
            for j in range(layer['num_blocks']):
                args = dict(zip(schema_, layer['block_specs'][j]))
                layers.add_module(f"layer{i}_fused_ib_{j}", mobilenetv4.InvertedResidual(**args))
        elif block_name == "FC":
            assert len(layer_spec_list) == 1
            schema_ = ['in_features', 'out_features', 'bias']
            args = dict(zip(schema_, layer['block_specs']))
            layers = nn.Linear(**args)
        elif block_name == "SubCenterArcFace":
            assert len(layer_spec_list) == 1
            # embedding_size, num_classes, k, margin=0.1, scale=16.0, easy_margin=False
            schema_ = ['embedding_size', 'num_classes', 'k', 'margin', 'scale', 'easy_margin']
            args = dict(zip(schema_, layer['block_specs']))
            layers = SubCenterArcFace(**args)
        elif block_name == "FC_classifier":
            assert len(layer_spec_list) == 1
            # in_features, out_features, bias
            schema_ = ['in_features', 'out_features', 'bias']
            args = dict(zip(schema_, layer['block_specs']))
            layers = nn.Linear(**args)
        else:
            raise NotImplementedError
    return layers

class MDW_Net(nn.Module):
    """Character-recognition network assembled from a declarative config.

    The config dict *m_conf* supplies specs for a stem, a feature extractor,
    an optional fully-connected head, and a classifier (either a plain
    Linear head or a SubCenterArcFace margin head, which additionally needs
    the labels at forward time).
    """

    def __init__(self, m_conf=None, num_experts=4) -> None:
        # NOTE: num_experts is kept for backward compatibility with older
        # MOE-based configs; it is currently unused.
        super(MDW_Net, self).__init__()
        self.name = 'MDW_Net'
        self.model_conf = m_conf

        self.stem = build_blocks(m_conf['stem'])
        self.features = build_blocks(m_conf['features'])

        # Optional fully-connected head between GAP and the classifier.
        if m_conf.get('fc'):
            self.fc = build_blocks(m_conf['fc'])

        self.gap = nn.AdaptiveAvgPool2d(output_size=(1, 1))

        # The classifier spec is the last (and only) entry in m_conf['classifier'].
        clsfier_Ins_id = len(m_conf['classifier']) - 1
        self.classifier_name = m_conf['classifier'][clsfier_Ins_id]["block_name"]
        self.classifier = build_blocks(m_conf['classifier'])

        if self.classifier_name == "SubCenterArcFace":
            # block_specs layout: [embedding_size, num_classes, k, margin, ...]
            self.margin = m_conf['classifier'][clsfier_Ins_id]['block_specs'][3]
        self.embedding_size = m_conf['classifier'][clsfier_Ins_id]['block_specs'][0]

        self._initialize_weights()

    def forward(self, x, labels=None):
        """Run a forward pass.

        @param x: input image batch
        @param labels: class indices; required by the SubCenterArcFace head
            and, when given, also triggers the cross-entropy loss computation
        @return: (logits, loss) where loss is None if labels is None
        """
        x = self.stem(x)
        x = self.features(x)

        x = self.gap(x)
        x = x.view(-1, self.embedding_size)

        if hasattr(self, 'fc'):
            x = self.fc(x)

        # The margin-based head needs the labels to apply the angular margin.
        if self.classifier_name == 'SubCenterArcFace':
            logits = self.classifier(x, labels)
        else:
            logits = self.classifier(x)

        loss = None
        if labels is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
        return logits, loss

    def _initialize_weights(self):
        """Kaiming-init convs and linears, unit-init norm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def load(self, model_path: str):
        """ Load the model from a file.
        @param model_path (str): path to model
        """
        # Always deserialize onto CPU; the caller moves the model to a device.
        params = torch.load(model_path, map_location='cpu')
        self.load_state_dict(params['state_dict'])
        print('load model parameters from [%s]' % model_path, file=sys.stderr)

    def save(self, model_path: str):
        """ Save the model to a file.
        @param path (str): path to the model
        """
        print('save model parameters to [%s]' % model_path, file=sys.stderr)
        params = {
            'state_dict': self.state_dict()
        }
        torch.save(params, model_path)

    def train(self, mode: bool = True):
        """Switch train/eval mode and sync the angular margin accordingly.

        The margin is active only during training; in eval mode it is set
        to 0 so inference logits are margin-free. `eval()` needs no
        override: nn.Module.eval() calls self.train(False), which lands here.
        """
        super().train(mode)
        if self.classifier_name == 'SubCenterArcFace':
            self.classifier.update(self.margin if mode else 0)
        if self.classifier_name == 'MOE_SubArc':
            for _model in self.classifier.experts:
                _model.expert.update(self.margin if mode else 0)
        return self

class CNNExpert(nn.Module):
    """A single expert: grouped 3x3 conv -> BN -> ReLU -> GAP -> linear head.

    Requires hidden_channels == in_channels // 2 so the grouped conv
    (groups == hidden_channels) sees exactly two input channels per group.
    """

    def __init__(self, in_channels, hidden_channels, num_classes):
        super(CNNExpert, self).__init__()
        assert hidden_channels == in_channels // 2
        self.hidden_channels = hidden_channels
        self.block = nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1, groups=hidden_channels, bias=False)
        self.bn = nn.BatchNorm2d(hidden_channels)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(hidden_channels, num_classes)

    def forward(self, x):
        feats = self.bn(self.block(x))
        feats = self.relu(feats)
        pooled = self.avgpool(feats).view(-1, self.hidden_channels)
        return self.fc(pooled)

class MOE_CNN(nn.Module):
    """Mixture-of-experts classifier: an SE-style gate softmax-weights the
    per-class logits produced by several CNNExpert branches."""

    def __init__(self, in_channels, hidden_channels, num_classes, num_experts=4):
        super(MOE_CNN, self).__init__()
        self.in_channels = in_channels
        self.out_channels = hidden_channels
        self.name = 'MOE_CNN'
        self.gate = SE_Gate(in_channels, num_experts)
        self.experts = nn.ModuleList(
            CNNExpert(in_channels, hidden_channels, num_classes)
            for _ in range(num_experts)
        )

    def forward(self, x):
        gate_scores = self.gate(x)
        weights = F.softmax(gate_scores, dim=1).unsqueeze(1)  # (b, 1, num_experts)

        # Stack per-expert logits into (b, num_experts, num_classes).
        stacked = torch.stack([expert(x) for expert in self.experts], dim=1)

        # Weighted sum over experts: (b, 1, E) @ (b, E, C) -> (b, C).
        return torch.matmul(weights, stacked).squeeze(1)

class SE_Gate(nn.Module):
    """Squeeze-and-excitation style gate: global average pool followed by a
    linear layer producing one (ReLU6-clamped) score per expert.

    NOTE(review): *reduction* is currently unused — the bottleneck variant
    is disabled; kept in the signature for compatibility.
    """

    def __init__(self, channel, num_experts=4, reduction=4):
        super(SE_Gate, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, num_experts),
            nn.ReLU6(inplace=True)
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        return self.fc(squeezed)




if __name__ == '__main__':
    # Smoke test: load one batch, build the model from config, print a
    # summary and probe its receptive field.
    trainset = ZhCharDataset_96('./dataset/nankai_black/', mod='train', transform=True)
    num_classes = len(trainset.label2i)
    trainloader = DataLoader(trainset, batch_size=1, shuffle=True)
    imgs, labels, idx = next(iter(trainloader))
    print(f"imgs.shape:{imgs.shape}")
    print(f"labels.shape:{labels.shape}")
    imgs = imgs.to('cuda:0')
    labels = labels.to('cuda:0')
    idx = idx.to('cuda:0')

    from models.MDWNet_config import MDW_CONFIGS

    model_conf = set_clsfier(MDW_CONFIGS["BASELINE_D"], num_classes)
    model = MDW_Net(model_conf)
    model.cuda()

    summary(model, input_data=[imgs, labels])
    rf_info = receptive_field(model, (1, 96, 96))
    receptive_field_for_unit(rf_info, "1", (1, 1))
