"""Model definition."""

import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from .transforms import GroupMultiScaleCrop
from .transforms import GroupRandomHorizontalFlip
from .ConvLSTM.convlstm import ConvLSTM
from . import getLogger
import typing
import logging
log: logging.Logger


# Decorator that implements static variables on a function.
def static_vars(**kwargs):
    """Return a decorator that attaches each keyword argument as an
    attribute on the decorated function (emulating static variables)."""
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


# Manually set debug to True to enable debug output.
@static_vars(debug=False)
def plog(*args):
    """Print *args* only when ``plog.debug`` is truthy (debug switch)."""
    # Truthiness test instead of `is True`: the idiomatic form, and it
    # also honors any truthy value assigned to plog.debug.
    if plog.debug:
        print(*args)


class NOOP(nn.Module):
    """Pass-through module used to disable a layer in a pretrained network."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Shape trace for debugging; silent unless plog.debug is enabled.
        plog('NOOP output shape', x.shape)
        return x


class Flatten(nn.Module):
    """Collapse all dimensions after the batch dimension into one."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # (N, d1, d2, ...) -> (N, d1*d2*...)
        batch = x.size(0)
        return x.view(batch, -1)


class Coviar(nn.Module):
    """TSN-style CoViAR model: a ResNet backbone classifying compressed-video
    inputs (I-frames, motion vectors, or residuals).

    Args:
        cfg: configuration object providing ``representation``,
            ``num_segments``, ``arch`` and ``num_class``.
    """

    def __init__(self, cfg):
        super(Coviar, self).__init__()
        global log
        log = getLogger(cfg)
        self._representation = cfg.representation
        self.num_segments = cfg.num_segments
        self.base_model = cfg.arch
        self.num_class = cfg.num_class

        self._print()
        self._prepare_base_model(self.base_model)
        self._prepare_tsn(self.num_class)

    def _print(self):
        """Log a summary of the model configuration."""
        log.info((
            f'Initializing {self.__class__.__name__}:\n'
            f'\tbase model: {self.base_model}\n'
            f'\tinput_representation: {self._representation}\n'
            f'\tnum_class: {self.num_class}\n'
            f'\tnum_segments: {self.num_segments}\n'))

    def _prepare_tsn(self, num_class):
        """Replace the backbone classifier with a fresh ``num_class``-way head."""
        # Direct attribute access instead of getattr/setattr with constant
        # string names — same effect, clearer.
        feature_dim = self.base_model.fc.in_features
        self.base_model.fc = nn.Linear(feature_dim, num_class)

    def _prepare_base_model(self, base_model):
        """Instantiate the ImageNet-pretrained backbone named by ``base_model``.

        Args:
            base_model: architecture name, e.g. ``'resnet50'``.

        Raises:
            ValueError: if ``base_model`` is not a ResNet variant.
        """
        if 'resnet' in base_model:
            # torchvision.models.resnetXXX are factory functions.
            self.base_model = getattr(
                torchvision.models, base_model)(pretrained=True)

            # 224x224 after random crop.
            self._input_size = 224
        else:
            raise ValueError('Unknown base model: {}'.format(base_model))

        # NOTE: motion vectors have 2 channels (x-delta, y-delta); mv_arr in
        # coviar_data_loader.c is likewise defined as (h, w, 2).
        if self._representation == 'mv':
            # Swap the stem conv for a 2-channel version (weights reinit).
            self.base_model.conv1 = nn.Conv2d(
                2, 64,
                kernel_size=(7, 7),
                stride=(2, 2),    # stride along h and w
                padding=(3, 3),   # zero-padding
                bias=False)
            self.data_bn = nn.BatchNorm2d(2)  # normalize raw mv input
        elif self._representation == 'residual':
            self.data_bn = nn.BatchNorm2d(3)

    def forward(self, input):
        """Classify each segment independently.

        Args:
            input: tensor of shape (batch, num_segments, C, H, W); it is
                flattened to (batch * num_segments, C, H, W) for the backbone.

        Returns:
            Logits of shape (batch * num_segments, num_class).
        """
        input = input.view((-1, ) + input.size()[-3:])
        if self._representation in ['mv', 'residual']:
            input = self.data_bn(input)

        base_out = self.base_model(input)
        return base_out

    @property
    def crop_size(self):
        """Side length of the (square) training crop."""
        return self._input_size

    @property
    def scale_size(self):
        """Resize target before cropping (256 for a 224 crop)."""
        return self._input_size * 256 // 224

    def get_augmentation(self):
        """Return the train-time augmentation pipeline for this representation."""
        if self._representation in ['mv', 'residual']:
            scales = [1, .875, .75]
        else:
            scales = [1, .875, .75, .66]

        print('Augmentation scales:', scales)
        # Transforms are common image transforms; they can be chained
        # together using transforms.Compose.
        return torchvision.transforms.Compose(
            [GroupMultiScaleCrop(self._input_size, scales),
             GroupRandomHorizontalFlip(is_mv=(self._representation == 'mv'))])


class CoviarLstm(Coviar):
    """CoViAR backbone followed by an LSTM over the segment sequence."""

    def __init__(self, cfg):
        # These must exist before Coviar.__init__ runs, because it calls
        # the overridden _print()/_prepare_tsn() which read them.
        self.hidden_size = cfg.hidden_size
        self.num_layers = cfg.num_layers
        self.dropout = cfg.dropout
        super(CoviarLstm, self).__init__(cfg)

    def _print(self):
        """Log a summary of the model configuration."""
        log.info((
            f'Initializing {self.__class__.__name__}:\n'
            f'\tbase model: {self.base_model}\n'
            f'\tinput_representation: {self._representation}\n'
            f'\tnum_class: {self.num_class}\n'
            f'\tnum_segments: {self.num_segments}\n'
            f'\tlstm_layers: {self.num_layers}\n'
            f'\tlstm_hidden_size: {self.hidden_size}\n'))

    def _prepare_tsn(self, num_class):
        """Strip the backbone classifier and attach an LSTM + linear head."""
        # Transfer learning (https://pytorch.org/docs/stable/index.html):
        # keep the ImageNet-pretrained trunk; replacing fc with NOOP makes
        # the backbone emit raw feature vectors.
        fea_dim = self.base_model.fc.in_features
        self.base_model.fc = NOOP()

        # batch_first=False: the LSTM expects (seq_len, batch, input_size).
        self.lstm = nn.LSTM(input_size=fea_dim, hidden_size=self.hidden_size,
                            num_layers=self.num_layers, bias=True,
                            batch_first=False, dropout=self.dropout,
                            bidirectional=False)
        self.fc = nn.Linear(self.hidden_size, num_class)

    def forward(self, input):
        """Run backbone per segment, LSTM over segments, then classify.

        Args:
            input: tensor of shape (batch, k, C, 224, 224) where k is
                ``num_segments``.

        Returns:
            Logits of shape (batch * k, num_class) — one row per time step.
        """
        # (batch, k, C, 224, 224) -> (batch*k, C, 224, 224)
        input = input.view((-1, ) + input.size()[-3:])
        if self._representation in ['mv', 'residual']:
            input = self.data_bn(input)

        # base_out: (batch*k, feature_dim)
        base_out = self.base_model(input)

        # TODO: set initial hidden and cell states explicitly.

        # NOTE: seq_len is k, i.e. a length-k sequence whose per-step input
        # vector has feature_dim entries (batch may be split under data
        # parallelism).  Reshape to (batch, k, feature_dim).
        base_out = base_out.view(
            (-1, self.num_segments) + base_out.size()[-1:])
        # -> (seq_len, batch, input_size) since batch_first=False.
        base_out = base_out.permute(1, 0, 2)

        # out: (seq_len, batch, hidden_size) when batch_first=False.
        base_out, _ = self.lstm(base_out)

        # -> (batch, seq_len, hidden) -> (batch*seq_len, hidden)
        base_out = base_out.permute(1, 0, 2).contiguous()
        base_out = base_out.view((-1, ) + base_out.size()[-1:])
        base_out = self.fc(base_out)

        return base_out

class CoviarSelfAttention(Coviar):
    """CoViAR backbone with a self-attention block over the final feature map."""

    def __init__(self, cfg):
        # These must exist before Coviar.__init__ runs, because it calls
        # the overridden _print()/_prepare_tsn() which read them.
        self.hidden_size = cfg.hidden_size
        self.num_layers = cfg.num_layers
        self.dropout = cfg.dropout
        self.batchsize = cfg.batch_size
        super(CoviarSelfAttention, self).__init__(cfg)

    def _print(self):
        """Log a summary of the model configuration."""
        log.info((
            f'Initializing {self.__class__.__name__}:\n'
            f'\tbase model: {self.base_model}\n'
            f'\tinput_representation: {self._representation}\n'
            f'\tnum_class: {self.num_class}\n'
            f'\tnum_segments: {self.num_segments}\n'
            f'\tlstm_layers: {self.num_layers}\n'
            f'\tlstm_hidden_size: {self.hidden_size}\n'))

    def _prepare_tsn(self, num_class):
        """Replace pooling/classifier with self-attention + avgpool + linear head."""
        # Read the backbone feature width BEFORE replacing fc, instead of
        # hard-coding 2048 (resolves the old TODO; also works for
        # resnet18/34, whose feature width is 512).
        self.feature_dim = self.base_model.fc.in_features
        # Keep the original pooling layer around; the backbone itself now
        # emits the raw spatial feature map.
        self.avgpool = self.base_model.avgpool
        self.base_model.avgpool = NOOP()
        self.base_model.fc = NOOP()
        from .selfattention import SimpleSelfAttention2
        self.selfattention = SimpleSelfAttention2(self.feature_dim)
        print(self.base_model)
        self.fc = nn.Linear(self.feature_dim, num_class)

    # TODO: verify mv / residual inputs work end to end.
    def forward(self, x):
        """Backbone -> self-attention -> avgpool -> linear classifier.

        Args:
            x: tensor of shape (batch, k, C, 224, 224).

        Returns:
            Logits of shape (batch * k, num_class).
        """
        # (batch, k, C, 224, 224) -> (batch*k, C, 224, 224)
        x = x.view((-1, ) + x.size()[-3:])
        if self._representation in ['mv', 'residual']:
            x = self.data_bn(x)

        x = self.base_model(x)
        # The backbone flattened the map; restore (N, C, 7, 7) for attention.
        # NOTE: the 7x7 spatial size assumes 224x224 input (stride-32 ResNet).
        x = x.view((-1, self.feature_dim, 7, 7))
        x = self.selfattention(x)
        x = self.avgpool(x)
        x = x.view((-1, self.feature_dim))
        x = self.fc(x)

        return x


class CoviarConvLstm(Coviar):
    """CoViAR backbone followed by a ConvLSTM over per-segment feature maps.

    The final spatial feature maps of the k segments form a sequence fed to a
    single-layer ConvLSTM; a 7x7 conv on the resulting hidden state produces
    the class logits.
    """

    def __init__(self, cfg):
        # These must exist before Coviar.__init__ runs, because it calls
        # the overridden _print()/_prepare_tsn() which read them.
        self.hidden_size = cfg.hidden_size
        self.num_layers = cfg.num_layers
        super(CoviarConvLstm, self).__init__(cfg)

    def _print(self):
        """Log a summary of the model configuration."""
        log.info((
            f'Initializing {self.__class__.__name__}:\n'
            f'\tbase model: {self.base_model}\n'
            f'\tinput_representation: {self._representation}\n'
            f'\tnum_class: {self.num_class}\n'
            f'\tnum_segments: {self.num_segments}\n'
            f'\tlstm_layers: {self.num_layers}\n'
            f'\tlstm_hidden_size: {self.hidden_size}\n'))

    def _prepare_tsn(self, num_class):
        """Strip the backbone head and attach a ConvLSTM + conv classifier."""
        # fea_dim = getattr(self.base_model, 'fc').in_features
        # out_features = getattr(self.base_model, 'fc').out_features
        # setattr(self.base_model, 'fc', nn.Linear(feature_dim, num_class))
        setattr(self.base_model, 'avgpool', NOOP())
        # NOTE resnet.py: x = x.view(x.size(0), -1)
        setattr(self.base_model, 'fc', NOOP())

        # avgpool input shape: torch.Size([12, 2048, 7, 7])
        # NOTE(review): .cuda() hard-codes GPU placement at construction time;
        # breaks CPU-only runs — confirm whether this is intentional.
        # The 2048/7x7 values assume a resnet50-family backbone at 224x224.
        self.convLstm = ConvLSTM(input_size=(7, 7),
                                 input_dim=2048,
                                 hidden_dim=[512],
                                 kernel_size=(3, 3),
                                 num_layers=1,
                                 batch_first=True,
                                 bias=True,
                                 return_all_layers=False).cuda()

        self.pred = nn.Conv2d(512, self.num_class, (7, 7))

    # Should be overriden by all subclasses.
    def forward(self, input):
        """Run backbone per segment, ConvLSTM over segments, then classify.

        Args:
            input: tensor of shape (batch, k, C, 224, 224) where k is
                ``num_segments``.

        Returns:
            Logit tensor shaped by the final view of the 7x7 conv output —
            presumably (batch, num_class); TODO confirm against
            ConvLSTM.forward's return structure.
        """
        # channel = input.size(2)  # number of channels
        # (batchsize, k, 3or2, 224, 224) reshape to (batchsize*k,  3or2, 244, 244)
        input = input.view((-1, ) + input.size()[-3:])
        if self._representation in ['mv', 'residual']:
            input = self.data_bn(input)

        # base_out.shape = torch.Size([batchsize*k, feature_dim])
        base_out = self.base_model(input)
        base_out = base_out.view((-1, self.num_segments, 2048, 7, 7))

        # NOTE(review): this unpacking assumes convLstm returns two
        # single-element lists (hidden states, cell states) when
        # return_all_layers=False — verify against ConvLSTM.forward.
        [h_state], [c_state] = self.convLstm(base_out)
        h_state = h_state.view((-1, ) + h_state.size()[-3:])
        out = self.pred(h_state)

        # reshape to (batch_size*seq_length, hidden_size)
        out = out.view(out.size()[:2])

        return out


class CrossEntropyLabelSmoothLoss(nn.Module):
    """Cross-entropy loss with label smoothing.

    Each one-hot target is smoothed to
    ``(1 - epsilon) * one_hot + epsilon / num_classes``.

    Args:
        num_classes (int): number of classes.
        epsilon (float): smoothing weight.
        use_gpu (bool): kept for backward compatibility; the smoothed
            targets are now created on the same device as the inputs, so
            no explicit transfer is needed.
    """

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
        super(CrossEntropyLabelSmoothLoss, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        """Compute the smoothed cross-entropy.

        Args:
            inputs: prediction logits with shape (batch_size, num_classes).
            targets: ground-truth class indices with shape (batch_size,).

        Returns:
            Scalar loss tensor.
        """
        log_probs = self.logsoftmax(inputs)

        # Build the one-hot matrix directly on log_probs' device/dtype.
        # The old code detoured through CPU (`.data.cpu()`) and then
        # `.cuda()`, which broke CPU-only runs with use_gpu=True and any
        # non-default CUDA device.
        one_hot = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1).to(log_probs.device), 1)

        smoothed = (1 - self.epsilon) * one_hot \
            + self.epsilon / self.num_classes
        loss = (- smoothed * log_probs).mean(0).sum()
        return loss



# #############
# Wrong use case
# #############
# Why this does not work: the fc layer's forward-pass contract is not the same as an LSTM's.
# setattr(self.base_model, 'fc',
#         nn.LSTM(input_size=feature_dim, hidden_size=self.hidden_size,
#                 num_layers=self.num_layers, bias=True,
#                 batch_first=False, dropout=0, bidirectional=False))
