# -*- coding: utf-8 -*-
from collections import namedtuple
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed.rpc as rpc
from torch import Tensor

from inception import InceptionOutputs, _InceptionOutputs
from inception import BasicConv2d, InceptionA, InceptionAux, InceptionB, \
    InceptionC, InceptionD, InceptionE

__all__ = ['InceptionShard1', 'InceptionShard2', 'DistInceptionV3']


def _call_method(method, rref, *args, **kwargs):
    r"""
    a helper function to call a method on the given RRef
    """
    return method(rref.local_value(), *args, **kwargs)


def _parameter_rrefs(module):
    r"""
    Wrap every parameter of ``module`` in an RRef.

    Used so a distributed optimizer can reference parameters that live on
    a remote worker.
    """
    param_rrefs = []
    for param in module.parameters():
        param_rrefs.append(rpc.RRef(param))
    return param_rrefs


def _remote_method(method, rref, *args, **kwargs):
    r"""
    Synchronously run ``method`` on the process that owns ``rref`` and
    return the result over RPC.
    """
    call_args = [method, rref, *args]
    return rpc.rpc_sync(rref.owner(), _call_method,
                        args=call_args, kwargs=kwargs)


def _async_remote_method(method, rref, *args, **kwargs):
    r"""
    Asynchronously run ``method`` on the owner of ``rref`` via RPC.

    Returns a Future that resolves to the method's return value.
    """
    return rpc.rpc_async(
        rref.owner(),
        _call_method,
        args=[method, rref] + list(args),
        kwargs=kwargs,
    )


class InceptionShard1(nn.Module):
    r"""
    First shard of the Inception V3 model.

    Holds the stem convolutions through ``Mixed_5c`` on ``cuda:0``.  The
    forward pass receives an RRef to the input batch, runs the shard on
    GPU, and moves the resulting activation back to CPU so it can be
    serialized over RPC to the second shard.
    """
    def __init__(self, transform_input=False, inception_blocks=None, init_weights=True):
        super(InceptionShard1, self).__init__()
        if inception_blocks is None:
            inception_blocks = [
                BasicConv2d, InceptionA, InceptionB, InceptionC,
                InceptionD, InceptionE, InceptionAux
            ]

        assert len(inception_blocks) == 7, f"Expect 7 inception blocks, got {len(inception_blocks)}."
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]

        # NOTE(review): devices are hard-coded and only devices[0] is used
        # by this shard — confirm cuda:1 is intentional before relying on it.
        self.devices = [torch.device("cuda:0"), torch.device("cuda:1")]

        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2).to(self.devices[0])
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3).to(self.devices[0])
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1).to(self.devices[0])
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1).to(self.devices[0])
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3).to(self.devices[0])
        self.Mixed_5b = inception_a(192, pool_features=32).to(self.devices[0])
        self.Mixed_5c = inception_a(256, pool_features=64).to(self.devices[0])

        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Truncated-normal init for conv/linear weights; constants for BN."""
        # Hoisted out of the per-module loop: import scipy once, and only
        # when init_weights is requested.
        import scipy.stats as stats
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _transform_input(self, x):
        # Re-normalize from a (0.5, 0.5) convention to ImageNet per-channel
        # mean/std when transform_input is enabled; otherwise pass through.
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x):
        """Run the stem + Mixed_5b/5c stack; shape comments per layer."""
        # N x 3 x 299 x 299
        x = self.Conv2d_1a_3x3(x)
        # N x 32 x 149 x 149
        x = self.Conv2d_2a_3x3(x)
        # N x 32 x 147 x 147
        x = self.Conv2d_2b_3x3(x)
        # N x 64 x 147 x 147
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 64 x 73 x 73
        x = self.Conv2d_3b_1x1(x)
        # N x 80 x 73 x 73
        x = self.Conv2d_4a_3x3(x)
        # N x 192 x 71 x 71
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 192 x 35 x 35
        x = self.Mixed_5b(x)
        # N x 256 x 35 x 35
        x = self.Mixed_5c(x)

        return x

    def forward(self, x_rref):
        # Debug trace showing which host executes this shard.
        import socket
        print(socket.gethostname(), 'InceptionShard1 forward...')
        x = self._transform_input(x_rref.to_here().to(self.devices[0]))

        # Move back to CPU so the activation can be shipped over RPC.
        return self._forward(x).cpu()


class InceptionShard2(nn.Module):
    r"""
    Second shard of the Inception V3 model.

    Holds ``Mixed_5d`` through the final classifier on ``cuda:0``.  The
    forward pass receives an RRef to the activation produced by
    :class:`InceptionShard1`, finishes the network, and returns CPU
    tensors so results can travel back over RPC.
    """
    def __init__(self, num_classes=1000, aux_logits=True, inception_blocks=None,
                 init_weights=True):
        super(InceptionShard2, self).__init__()
        if inception_blocks is None:
            inception_blocks = [
                BasicConv2d, InceptionA, InceptionB, InceptionC,
                InceptionD, InceptionE, InceptionAux
            ]
        assert len(inception_blocks) == 7, f"Expect 7 inception blocks, got {len(inception_blocks)}."
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        # NOTE(review): devices are hard-coded and only devices[0] is used
        # by this shard — confirm cuda:1 is intentional before relying on it.
        self.devices = [torch.device("cuda:0"), torch.device("cuda:1")]

        self.aux_logits = aux_logits
        self.Mixed_5d = inception_a(288, pool_features=64).to(self.devices[0])
        self.Mixed_6a = inception_b(288).to(self.devices[0])
        self.Mixed_6b = inception_c(768, channels_7x7=128).to(self.devices[0])
        self.Mixed_6c = inception_c(768, channels_7x7=160).to(self.devices[0])
        self.Mixed_6d = inception_c(768, channels_7x7=160).to(self.devices[0])
        self.Mixed_6e = inception_c(768, channels_7x7=192).to(self.devices[0])
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes).to(self.devices[0])
        self.Mixed_7a = inception_d(768).to(self.devices[0])
        self.Mixed_7b = inception_e(1280).to(self.devices[0])
        self.Mixed_7c = inception_e(2048).to(self.devices[0])
        self.fc = nn.Linear(2048, num_classes).to(self.devices[0])

        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Truncated-normal init for conv/linear weights; constants for BN."""
        # Hoisted out of the per-module loop: import scipy once, and only
        # when init_weights is requested.
        import scipy.stats as stats
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _forward(self, x):
        """Run Mixed_5d through the classifier; returns (logits, aux_logits).

        ``aux_logits`` is None unless the model is training and aux is
        enabled.  Shape comments per layer.
        """
        # N x 288 x 35 x 35
        x = self.Mixed_5d(x)
        # N x 288 x 35 x 35
        x = self.Mixed_6a(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6b(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6c(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6d(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6e(x)
        # N x 768 x 17 x 17
        # Aux head is only evaluated during training, matching torchvision.
        aux_defined = self.training and self.aux_logits
        if aux_defined:
            aux = self.AuxLogits(x)
        else:
            aux = None
        # N x 768 x 17 x 17
        x = self.Mixed_7a(x)
        # N x 1280 x 8 x 8
        x = self.Mixed_7b(x)
        # N x 2048 x 8 x 8
        x = self.Mixed_7c(x)
        # N x 2048 x 8 x 8
        # Adaptive average pooling
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # N x 2048 x 1 x 1
        x = F.dropout(x, training=self.training)
        # N x 2048 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 2048
        x = self.fc(x)
        # N x 1000 (num_classes)

        return x, aux

    def forward(self, x_rref):
        # Debug trace showing which host executes this shard.
        import socket
        print(socket.gethostname(), 'InceptionShard2 forward...')
        x, aux = self._forward(x_rref.to_here().to(self.devices[0]))
        aux_defined = self.training and self.aux_logits
        if not aux_defined:
            # BUGFIX: move to CPU before returning.  The original returned
            # the raw CUDA tensor here, inconsistent with the aux branch
            # below and with InceptionShard1 — RPC cannot serialize CUDA
            # tensors without a device map.
            return x.cpu()

        # Stack logits and aux logits so a single tensor crosses the wire;
        # the caller recovers them by indexing [0] / [1] along dim 0.
        return torch.stack([x, aux], dim=0).cpu()


class DistInceptionV3(nn.Module):
    r"""
    A distributed InceptionV3 model which puts the first several layers on
    a remote parameter server, and locally holds parameters for the rest layers.
    The structure of the InceptionV3 model is borrowed from the PyTorch ImageNet
    example. See https://github.com/pytorch/examples/tree/master/imagenet.
    """
    def __init__(self, split_size, worker, num_classes=1000, aux_logits=True,
                 transform_input=False, inception_blocks=None,
                 init_weights=True):
        super(DistInceptionV3, self).__init__()
        # Micro-batch size used to split the input for pipelining.
        self.split_size = split_size

        # First shard lives in this process; second shard is constructed
        # remotely on `worker` and referenced via an RRef.
        self.inception_shard1 = InceptionShard1(transform_input, inception_blocks,
                                                init_weights)
        self.inception_shard2_rref = rpc.remote(worker, InceptionShard2,
                                                args=(num_classes, aux_logits,
                                                inception_blocks, init_weights))

    def forward(self, x):
        """Pipeline-parallel forward: split the batch into micro-batches,
        run shard 1 locally, dispatch shard 2 asynchronously, then gather."""
        output_futs = []

        # Dispatch each micro-batch; the async RPC lets successive
        # micro-batches overlap between the two shards.
        for i, micro_batch in enumerate(x.split(self.split_size, dim=0)):
            print("Processing macro-batch [{}, {}].".format(i * self.split_size, (i + 1) * self.split_size - 1))
            y = self.inception_shard1(rpc.RRef(micro_batch))
            fut_res = _async_remote_method(InceptionShard2.forward, self.inception_shard2_rref,
                                           rpc.RRef(y))

            output_futs.append(fut_res)

        # Gather results in dispatch order.
        # NOTE(review): this indexing assumes training mode with aux logits,
        # where shard 2 returns a stacked [logits, aux] pair along dim 0;
        # in eval mode shard 2 returns a bare tensor and val[0]/val[1]
        # would slice the batch dimension instead — confirm before using eval.
        tensor_out_x, tensor_out_aux = [], []
        for fut in output_futs:
            val = fut.wait()
            tensor_out_x.append(val[0])
            tensor_out_aux.append(val[1])

        return InceptionOutputs(torch.cat(tensor_out_x), torch.cat(tensor_out_aux))

    def parameter_rrefs(self):
        """Return RRefs to all parameters of both shards, for use with a
        distributed optimizer."""
        remote_params = []
        # get RRefs of InceptionShard1.
        remote_params.extend(_parameter_rrefs(self.inception_shard1))
        # Create RRefs for local parameters.
        remote_params.extend(_remote_method(_parameter_rrefs, self.inception_shard2_rref))

        return remote_params
