import paddle.fluid.layers as L
from paddle.fluid.initializer import Normal
from paddle.fluid.dygraph import Layer, LayerList, Sequential, Linear, ParameterList

from helm.static.models.layers import BN, GlobalAvgPool, Conv2d, Act, BNAct, Pool2d
from helm.dynamic.models.nas.darts.operations import OPS, ReLUConvBN, FactorizedReduce

from torchvision.models import ResNet
class ReductionCell(Layer):
    """Fixed (non-searched) reduction cell that halves the spatial size.

    Both input states are first projected to ``C`` channels; each is then fed
    through its own factorized strided-conv branch and a max-pool branch, and
    the four branch outputs are concatenated along the channel axis, giving
    ``4 * C`` output channels.

    Args:
        C_prev_prev: channels of input state ``s0``.
        C_prev: channels of input state ``s1``.
        C: channels of every branch output.
    """

    def __init__(self, C_prev_prev, C_prev, C):
        super().__init__()
        self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1)

        # Branch pair applied to s0: factorized 1x3/3x1 strided conv + max pool.
        self.branch_a1 = Sequential(
            Act(),
            Conv2d(C, C, (1, 3), stride=(1, 2), groups=8, bias=False),
            Conv2d(C, C, (3, 1), stride=(2, 1), groups=8, bias=False),
            BNAct(C, act='default', affine=True),
            Conv2d(C, C, 1),
            BN(C, affine=True),
        )
        self.branch_a2 = Sequential(
            Pool2d(3, stride=2, type='max'),
            BN(C, affine=True),
        )
        # Independent branch pair applied to s1.  BUG FIX: the original code
        # re-assigned branch_a1/branch_a2 here, throwing away the first pair
        # and making forward() compute branch_a1(s1) twice.
        self.branch_b1 = Sequential(
            Act(),
            Conv2d(C, C, (1, 3), stride=(1, 2), groups=8, bias=False),
            Conv2d(C, C, (3, 1), stride=(2, 1), groups=8, bias=False),
            BNAct(C, act='default', affine=True),
            Conv2d(C, C, 1),
            BN(C, affine=True),
        )
        self.branch_b2 = Sequential(
            Pool2d(3, stride=2, type='max'),
            BN(C, affine=True),
        )

    def forward(self, s0, s1, *args):
        # Extra *args (hardwts/indices) are accepted and ignored so this cell
        # is call-compatible with NormalCell.
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        x0 = self.branch_a1(s0)
        x1 = self.branch_b1(s1)
        x2 = self.branch_a2(s0)
        x3 = self.branch_b2(s1)
        # NOTE: drop_path is intentionally disabled during architecture search.

        return L.concat([x0, x1, x2, x3], axis=1)


class MixedOp(Layer):
    """Weighted mixture over a set of candidate operations (GDAS-style).

    Args:
        primitives: iterable of keys into the ``OPS`` registry.
        C: channel count passed to each candidate op (stride fixed at 1).
    """

    def __init__(self, primitives, C):
        super().__init__()
        self._ops = LayerList()
        for name in primitives:
            self._ops.append(OPS[name](C, 1))

    def forward(self, x, hardwts, index):
        # Straight-through trick: only the op selected by `index` is actually
        # evaluated; every other term contributes just its (near-zero) weight
        # so that gradients still flow to all architecture parameters.
        terms = []
        for i, op in enumerate(self._ops):
            if i == index:
                terms.append(hardwts[i] * op(x))
            else:
                terms.append(hardwts[i])
        return sum(terms)


class NormalCell(Layer):
    """Searchable DARTS cell: each intermediate node sums a MixedOp applied
    to every previous state, and the last ``multiplier`` node outputs are
    concatenated along channels.

    Args:
        primitives: candidate op names for every edge.
        steps: number of intermediate nodes.
        multiplier: how many trailing states to concatenate for the output.
        C_prev_prev, C_prev, C: channel counts of s0, s1 and this cell.
        reduction_prev: whether the previous cell reduced spatial size
            (s0 then needs a FactorizedReduce instead of a 1x1 ReLUConvBN).
    """

    def __init__(self, primitives, steps, multiplier, C_prev_prev, C_prev, C, reduction_prev):
        super().__init__()
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1)
        self.steps = steps
        self.multiplier = multiplier

        # One LayerList of edges per intermediate node; node `step` has an
        # edge from each of its `step + 2` predecessor states.
        self.ops = LayerList()
        for step in range(self.steps):
            edges = LayerList()
            for _ in range(step + 2):
                edges.append(MixedOp(primitives, C))
            self.ops.append(edges)

    def forward(self, s0, s1, hardwts, indices):
        states = [self.preprocess0(s0), self.preprocess1(s1)]
        for step in range(self.steps):
            node = sum(
                edge(h, hardwts[step][j], int(indices[step][j]))
                for j, (edge, h) in enumerate(zip(self.ops[step], states))
            )
            states.append(node)

        return L.concat(states[-self.multiplier:], axis=1)


def gumbel_sample(a, tau):
    """Draw a Gumbel-softmax sample from logits ``a`` with temperature ``tau``.

    Returns a pair ``(hardwts, indices)``: ``hardwts`` is a straight-through
    one-hot tensor (forward pass is the one-hot argmax, backward pass uses the
    soft ``probs`` gradient), and ``indices`` is the sampled column index per
    row as plain Python ints.  Retries whenever the noise or the softmax
    produced non-finite values.
    """
    while True:
        # Standard Gumbel(0, 1) noise: -log(-log(U)), U ~ Uniform(0, 1).
        gumbels = -L.log(-L.log(L.uniform_random(a.shape, min=0.0, max=1.0)))
        logits = (L.log_softmax(a, axis=1) + gumbels) / tau
        probs = L.softmax(logits, axis=1)
        index = L.argmax(probs, axis=1)
        one_h = L.one_hot(L.unsqueeze(index, 1), a.shape[1])
        # Straight-through estimator: value of one_h, gradient of probs.
        hardwts = one_h - probs.detach() + probs
        # NOTE(review): L.isfinite(...).numpy() is treated as a single truthy
        # scalar here — assumes isfinite reduces over the whole tensor; the
        # sample is redrawn when either tensor is non-finite.
        if (not L.isfinite(gumbels).numpy()) or (not L.isfinite(probs).numpy()):
            continue
        else:
            return hardwts, [int(i) for i in index.numpy()]


class Network(Layer):
    """GDAS/DARTS search network: a stem, a stack of normal/reduction cells,
    and a linear classifier.  Architecture weights (``alphas``) are sampled
    per forward pass via Gumbel-softmax.

    Args:
        primitives: iterable of candidate op names (keys of ``OPS``).
        C: base channel count.
        layers: total number of cells; reductions occur at 1/3 and 2/3 depth.
        steps: intermediate nodes per normal cell.
        multiplier: trailing states concatenated per cell (output = multiplier * C_curr).
        stem_multiplier: channel multiplier for the stem conv.
        num_classes: classifier output size.
        tau: Gumbel-softmax temperature.
    """

    def __init__(self, primitives, C, layers=8, steps=4, multiplier=4, stem_multiplier=3, num_classes=10, tau=10.0):
        super().__init__()
        self.C = C
        self.num_classes = num_classes
        self.layers = layers
        self.steps = steps
        self.multiplier = multiplier
        self.primitives = primitives
        self.tau = tau

        C_curr = stem_multiplier * C
        self.stem = Sequential(
            Conv2d(3, C_curr, kernel_size=3, bias=False),
            BN(C_curr),
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = LayerList()
        reduction_prev = False
        for i in range(layers):
            # Double the channels and reduce spatially at 1/3 and 2/3 depth.
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            if reduction:
                cell = ReductionCell(C_prev_prev, C_prev, C_curr)
            else:
                cell = NormalCell(primitives, steps, multiplier, C_prev_prev, C_prev, C_curr, reduction_prev)
            self.cells.append(cell)
            reduction_prev = reduction
            # Every cell concatenates `multiplier` states of C_curr channels.
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.post_activ = BNAct(C_prev)
        self.avg_pool = GlobalAvgPool()
        self.classifier = Linear(C_prev, num_classes)

        self._initialize_alphas()

    def forward(self, input):
        # Sample straight-through one-hot weights for every step's alphas.
        hardwts, indices = zip(*[gumbel_sample(a, self.tau) for a in self.alphas])

        s0 = s1 = self.stem(input)
        for cell in self.cells:
            s0, s1 = s1, cell(s0, s1, hardwts, indices)
        # BUG FIX: apply BN+activation to the feature map *before* global
        # average pooling; the original pooled first, which batch-normalized
        # already-pooled (N, C) vectors instead of the (N, C, H, W) features
        # that post_activ = BNAct(C_prev) was built for.
        out = self.post_activ(s1)
        out = self.avg_pool(out)
        logits = self.classifier(out)
        return logits

    def _initialize_alphas(self):
        # One (num_edges, num_ops) parameter per intermediate node: node i
        # has i + 2 incoming edges.
        num_ops = len(self.primitives)

        self.alphas = ParameterList([
            L.create_parameter([i + 2, num_ops], 'float32', default_initializer=Normal(0, 1e-3))
            for i in range(self.steps)
        ])

    def model_parameters(self):
        """Yield only the model weights (excludes architecture alphas)."""
        for m in [self.stem, self.cells, self.post_activ, self.classifier]:
            for p in m.parameters():
                yield p

    def arch_parameters(self):
        """Return the architecture parameters (alphas) for the arch optimizer."""
        return self.alphas


def test_net():
    """Smoke test: construct a small search network.

    BUG FIX: the original called ``Network(8, 8, 4, 4, 1)``, passing the int
    ``8`` as ``primitives`` — MixedOp iterates over primitives and
    ``_initialize_alphas`` calls ``len()`` on it, so construction raised
    TypeError.  Pass a list of OPS keys instead.
    """
    # NOTE(review): standard DARTS primitive names — confirm they match the
    # keys registered in helm.dynamic.models.nas.darts.operations.OPS.
    primitives = ['max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3']
    net = Network(primitives, 8, layers=4, steps=4, multiplier=1)
