import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

# Hyper-parameters for the soft decision tree.
params_dict = {}
params_dict["input_dim"] = 416 * 416 * 3   # flattened 416x416 RGB image
params_dict["max_depth"] = 10              # depth at which leaves are attached
params_dict["output_dim"] = 2              # number of classes
params_dict["lr"] = 0.01
params_dict["momentum"] = 0.5
params_dict["batch_size"] = 64
params_dict["log-interval"] = 10
params_dict["weight_decay"] = 0.001

# Select the compute device once.  (The previous placeholder assignment
# params_dict["cuda"] = 'cpu' was dead code — it was unconditionally
# overwritten here — and has been removed.)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
params_dict["cuda"] = device


# Modified
class InnerNode():
    """Internal routing node of the soft decision tree.

    Each inner node applies an affine gate sigmoid(beta * (w.x + b)) giving
    the probability of routing a sample to the *right* child.  Parameters
    are registered externally by SoftDecisionTree.collect_parameters().
    """

    def __init__(self, depth, params_dict):
        self.leaf = False
        self.prob = None  # right-routing probability from the last forward pass

        self.params_dict = params_dict
        # Gate: a single affine function of the flattened input.
        self.fc = nn.Linear(self.params_dict["input_dim"], 1)

        # Inverse temperature, initialised from the standard normal N(0, 1).
        beta = torch.randn(1)
        self.beta = nn.Parameter(beta)

        # lmbda is the penalty coefficient; it decays exponentially with
        # depth (0.1 * 2^-depth), so shallower nodes are regularised harder.
        self.lmbda = 0.1 * 2 ** (-depth)
        self.penalties = []

        self.build_child(depth)
        self.leaf_accumulator = []

    def reset(self):
        """Clear per-batch accumulators on this node and its whole subtree."""
        self.leaf_accumulator = []
        self.penalties = []
        self.left.reset()
        self.right.reset()

    def build_child(self, depth):
        """Attach inner children until max_depth is reached, then leaves."""
        if depth < self.params_dict["max_depth"]:
            self.left = InnerNode(depth + 1, self.params_dict)
            self.right = InnerNode(depth + 1, self.params_dict)
        else:
            self.left = LeafNode(self.params_dict)
            self.right = LeafNode(self.params_dict)

    def forward(self, x):
        """Probability of routing x to the right child, shape (batch, 1)."""
        return (torch.sigmoid(self.beta * self.fc(x)))

    def select_next(self, x):
        """Hard (greedy) routing: return (child, prob) for traversal."""
        prob = self.forward(x)
        if prob < 0.5:
            return (self.left, prob)
        else:
            return (self.right, prob)

    def cal_prob(self, x, path_prob):
        """Recursively collect (path_prob, Q) pairs from every leaf below.

        path_prob is the probability of reaching this node from the root;
        it is split between the children according to the gate output.
        """
        self.prob = self.forward(x)  # probability of selecting the right node
        self.path_prob = path_prob   # probability inherited from the parent
        left_leaf_accumulator = self.left.cal_prob(x, path_prob * (1 - self.prob))
        right_leaf_accumulator = self.right.cal_prob(x, path_prob * self.prob)
        self.leaf_accumulator.extend(left_leaf_accumulator)
        self.leaf_accumulator.extend(right_leaf_accumulator)
        return (self.leaf_accumulator)

    def get_penalty(self):
        """Return (alpha, lmbda) pairs for this node and all inner descendants.

        BUG FIX: previously the node's own penalty was appended only inside
        the `if not self.left.leaf` branch, so the deepest inner nodes (the
        ones whose children are leaves) never contributed any regularisation.
        The penalty is now always recorded; recursion still stops at leaves.
        """
        penalty = (torch.sum(self.prob * self.path_prob) / torch.sum(self.path_prob), self.lmbda)
        self.penalties.append(penalty)
        if not self.left.leaf:
            self.penalties.extend(self.left.get_penalty())
            self.penalties.extend(self.right.get_penalty())
        return (self.penalties)


class LeafNode():
    """Leaf of the soft decision tree: holds a learnable class distribution."""

    def __init__(self, params_dict):
        self.params_dict = params_dict
        # BUG FIX: the original wrapped the tensor in nn.Parameter and then
        # called .cuda() on it, which returns a plain non-leaf Tensor — the
        # stored object would no longer receive gradients and cannot be
        # appended to a ParameterList.  Create the tensor on the configured
        # device first, then wrap it, so self.param stays a leaf Parameter.
        # (torch.device accepts both a device object and the strings
        # 'cpu' / 'cuda' that params_dict["cuda"] may hold.)
        dist = torch.randn(self.params_dict["output_dim"],
                           device=self.params_dict["cuda"])
        self.param = nn.Parameter(dist)
        self.leaf = True
        self.softmax = nn.Softmax(dim=1)

    def forward(self):
        """Class distribution Q of this leaf, shape (1, output_dim)."""
        return (self.softmax(self.param.view(1, -1)))

    def reset(self):
        # Leaves keep no per-batch state.
        pass

    def cal_prob(self, x, path_prob):
        """Return [[path_prob, Q]] with Q broadcast to the batch size.

        x is unused here; it is accepted to mirror InnerNode.cal_prob.
        """
        Q = self.forward()
        Q = Q.expand((path_prob.size()[0], self.params_dict["output_dim"]))
        return ([[path_prob, Q]])


class SoftDecisionTree(nn.Module):
    """Soft decision tree (Frosst & Hinton style) over flat input vectors.

    The full tree is built eagerly in __init__; parameters of every node are
    gathered into a ModuleList / ParameterList so that .parameters(),
    .cuda(), state_dict() etc. see them.

    NOTE: the deprecated torch.autograd.Variable wrappers (identity since
    PyTorch 0.4) have been removed; behavior is unchanged.
    """

    def __init__(self, params_dict):
        super(SoftDecisionTree, self).__init__()
        self.params_dict = params_dict
        self.root = InnerNode(1, self.params_dict)
        # Collect parameters and modules living under the root node.
        self.collect_parameters()
        self.define_extras(self.params_dict["batch_size"])

    def define_extras(self, batch_size):
        """(Re)build batch-size-dependent buffers.

        Call again whenever the batch size changes (e.g. for the smaller
        last batch of an epoch), since path_prob_init is sized per batch.
        """
        # NOTE(review): target_onehot is kept for backward compatibility but
        # appears unused — cal_loss builds its one-hot target via F.one_hot.
        self.target_onehot = torch.FloatTensor(batch_size, self.params_dict["output_dim"])
        self.path_prob_init = torch.ones(batch_size, 1)

        if self.params_dict["cuda"] != "cpu":
            self.target_onehot = self.target_onehot.cuda()
            self.path_prob_init = self.path_prob_init.cuda()

    def cal_loss(self, x, y):
        """Compute (loss, output) for one batch.

        x: input batch; flattened to (batch, input_dim).
        y: integer class labels, shape (batch,).

        Returns:
            loss: negative path-weighted log-likelihood plus the gate
                  balance penalty C.
            output: (batch, output_dim) distribution of the leaf reached
                    with the highest path probability per sample.
        """
        # View each sample as a flat vector.
        x = x.flatten(1)

        batch_size = y.size()[0]
        y_ = y.long()

        if self.params_dict["cuda"] != "cpu":
            x = x.cuda()
            y_ = y_.cuda()

        y_ = F.one_hot(y_, num_classes=self.params_dict["output_dim"])

        # Every leaf reports a (path probability, class distribution Q) pair.
        leaf_accumulator = self.root.cal_prob(x, self.path_prob_init)
        loss = 0.
        max_prob = [-1. for _ in range(batch_size)]
        max_Q = [torch.zeros(self.params_dict["output_dim"]) for _ in range(batch_size)]

        for (path_prob, Q) in leaf_accumulator:
            # TQ[i] = log-likelihood of sample i's true class under this leaf.
            TQ = torch.bmm(y_.view(batch_size, 1, self.params_dict["output_dim"]).float(),
                           torch.log(Q).view(batch_size, self.params_dict["output_dim"], 1).float()).view(-1, 1)
            loss += path_prob * TQ
            path_prob_numpy = path_prob.cpu().data.numpy().reshape(-1)
            # Track, per sample, the leaf reached with the highest probability.
            for i in range(batch_size):
                if max_prob[i] < path_prob_numpy[i]:
                    max_prob[i] = path_prob_numpy[i]
                    max_Q[i] = Q[i]
        loss = loss.mean()
        penalties = self.root.get_penalty()
        C = 0.
        # NOTE(review): torch.log(penalty) is NaN when a gate saturates at
        # exactly 0 or 1 — consider clamping penalty into (eps, 1 - eps)
        # if training diverges.
        for (penalty, lmbda) in penalties:
            C -= lmbda * 0.5 * (torch.log(penalty) + torch.log(1 - penalty))
        output = torch.stack(max_Q)
        self.root.reset()  # clear per-batch accumulators for the next call
        return (-loss + C, output)

    def collect_parameters(self):
        """Breadth-first walk registering every node's fc / beta / leaf param."""
        nodes = [self.root]
        self.module_list = nn.ModuleList()
        self.param_list = nn.ParameterList()
        while nodes:
            node = nodes.pop(0)
            if node.leaf:
                self.param_list.append(node.param)
            else:
                nodes.append(node.right)
                nodes.append(node.left)
                self.param_list.append(node.beta)
                self.module_list.append(node.fc)
