from networkx.algorithms.centrality.trophic import trophic_levels
import torch
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.optim import *
from torch import nn

from util.logger import Recorder
# Recorder configuration: CSV column header and output path for the
# per-node training-progress log.
# NOTE(review): "accurancy" is misspelled, but it is part of the CSV
# header written at runtime — kept as-is so downstream consumers of the
# CSV are not broken.
recorder_config = {
    "process": {
        "header" : "node, epoch, loss, accurancy, current, size",
        "file": "output/recorder/bnntree_process.csv",
    }
}

@Recorder(header=recorder_config["process"]["header"], recorder_file=recorder_config["process"]["file"], mode="w")
def recorde_process(*args):
    """
    Record one training-progress row.

    Args (positional, in header order):
        node_name, epoch, loss, accurancy, current, size

    The body is intentionally empty — presumably the Recorder decorator
    intercepts the call and appends *args as a CSV row to the configured
    file (verify against util.logger.Recorder).
    """
    pass

class BNNNode(nn.Module):
    """Binary decision node of the BNN tree.

    Each node owns a small binary classifier (``self.nn_stack``, built by
    the subclass's ``module()``) that decides whether a sample's label is
    at or below ``val`` (class 0) or above it (class 1).  Nodes link into
    a tree through ``left``/``right``.

    Args:
        val: threshold label value this node splits on.
        id: numeric identifier of the node.
        node_name: display name used for logging on inner nodes.
        inch: number of input channels fed to the node's network.
        level: depth of the node in the tree (0 = root).
    """
    def __init__(self, val, id, node_name, inch, level):
        super().__init__()
        self.id = id
        self.val = val
        self.node_name = node_name
        self.inch = inch
        self.level = level
        self.left = None   # left child (labels <= val)
        self.right = None  # right child (labels > val)

        # Subclasses must create self.nn_stack here.
        self.module()

    def module(self):
        """Build ``self.nn_stack``; must be implemented by subclasses."""
        raise NotImplementedError

    def set_left(self, left):
        self.left = left

    def set_right(self, right):
        self.right = right

    def forward(self, x):
        out = self.nn_stack(x)
        return out

    # NOTE(review): this overrides nn.Module.train(mode=True), so the usual
    # model.train()/model.eval() mode switching does not work on this class.
    # The name is kept unchanged so existing callers keep working.
    def train(self, dataloader, loss_fn, optimizer, device, epoch):
        """Run one training epoch of this node's binary classifier.

        Labels are binarized in place against ``self.val`` before the loss
        is computed: y <= val -> 0, y > val -> 1.  Progress is printed and
        recorded every 50 batches and once at epoch end.
        """
        size = len(dataloader.dataset)
        self.loss_fn = loss_fn
        acc_total = 0
        loss_total = 0
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(device), y.to(device)

            pred = self(X)
            # Binarize labels relative to this node's split value.
            y[y <= self.val] = 0
            y[y > self.val] = 1
            loss = loss_fn(pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            acc = (pred.argmax(1) == y).type(torch.float).sum().item()
            acc_total += acc
            loss_total += loss.item()
            if batch % 50 == 0:
                loss, current = loss.item() / len(X), (batch+1) * len(X)
                acc = 100 * acc / len(X)
                print(f"node[{self.get_node_name()}]  epoch:{epoch:>3d}  loss: {loss:>7f}    acc: {acc:>0.2f}%   [{current:>5d}/{size:>5d}]")
                recorde_process(self.get_node_name(), epoch, loss, acc, current, size)
        print(f"Train node[{self.get_node_name()}]  loss: {(loss_total / size):>7f}    acc: {(100 * acc_total / size):>0.1f}%" + "=" * 20)
        recorde_process(self.get_node_name(), epoch, loss_total / size, 100 * acc_total / size, size, size)

    def get_node_name(self):
        # Leaves (no children) are identified by their split value,
        # inner nodes by their display name.
        if self.left is None and self.right is None:
            return self.val
        return self.node_name

    def __str__(self):
        return str(self.val)

class BNNTMNISTNode(BNNNode):
    """Binary tree node with a small fixed CNN classifier (MNIST variant)."""

    def module(self):
        # Assumes 28x28 input (MNIST): conv3 -> 26, pool2 -> 13,
        # conv3 -> 11, so the flattened size is 32 * 11 * 11 = 3872
        # — TODO confirm against the dataloader.
        layers = [
            nn.Conv2d(self.inch, 32, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=3),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3872, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        ]
        self.nn_stack = nn.Sequential(*layers)


class BNNTCIFARNode(BNNNode):
    """Binary tree node for CIFAR whose classifier depends on tree depth.

    The root (level 0) gets a deeper BatchNorm CNN; level 1 gets a
    mid-size CNN with heavy dropout; levels 2-4 share one light CNN that
    differs only in dropout rate.
    """

    def module(self):
        self.nn_stack = self.level_node(self.level)

    def level_node(self, level):
        """Return a freshly built network for a node at tree depth ``level``.

        Networks are built lazily so that only the requested one allocates
        weights (the previous version constructed all five Sequentials on
        every call).

        Raises:
            KeyError: if ``level`` has no registered architecture
                (same behavior as the original dict lookup).
        """
        builders = {
            0: self._root_net,
            1: self._level1_net,
            2: lambda: self._light_net(dropout=0.3),
            3: lambda: self._light_net(dropout=0.2),
            4: lambda: self._light_net(dropout=0.2),
        }
        return builders[level]()

    def _root_net(self):
        """Deeper BatchNorm CNN used at the tree root (level 0)."""
        # Shape comments assume a 32x32 input (CIFAR) — TODO confirm.
        return nn.Sequential(
            nn.Conv2d(self.inch, 16, kernel_size=2),  # (16, 31, 31)
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Conv2d(16, 32, kernel_size=2),         # (32, 30, 30)
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2),                          # (32, 15, 15)
            nn.Conv2d(32, 48, kernel_size=3),         # (48, 13, 13)
            nn.ReLU(),
            nn.BatchNorm2d(48),
            nn.MaxPool2d(2),                          # (48, 6, 6)
            nn.Conv2d(48, 64, kernel_size=3),         # (64, 4, 4)
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.AvgPool2d(2),                          # (64, 2, 2)
            nn.Flatten(),
            nn.Linear(256, 64),                       # 64 * 2 * 2 = 256
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    def _level1_net(self):
        """Mid-size CNN with heavy dropout used at level 1."""
        return nn.Sequential(
            nn.Conv2d(self.inch, 32, kernel_size=3),  # (32, 30, 30)
            nn.ReLU(),
            nn.MaxPool2d(2),                          # (32, 15, 15)
            nn.Conv2d(32, 32, kernel_size=3),         # (32, 13, 13)
            nn.ReLU(),
            nn.AvgPool2d(2),                          # (32, 6, 6)
            nn.Flatten(),
            nn.Linear(1152, 512),                     # 32 * 6 * 6 = 1152
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    def _light_net(self, dropout):
        """Light CNN shared by levels 2-4; only the dropout rate varies."""
        return nn.Sequential(
            nn.Conv2d(self.inch, 16, kernel_size=3),  # (16, 30, 30)
            nn.ReLU(),
            nn.MaxPool2d(2),                          # (16, 15, 15)
            nn.Conv2d(16, 32, kernel_size=3),         # (32, 13, 13)
            nn.ReLU(),
            nn.AvgPool2d(2),                          # (32, 6, 6)
            nn.Flatten(),
            nn.Linear(1152, 512),                     # 32 * 6 * 6 = 1152
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(512, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )
