import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np

from NN.nn_module import NNM
from util.logger import track_log
from util.wordnet import *


class RNWN(NNM):
    """
    ResNet18-WordNet head.

    A linear head whose output is split into class logits (first 10
    columns) and WordNet hypernym-rule logits (remaining columns),
    trained with a combined classification + rule cross-entropy loss.
    """
    # CSV logger recording per-step loss components: loss1, loss2, total.
    loss_log = track_log("output/loss_log.csv").get_logger()

    def module(self):
        # Single linear layer mapping backbone features to the output width.
        self.nn_stack = nn.Sequential(
            nn.Linear(self.in_channels, self.y_shape),
        )

    def forward(self, x):
        return self.nn_stack(x)

    def set_loss(self, loss_num):
        # Selects which loss loss_fn dispatches to (1 -> embedding_loss,
        # anything else -> custom_fn).
        self.loss_num = loss_num

    def loss_fn(self, y1, y2):
        # BUG FIX: original used `self.loss_num is 1` — identity (not
        # equality) comparison with an int literal is implementation-
        # dependent and a SyntaxWarning since Python 3.8.
        if self.loss_num == 1:
            return self.embedding_loss(y1, y2)
        return self.custom_fn(y1, y2)

    def custom_fn(self, y1, y2):
        """
        loss_num <= 0 (default): plain CrossEntropyLoss for cifar10
        without the embedded hypernym rule tree.
        """
        return nn.CrossEntropyLoss()(y1, y2)

    def embedding_loss(self, y1, y2):
        """
        loss_num == 1: cross entropy on the first 10 logits (class
        prediction) plus a soft cross entropy between the remaining
        logits and each label's WordNet rule distribution.
        """
        pred_label = y1[:, 0:10]
        pred_rule = y1[:, 10:]
        rule = self.get_rule(y2)
        # F.log_softmax is numerically stabler than log(softmax(...)),
        # which can underflow to log(0) = -inf.
        pre_rule_log = F.log_softmax(pred_rule, dim=1)

        alpha = 1    # weight of the label classification term
        beta = 0.1   # weight of the rule term
        loss1 = nn.CrossEntropyLoss()(pred_label, y2)
        # Soft-target cross entropy, averaged over the batch.
        loss2 = -torch.sum(rule * pre_rule_log) / rule.shape[0]

        loss = alpha * loss1 + beta * loss2
        self.loss_log.info(f"{loss1.item()}, {loss2.item()}, {loss.item()}")
        return loss

    def get_rule(self, y):
        # Map each integer label tensor element to its WordNet rule vector.
        # NOTE(review): `rule` and `num_2_class` come from util.wordnet's
        # star import — assumed to be dict-like; verify against that module.
        return torch.Tensor([rule[num_2_class[str(x.item())]] for x in y])

    

class RNWNAUG(NNM):
    """
    ResNet18-WordNet augmentation.

    Two-branch head: a feature branch and a rule branch whose logits are
    concatenated and fed to the final classifier, so rule information
    participates in the class prediction.
    """
    # CSV logger recording per-step loss components and rule tensors.
    loss_log = track_log("output/loss_rule_aug_log.csv").get_logger()

    def module(self):
        # Feature branch: backbone features -> 32 dims.
        self.l1 = nn.Linear(self.in_channels, 32)

        # Rule branch: backbone features -> 256 -> 9 rule logits.
        self.l1_rule = nn.Linear(self.in_channels, 256)
        self.l1_rule1 = nn.Linear(256, 9)

        # Classifier over the concatenation: 32 + 9 = 41 dims.
        self.l2 = nn.Linear(41, self.y_shape)

    def forward(self, X):
        a1 = self.l1(X)  # feature branch: (batch, 32)

        a1_rule = self.l1_rule(X)  # (batch, 256)
        # Kept on self so loss_fn can read the rule logits after forward.
        self.a1_rule1 = self.l1_rule1(a1_rule)  # (batch, 9)

        a2 = torch.cat((a1, self.a1_rule1), dim=1)  # (batch, 32 + 9 = 41)
        return self.l2(a2)

    def loss_fn(self, y1, y2):
        pred_rule = self.a1_rule1
        rule = self.get_rule(y2)

        # F.log_softmax is numerically stabler than log(softmax(...)),
        # which can underflow to log(0) = -inf.
        pre_rule_log = F.log_softmax(pred_rule, dim=1)

        alpha = 1    # weight of the label classification term
        beta = 0.1   # weight of the rule term
        loss1 = nn.CrossEntropyLoss()(y1, y2)
        # Soft-target cross entropy, averaged over the batch.
        loss2 = -torch.sum(rule * pre_rule_log) / rule.shape[0]

        loss = alpha * loss1 + beta * loss2
        self.loss_log.info(
            f"{loss1.item()}, {loss2.item()}, {loss.item()}, "
            f"{pred_rule.detach().numpy()}, {rule.detach().numpy()}"
        )
        return loss

    def get_rule(self, y):
        # Map each integer label tensor element to its WordNet rule vector.
        # NOTE(review): `rule` and `num_2_class` come from util.wordnet's
        # star import — assumed to be dict-like; verify against that module.
        return torch.Tensor([rule[num_2_class[str(x.item())]] for x in y])


    