import sys
sys.path.append("")
from utils.loss import FScoreLoss
import torch
import sklearn
import numpy as np
from torch import nn
from torch.nn.modules import Sequential
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

import time


# Default architecture hyper-parameters for build_model().
VECTOR_SIZE = 4096      # input feature dimension
HID_BLOCK = 6           # number of middle (hidden) blocks
HID_SIZE = 1024         # width of each hidden layer
BETA = 1                # default beta for the F-score loss
DROP_OUT_RATE = 0.05    # dropout probability in middle blocks
LEAKY_RATE = 0.125      # LeakyReLU slope (not used by build_model itself)

def build_model(vector_size=VECTOR_SIZE,
                num_block=HID_BLOCK,
                hid_size=HID_SIZE):
    """Build the MLP: head -> num_block middle blocks -> tail.

    head:   Linear(vector_size, hid_size) + BatchNorm + Sigmoid
    middle: Dropout + Linear(hid_size, hid_size) + BatchNorm + Sigmoid
    tail:   Linear(hid_size, 1) + Sigmoid (probability output)

    :param vector_size: input feature dimension
    :param num_block: number of middle blocks
    :param hid_size: hidden layer width
    :return: an nn.Sequential of num_block + 2 sub-blocks
    """

    def block(mode='middle'):
        # Build only the requested block; the original constructed all
        # three variants on every call and threw two of them away.
        if mode == 'head':
            return Sequential(nn.Linear(vector_size, hid_size),
                              nn.BatchNorm1d(num_features=hid_size),
                              nn.Sigmoid())
        if mode == 'middle':
            return Sequential(nn.Dropout(p=DROP_OUT_RATE),
                              nn.Linear(hid_size, hid_size),
                              nn.BatchNorm1d(num_features=hid_size),
                              nn.Sigmoid())
        if mode == 'tail':
            return Sequential(nn.Linear(hid_size, 1), nn.Sigmoid())
        # Fix: the original message misspelled "illegal".
        raise ValueError("illegal mode:{error_mode}".format(error_mode=mode))

    model = Sequential()
    model.append(block('head'))
    for _ in range(num_block):
        model.append(block())
    model.append(block('tail'))
    return model


class clf_model(sklearn.base.BaseEstimator):
    """Binary classifier wrapping the MLP from build_model() behind a
    (partial) sklearn estimator interface.

    The model ends in a Sigmoid, so outputs are probabilities in [0, 1];
    training uses FScoreLoss(beta) as the criterion.
    """

    def __init__(self, epoches=10, verbose=0, validation=None, beta=1) -> None:
        # validation: optional (val_X, val_y) pair scored after each epoch.
        self.model = build_model()
        self.criterion = FScoreLoss(beta)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.epoches = epoches
        self.verbose = verbose
        self.acc_device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.conf_mtx_record = []
        self.auc_record = []
        self.validation = validation
        if self.validation is not None:
            self.val_X, self.val_y = self.validation
            self.val_X = torch.tensor(self.val_X.astype(np.float32)).to(
                self.acc_device)

    def fit(self, X, y):
        """Train for self.epoches epochs on numpy arrays X, y.

        StratifiedKFold(n_splits=100) is used purely as a batching scheme:
        each fold's test indices form one class-balanced mini-batch of
        roughly 1% of the data.
        """
        skf = StratifiedKFold(n_splits=100)
        self.model.to(self.acc_device)
        self.model.train()
        if self.verbose != 0:
            print('Start Training')

        for epoch in range(self.epoches):
            start_time = time.time()
            for _, batch_index in skf.split(X, y):
                # Inputs and targets do not need gradients — only the model
                # parameters do. (The original set requires_grad=True on
                # both, wasting memory on input gradients; targets must
                # never require grad.)
                batch_X = torch.tensor(
                    X[batch_index].astype(np.float32)).to(self.acc_device)
                batch_y = torch.tensor(
                    y[batch_index].astype(np.float32)).to(self.acc_device)
                y_pred = self.model(batch_X).reshape(-1)
                loss = self.criterion(y_pred, batch_y)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            # Once per epoch is enough; the original emptied the CUDA cache
            # after every batch, which badly slows training.
            if self.acc_device == 'cuda':
                torch.cuda.empty_cache()
            end_time = time.time()

            if self.verbose != 0 and self.validation is not None:
                print(
                    'epoch_{epoch}\t\ttime:{time:.2f}s\t\tloss:{loss}'.format(
                        epoch=epoch,
                        time=end_time - start_time,
                        loss=loss.item()))

                # Score in eval mode / no-grad so BatchNorm uses running
                # statistics (the original predicted in train mode).
                self.model.eval()
                with torch.no_grad():
                    y_pred = self.model(self.val_X).to('cpu').numpy()
                self.model.train()
                conf_mtx = confusion_matrix(
                    self.val_y,
                    np.around(y_pred).astype("int64"))
                self.conf_mtx_record.append(conf_mtx)
                self.auc_record.append(roc_auc_score(self.val_y, y_pred))

        self.model.to('cpu')
        self.model.eval()

    def predict(self, X):
        """Return hard 0/1 labels for numpy array X."""
        X = torch.from_numpy(X.astype(np.float32))
        with torch.no_grad():
            return np.round(self.model(X).numpy())

    def predict_proba(self, X):
        """Return raw sigmoid probabilities for numpy array X."""
        X = torch.from_numpy(X.astype(np.float32))
        with torch.no_grad():
            return self.model(X).numpy()

class GCN(nn.Module):
    def __init__(self, graph, in_c, hid_c, out_c):
        """
        Two-layer GCN over a fixed graph.
        :param graph: adjacency matrix [N, N] (without self loops)
        :param in_c: input channels
        :param hid_c: hidden nodes
        :param out_c: output channels
        """
        super(GCN, self).__init__()
        self.linear_1 = nn.Linear(in_c, hid_c)
        self.linear_2 = nn.Linear(hid_c, out_c)
        self.act = nn.ReLU()
        self.graph = graph
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def forward(self, data):
        """
        :param data: node features [B, N, D]
        :return: [B, N, 1, out_c]
        """
        graph_data = self.process_graph(self.device, self.graph)  # [N, N]
        flow_x = data  # [B, N, D]
        output_1 = self.linear_1(flow_x)  # [B, N, hid_C]
        output_1 = self.act(torch.matmul(graph_data, output_1))  # [N,N] x [B,N,hid_C]
        output_2 = self.linear_2(output_1)
        output_2 = self.act(torch.matmul(graph_data, output_2))  # [B, N, out_C]

        return output_2.unsqueeze(2)

    @staticmethod
    def process_graph(device, graph_data):
        """Return D^(-1)(A + I): row-normalized adjacency with self loops.

        Fix: the original did `graph_data += matrix_i`, mutating the stored
        graph in place, so every forward() call stacked one more identity
        onto self.graph. The `device` parameter is kept for interface
        compatibility; the identity is built on graph_data's own device.
        """
        N = graph_data.size(0)
        matrix_i = torch.eye(N, dtype=graph_data.dtype,
                             device=graph_data.device)
        graph_data = graph_data + matrix_i  # A~ = A + I (out of place)

        degree_matrix = torch.sum(graph_data, dim=-1, keepdim=False)  # [N]
        degree_matrix = degree_matrix.pow(-1)
        degree_matrix[degree_matrix == float("inf")] = 0.  # isolated nodes

        degree_matrix = torch.diag(degree_matrix)  # [N, N]

        return torch.mm(degree_matrix, graph_data)  # D^(-1) * A~ = \hat(A)

class GraphAttentionLayer(nn.Module):
    def __init__(self, in_c, out_c, alpha=0.2):
        """
        Single graph attention head (GAT layer).
        :param in_c: input feature size
        :param out_c: output feature size
        :param alpha: negative slope of the LeakyReLU attention activation
        """
        super(GraphAttentionLayer, self).__init__()
        self.in_c = in_c
        self.out_c = out_c
        self.alpha = alpha

        self.W = nn.Parameter(torch.empty(size=(in_c, out_c)))
        nn.init.xavier_normal_(self.W.data)
        self.a = nn.Parameter(torch.empty(size=(2 * out_c, 1)))
        nn.init.xavier_normal_(self.a.data)
        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, features, adj):
        """
        :param features: node features [B, N, in_c]
        :param adj: adjacency matrix [N, N]; self loops added here
        :return: [B, N, out_c]
        """
        B, N = features.size(0), features.size(1)
        # Fix: create the identity on adj's device instead of hard-coded
        # .cuda(), which crashed on CPU-only machines (and mixed devices).
        adj = adj + torch.eye(N, dtype=adj.dtype, device=adj.device)  # A+I
        h = torch.matmul(features, self.W)  # [B, N, out_c]
        # All pairwise concatenations of node features: [B, N, N, 2*out_c]
        a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1),
                             h.repeat(1, N, 1)],
                            dim=2).view(B, N, -1, 2 * self.out_c)
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))  # [B,N,N]
        # Mask non-edges with a large negative value so softmax ignores them.
        zero_vec = -1e12 * torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)  # [B, N, N]
        attention = F.softmax(attention, dim=2)  # normalize over neighbors
        h_prime = torch.matmul(attention, h)  # [B,N,N] x [B,N,out_c]
        return h_prime

class GAT(nn.Module):
    def __init__(self, graph, in_c, hid_c, out_c, n_heads=6):
        """Multi-head graph attention network over a fixed graph.

        :param graph: adjacency matrix [N, N], stored on the module.
        :param in_c: int, number of input channels.
        :param hid_c: int, number of hidden channels per head.
        :param out_c: int, number of output channels.
        :param n_heads: number of parallel attention heads.
        """
        super(GAT, self).__init__()
        heads = [GraphAttentionLayer(in_c, hid_c) for _ in range(n_heads)]
        self.attentions = nn.ModuleList(heads)
        self.conv2 = GraphAttentionLayer(hid_c * n_heads, out_c)
        self.act = nn.ELU()
        self.graph = graph

    def forward(self, data):
        """data: node features [B, N, D]; returns [B, N, 1, out_c]."""
        adj = self.graph  # [N, N]

        # Run every head on the raw features and concatenate along the
        # feature axis, then project down with the second attention layer.
        head_outputs = [head(data, adj) for head in self.attentions]
        hidden = self.act(torch.cat(head_outputs, dim=-1))
        result = self.act(self.conv2(hidden, adj))

        return result.unsqueeze(2)

class ChebConv(nn.Module):
    """Chebyshev spectral graph convolution (ChebNet layer)."""

    def __init__(self, in_c, out_c, K, bias=True, normalize=True):
        """
        :param in_c: input channels
        :param out_c: output channels
        :param K: the order of the Chebyshev Polynomial
        :param bias: if use bias
        :param normalize: if use the normalized laplacian
        """
        super(ChebConv, self).__init__()
        self.normalize = normalize

        # One weight matrix per polynomial term (orders 0..K).
        self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))  # [K+1, 1, in_c, out_c]
        init.xavier_normal_(self.weight)

        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
            init.zeros_(self.bias)
        else:
            self.register_parameter("bias", None)

        # Internally self.K is the number of polynomial terms (order + 1).
        self.K = K + 1

    def forward(self, inputs, graph):
        """
        :param inputs: the input data, [B, N, C]
        :param graph: the graph structure, [N, N]
        :return: convolution result, [B, N, D]
        """
        L = ChebConv.get_laplacian(graph, self.normalize)  # [N, N]
        mul_L = self.cheb_polynomial(L).unsqueeze(1)  # [K, 1, N, N]
        result = torch.matmul(mul_L, inputs)  # [K, B, N, C]
        result = torch.matmul(result, self.weight)  # [K, B, N, D]
        result = torch.sum(result, dim=0)  # [B, N, D]
        # Fix: with bias=False self.bias is None and the original added it
        # unconditionally, raising a TypeError.
        if self.bias is not None:
            result = result + self.bias

        return result

    def cheb_polynomial(self, laplacian):
        """
        Compute the Chebyshev polynomial terms T_0..T_{K-1} of the laplacian
        via the recurrence T_k = 2 * L * T_{k-1} - T_{k-2}.

        :param laplacian: the graph laplacian, [N, N]
        :return: stacked polynomial terms, [K, N, N]
        """
        N = laplacian.size(0)  # [N, N]
        multi_order_laplacian = torch.zeros([self.K, N, N], device=laplacian.device, dtype=torch.float)  # [K, N, N]
        multi_order_laplacian[0] = torch.eye(N, device=laplacian.device, dtype=torch.float)  # T_0 = I

        if self.K == 1:
            return multi_order_laplacian

        multi_order_laplacian[1] = laplacian  # T_1 = L
        for k in range(2, self.K):
            multi_order_laplacian[k] = 2 * torch.mm(laplacian, multi_order_laplacian[k - 1]) - \
                                       multi_order_laplacian[k - 2]

        return multi_order_laplacian

    @staticmethod
    def get_laplacian(graph, normalize):
        """
        Compute the laplacian of the graph.

        :param graph: the graph structure without self loop, [N, N]
        :param normalize: whether to use the normalized laplacian
            L = I - D^(-1/2) A D^(-1/2); otherwise L = D - A
        :return: laplacian, [N, N]
        """
        if normalize:
            # NOTE(review): an isolated node (degree 0) yields inf in
            # D^(-1/2); callers appear to use fully-connected graphs —
            # confirm before relying on this with sparse graphs.
            D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
            L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype) - torch.mm(torch.mm(D, graph), D)
        else:
            D = torch.diag(torch.sum(graph, dim=-1))
            L = D - graph
        return L


class ChebNet(nn.Module):

    def __init__(self, graph, in_c, hid_c, out_c, K):
        """Two-layer ChebNet over a fixed graph.

        :param graph: adjacency matrix [N, N], stored on the module.
        :param in_c: int, number of input channels.
        :param hid_c: int, number of hidden channels.
        :param out_c: int, number of output channels.
        :param K: order of the Chebyshev polynomial in each ChebConv.
        """
        super(ChebNet, self).__init__()
        self.conv1 = ChebConv(in_c=in_c, out_c=hid_c, K=K)
        self.conv2 = ChebConv(in_c=hid_c, out_c=out_c, K=K)
        self.act = nn.ReLU()
        self.graph = graph

    def forward(self, data):
        """data: node features [B, N, D]; returns [B, N, 1, out_c]."""
        adjacency = self.graph  # [N, N]

        hidden = self.act(self.conv1(data, adjacency))
        result = self.act(self.conv2(hidden, adjacency))

        return result.unsqueeze(2)

class multi_model():
    """Ensemble head: combines the outputs of the 6 per-class small models
    (self.model_list) through a GNN over a fully-connected 6-node graph.

    Each row of the training loader is laid out as
    [4096 feature columns | 6 label columns].
    """

    def __init__(self, model_list, epoches, beta=1) -> None:
        # beta is accepted for interface compatibility but is not used:
        # the GNN head is trained with BCEWithLogitsLoss.
        self.model_list = model_list
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.epoches = epoches
        self.criterion = torch.nn.BCEWithLogitsLoss().to(self.device)
        self.model = self._build("GAT")  # "GCN", "GAT", "ChebNet"
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)

    def _build(self, model):
        """Build the GNN head over a fully-connected 6-node graph
        (zero diagonal; the GNNs add self loops themselves)."""
        graph = np.ones((6, 6))
        np.fill_diagonal(graph, 0)
        graph = torch.from_numpy(graph.astype(np.float32)).to(self.device)

        if model == "GCN":
            return GCN(graph, 1, 6, 1)
        elif model == "GAT":
            return GAT(graph, 1, 6, 1)
        elif model == "ChebNet":
            return ChebNet(graph, 1, 6, 1, 1)
        else:
            raise NotImplementedError()

    def _stack_small_outputs(self, batch_X):
        """Run every small model on batch_X and stack their outputs.

        :param batch_X: feature tensor on self.device, [n, 4096]
        :return: stacked predictions, shape (num_models, n)
        """
        stacked = None
        for small_model in self.model_list:
            small_model.model.to(self.device)
            y_pred = small_model.model(batch_X).reshape(-1)
            # Fix: use `is None` — the original compared a tensor with
            # `!= None`, which relies on fragile elementwise semantics.
            if stacked is None:
                stacked = y_pred
            else:
                stacked = torch.vstack((stacked, y_pred))
        return stacked

    # Shapes: X is (n, 4096), labels are (n, 6). With k models per class
    # there would be 6*k intermediate predictions; the GNN head reduces
    # them to the final (n, 6) output.
    def fit(self, train_loader):
        """Train the GNN head; the small models act as frozen feature
        extractors (their parameters are not in self.optimizer)."""
        self.model.to(self.device)
        self.model.train()

        print('Start Training')

        for epoch in range(self.epoches):
            start_time = time.time()
            total_loss = 0

            for data in train_loader:
                # Each row: first 4096 columns are features, last 6 labels.
                batch_X = data[:, :4096].type(torch.float32).to(self.device)
                batch_y = data[:, 4096:].type(torch.float32).to(self.device)

                yy = self._stack_small_outputs(batch_X)
                # (6, n) -> (n, 6, 1): one scalar node feature per class.
                yy = yy.T.reshape((len(batch_y), 6, 1))
                y_pred = self.model(yy).reshape(len(batch_y), 6)

                loss = self.criterion(y_pred, batch_y)
                total_loss += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            end_time = time.time()

            print('epoch_{epoch}\t\ttime:{time:.2f}s\t\tloss:{loss}'.format(
                epoch=epoch,
                time=end_time - start_time,
                loss=total_loss))

        self.model.eval()

    def predict_without_GNN(self, X):
        """Return the raw stacked small-model outputs, (n, 6), skipping
        the GNN head entirely."""
        rows = []
        for XX in X:
            # Prediction needs no gradients (the original set
            # requires_grad=True unnecessarily).
            batch_X = torch.tensor(
                np.array([XX.astype(np.float32)])).to(self.device)
            yy = self._stack_small_outputs(batch_X)
            rows.append(yy.detach().cpu().numpy().reshape(1, 6))
        # Preserve original behavior of returning None for empty input.
        return np.vstack(rows) if rows else None

    def predict_proba(self, X):
        """Return the GNN head's (n, 6) outputs; small-model outputs are
        rounded to hard 0/1 node features first, as in the original."""
        print("predict_proba")
        rows = []
        self.model.to(self.device)
        for XX in X:
            batch_X = torch.tensor(
                np.array([XX.astype(np.float32)])).to(self.device)
            yy = self._stack_small_outputs(batch_X)
            # Fix: the original called `yy.to(self.device)` without
            # reassigning the result (a no-op).
            yy = torch.round(yy).to(self.device).reshape(1, 6, 1)

            # Fix: run the GNN head once — the original evaluated it twice
            # per sample and discarded the first result.
            temp = self.model(yy).detach().cpu().numpy()
            rows.append(temp.reshape(1, 6))
        return np.vstack(rows) if rows else None