import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parameter import Parameter
from torch_geometric.data import Data ,Batch
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GCNConv, global_mean_pool
import math
import sys
import os
import json
import pickle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tasks.bvd.annotation_parser import annotation_json_parser

# TODO: find a better way to store model parameters

# GCN model
class GCN(nn.Module):
    """GCN classifier over per-sample assembly graphs.

    Node features are projected to the hidden size by a linear adapter and
    passed through two stacks of three GCNConv layers each.  Graph-level
    logits are produced either by a global mean pool over all nodes
    ("every node votes") or — when a sensitive-function annotation exists
    for the sample — by averaging only the nodes whose addresses fall
    inside an annotated sensitive range.
    """

    def __init__(self, nfeat, nhid, nclass, batch_size=1, annotation_dir=None, annotation_usage=None, device=None, dropout=0.5):
        # nfeat: number of input features per node
        # nhid: hidden size (adapter output and every GCNConv in/out size)
        # nclass: number of output classes for the task
        # batch_size: stored for callers; not used directly in forward
        # annotation_dir: root directory holding per-sample annotation files
        # annotation_usage: 'vote' or 'all' enables annotation-guided pooling
        # device: target device; auto-selects cuda:0/cpu when None
        # dropout: dropout rate (stored; not currently applied in forward)
        super(GCN, self).__init__()

        self.dropout = dropout
        self.batch_size = batch_size
        self.nhid = nhid
        self.annotation_dir = annotation_dir
        self.annotation_usage = annotation_usage

        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        self.adapte_linear = nn.Linear(nfeat, nhid)

        self.conv1 = torch.nn.ModuleList([GCNConv(nhid, nhid) for _ in range(3)])
        self.conv2 = torch.nn.ModuleList([GCNConv(nhid, nhid) for _ in range(3)])
        self.linear = nn.Linear(self.nhid, self.nhid)
        # NOTE: 'classicfier' is a typo, but the attribute name is kept so
        # previously saved state_dicts continue to load.
        self.classicfier = nn.Linear(self.nhid, nclass)

        self.to(self.device)

    def _encode(self, data):
        # Shared feature extractor: adapter projection followed by the two
        # three-layer GCNConv stacks, ReLU after every layer.
        x = F.relu(self.adapte_linear(data.x))
        for conv in self.conv1:
            x = F.relu(conv(x, data.edge_index))
        for conv in self.conv2:
            x = F.relu(conv(x, data.edge_index))
        return x

    def _global_vote(self, x, batch):
        # Default readout: every node votes via a global mean pool, then the
        # pooled vector is classified.
        pooled = global_mean_pool(x, batch)
        return self.classicfier(self.linear(pooled))

    # Old forward: every asm node gets exactly one vote (global mean pool).
    # (A leftover debug print/exit() pair was removed from this method.)
    def forward_old(self, data):
        data = data.to(self.device)
        x = self._encode(data)
        return self._global_vote(x, data.batch)

    # New forward: when sensitive functions are annotated, only their nodes
    # may vote; otherwise behaves like forward_old.
    def forward(self, data):
        data = data.to(self.device)
        x = self._encode(data)
        addr_ls = data.addr_ls[0]

        sample_name = data.file_name[0].split('/')[-2]
        # NOTE: the label extracted here is only used to locate the annotation
        # file on disk; it does not feed the model's decision directly.
        if 'good' in data.file_name[0].split('/'):
            sample_label = 'good'
        else:
            sample_label = 'bad'

        annotation_path = os.path.join(str(self.annotation_dir), sample_label, sample_name)
        if not (os.path.exists(annotation_path) and self.annotation_usage in ('vote', 'all')):
            # No annotation path: fall back to all-node voting.
            return self._global_vote(x, data.batch)

        asm_annotation_path = os.path.join(annotation_path, r'result.json')
        decompile_annotation_path = os.path.join(annotation_path, r'decompile_result.json')
        if not (os.path.exists(asm_annotation_path) and os.path.exists(decompile_annotation_path)):
            # Annotation files missing: fall back to all-node voting.
            return self._global_vote(x, data.batch)

        parser = annotation_json_parser(asm_annotation_path, decompile_annotation_path)
        annotation = parser.construct()
        if {**annotation.inlined_func_dict, **annotation.direct_sensitive_dict, **annotation.plt_sensitive_addr_dict} == {}:
            # Annotation exists but is empty: fall back to all-node voting.
            return self._global_vote(x, data.batch)

        # Only nodes whose address falls inside a sensitive range may vote.
        # TODO: averaging strategy may change; also consider batched handling.
        sensitive_asm_addr_pair_ls = parser.get_sensitive_asm_addrs(annotation)
        node_cnt = 0
        mean_tensor = torch.zeros_like(x[0])
        for idx in range(len(addr_ls)):
            addr = int(addr_ls[idx], 16)
            for (start_addr, end_addr) in sensitive_asm_addr_pair_ls:
                if int(start_addr, 16) <= addr <= int(end_addr, 16):
                    mean_tensor += x[idx]
                    node_cnt += 1
                    break
        if node_cnt != 0:
            mean_tensor /= node_cnt
        else:
            # TODO: some annotations carry addresses that never match any node;
            # add proper handling once the annotation pipeline is fixed.
            print('error:')
            print(sample_label, sample_name)
            print(sensitive_asm_addr_pair_ls)
            print(addr_ls)
        y = self.linear(mean_tensor.reshape(1, self.nhid))
        return self.classicfier(y)


# GCN Visualization model
class GCNSampleVisual_old(nn.Module):
    """Wraps a trained GCN to dump per-node classification scores for
    visualization, accumulating every sample into one pickle file.

    Superseded by GCNSampleVisual, which writes one pickle per sample.
    """

    def __init__(self, base_mod, vis_file, device=None, dropout=0.5):
        # base_mod: path to a saved GCN state_dict
        # vis_file: pickle file accumulating {file_name: per-node results}
        # device: target device; auto-selects cuda:0/cpu when None
        # BUG FIX: the original called super(GCNSampleVisual, self).__init__(),
        # naming the wrong class — GCNSampleVisual is not in this class's MRO,
        # so instantiation raised TypeError.  Use the zero-argument form.
        super().__init__()
        self.dropout = dropout
        self.vis_file = vis_file
        self.base_mod = base_mod
        self.model = None
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        if os.path.exists(self.base_mod):
            # NOTE: only valid for the predefined GCN hyper-parameters below.
            mod = GCN(nfeat=128, nhid=256, nclass=2)  # TODO: make configurable
            mod.load_state_dict(torch.load(self.base_mod, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
            self.adapte_linear = mod.adapte_linear
            self.conv1 = mod.conv1
            self.conv2 = mod.conv2
            self.linear = mod.linear
            self.classicfier = mod.classicfier
        else:
            print('ERROR: GCN File Can Not Be Loaded!')
            exit(-1)
        self.to(self.device)

    def forward(self, data):
        data = data.to(self.device)
        file_name = data.file_name[0]
        addr_ls = data.addr_ls[0]
        x = F.relu(self.adapte_linear(data.x))
        for i in range(3):
            x = F.relu(self.conv1[i](x, data.edge_index))
        for i in range(3):
            x = F.relu(self.conv2[i](x, data.edge_index))

        # Per-node scores paired with the node's address (-1 when the node has
        # no corresponding entry in addr_ls).
        # TODO: replace the Python loop with a batched computation.
        result = []
        for idx, node in enumerate(x):
            tmp = self.classicfier(self.linear(node))
            addr = addr_ls[idx] if idx < len(addr_ls) else -1
            result.append([tmp, addr])

        # Merge this sample's results into the accumulating pickle file,
        # using context managers so the handles are always closed.
        pkl_dict = {}
        if os.path.exists(self.vis_file):
            with open(self.vis_file, 'rb') as f:
                pkl_dict = pickle.load(f)
        pkl_dict[file_name] = result
        with open(self.vis_file, 'wb') as f:
            pickle.dump(pkl_dict, f)

        # Graph-level prediction, identical to the base GCN's all-node vote.
        x = global_mean_pool(x, data.batch)
        y = self.linear(x)
        y = self.classicfier(y)
        return y

class GCNSampleVisual(nn.Module):
    """Wraps a trained GCN to dump per-node classification scores for
    visualization, writing one pickle file per sample into vis_info_dir.
    """

    def __init__(self, base_mod, vis_info_dir, device=None, dropout=0.5):
        # base_mod: path to a saved GCN state_dict
        # vis_info_dir: directory receiving one pickle per visualized sample
        # device: target device; auto-selects cuda:0/cpu when None
        super(GCNSampleVisual, self).__init__()
        self.dropout = dropout
        self.vis_info_dir = vis_info_dir
        self.base_mod = base_mod
        self.model = None
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        if os.path.exists(self.base_mod):
            # NOTE: only valid for the predefined GCN hyper-parameters below.
            mod = GCN(nfeat=128, nhid=256, nclass=2)  # TODO: make configurable
            mod.load_state_dict(torch.load(self.base_mod, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
            self.adapte_linear = mod.adapte_linear
            self.conv1 = mod.conv1
            self.conv2 = mod.conv2
            self.linear = mod.linear
            self.classicfier = mod.classicfier
        else:
            print('ERROR: GCN File Can Not Be Loaded!')
            exit(-1)
        self.to(self.device)

    def forward(self, data):
        data = data.to(self.device)
        # Build the per-sample output file name from the last five path
        # components, joined with '@' and re-suffixed from .json to .pkl.
        file_name = data.file_name[0].split('/')[-5:]
        target_file_name = '@'.join(file_name).replace('.json', '.pkl')
        target_file_route = os.path.join(self.vis_info_dir, target_file_name)

        addr_ls = data.addr_ls[0]
        x = F.relu(self.adapte_linear(data.x))
        for i in range(3):
            x = F.relu(self.conv1[i](x, data.edge_index))
        for i in range(3):
            x = F.relu(self.conv2[i](x, data.edge_index))

        # Per-node scores paired with the node's address (-1 when the node has
        # no corresponding entry in addr_ls).
        # TODO: replace the Python loop with a batched computation.
        result = []
        for idx, node in enumerate(x):
            tmp = self.classicfier(self.linear(node))
            addr = addr_ls[idx] if idx < len(addr_ls) else -1
            result.append([tmp, addr])

        # Merge this sample's results into its pickle file, using context
        # managers so the handles are always closed.
        pkl_dict = {}
        if os.path.exists(target_file_route):
            with open(target_file_route, 'rb') as f:
                pkl_dict = pickle.load(f)
        pkl_dict[target_file_name] = result
        with open(target_file_route, 'wb') as f:
            pickle.dump(pkl_dict, f)

        # Graph-level prediction, identical to the base GCN's all-node vote.
        x = global_mean_pool(x, data.batch)
        y = self.linear(x)
        y = self.classicfier(y)
        return y