import random
import torch
import numpy as np
import os
import psutil
import pynvml
from functionSim_config import *
from torch import nn
from sklearn.metrics import roc_auc_score
from datetime import datetime
import time
import shelve
from tqdm import tqdm
from functionSimModel import functionSim
from dataPre import dataPre
from functionSIm_dataPre import functionSimDataset
from torch.utils.data import DataLoader
import sys
sys.path.append(r"/home/CYW/projects/function_sim_project/basic_script")


def write_to_logs(inf):
    """Append a timestamped message to the shared training log and echo it.

    Args:
        inf: Message text to record.
    """
    with open("/home/cyw/projects/function_sim_project/all_logs/functionSIm_train_logs.txt", 'a+') as file:
        data = str(datetime.now()) + "\t" + inf + "\n"
        file.write(data)
        # `data` already ends with "\n"; end="" avoids the blank line the
        # original print() inserted after every log message.
        print(data, end="")


def get_gpu_memory_usage(str="0"):
    """Print the current memory usage (GB) of GPU 0.

    Args:
        str: Label prefixed to the printed line.
            NOTE(review): this parameter shadows the builtin ``str``; the name
            is kept so existing keyword callers keep working, but it should be
            renamed (e.g. to ``tag``) in a coordinated change.
    """
    pynvml.nvmlInit()
    gpu_id = 0
    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
    info = pynvml.nvmlDeviceGetMemoryInfo(handle)
    # info.used is in bytes; convert to GB.
    used_memory = info.used / (1024 * 1024 * 1024)
    print("{}:  显存占用 {} GB".format(str, used_memory))
    # The original left NVML initialized; release it once we are done reading.
    pynvml.nvmlShutdown()


def seed_everything(seed=666):
    """Seed every RNG used in training so results are reproducible.

    Covers Python's ``random``, NumPy, PyTorch (CPU and all CUDA devices),
    and forces deterministic cuDNN kernels.

    Args:
        seed: Seed value shared by all generators.
    """
    random.seed(seed)
    # Fix the hash seed as well; note this only affects subprocesses spawned
    # after this point, not hashing in the current interpreter.
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN trades some speed for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


class functionModelWarpper():
    """Training/evaluation wrapper around the ``functionSim`` model.

    Reads its hyper-parameters (lr, epoch, margin, ...) from the module-level
    globals imported via ``functionSim_config``.

    NOTE(review): the class name keeps the original "Warpper" spelling so
    existing callers continue to work.
    """

    def __init__(self, model=None) -> None:
        """Build (or adopt) a model and set up its Adam optimizer.

        Args:
            model: Pre-built model to wrap. When None, a fresh ``functionSim``
                is created from the global config and the hyper-parameters are
                written to the training log.
        """
        if model is None:
            self.model = functionSim(
                embSize=embSize, wordSize=wordsize, layerSize=laySize, depth=depth)
            write_to_logs("functionSim      use_hete:{}    use_cross:{}\n".format(
                use_heterogeous, str(use_cross_interaction)+" "+cross_interaction_name))
            write_to_logs("模型参数：lr:{}    epoch:{}    laysize:{}  depth:{}    embsize:{}    margin:{}\n".format(
                lr, epoch, laySize, depth, embSize, margin))
        else:
            self.model = model

        # Adam was selected from earlier experiments (SGD+momentum, RMSprop,
        # Adagrad, Adadelta were also tried); only build the one we use.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
        # Triplet / contrastive losses are computed inline in train().

    def get_model_dataloader(self, datas, name, is_train, is_shuffle=True):
        """Wrap ``datas`` in a DataLoader for training or evaluation.

        Args:
            datas: Pair-info records for one split (train/valid/test).
            name: Split name, forwarded to the dataset.
            is_train: Whether the dataset should yield training triplets.
            is_shuffle: Whether the loader shuffles; disable for evaluation.

        Returns:
            A DataLoader using the dataset's own collate function.

        Raises:
            AssertionError: In disassembly experiments, if the configured
                tool has no implementation.
        """
        if is_disassemble_experiment and disassemble_tool not in disassemble_name:
            # Explicit raise instead of `assert 1 == 0` so the check survives
            # running under `python -O`; same exception type as before.
            raise AssertionError("{}反汇编工具并未实现".format(disassemble_tool))
        # Both config branches built the identical loader, so only the tool
        # check above depends on is_disassemble_experiment.
        my_dataset = functionSimDataset(datas, name, is_train=is_train)
        return DataLoader(my_dataset, batch_size=batchSize, shuffle=is_shuffle, num_workers=16,
                          collate_fn=my_dataset.adjust_samples_to_same_dimension)

    def train(self, dataloader, valid_dataloader, validValues):
        """Run the triplet-loss training loop with per-epoch validation.

        The best model (by validation AUC) is saved unless ``quick_train``
        is set; per-epoch losses are appended to the loss log when tuning.

        Args:
            dataloader: Yields (query, target1, target2, target3) batches,
                where target2 is similar to target1 and target3 is dissimilar.
            valid_dataloader: Loader used to compute validation AUC.
            validValues: Ground-truth labels aligned with valid_dataloader.
        """
        print("----------------开始训练 use hete:{} use cross:{}----------------".format(
            use_heterogeous, use_cross_interaction))
        totalLoss = "-"  # "-"-separated per-epoch loss string for the loss log
        best_auc = -float("inf")
        for e in tqdm(range(epoch)):
            self.model.train()
            temploss = 0.0
            for i, (query, target1, target2, target3) in enumerate(dataloader):
                torch.cuda.empty_cache()
                self.optimizer.zero_grad()

                adj_x, att_x, vtype_x = target1
                adj_y, att_y, vtype_y = target2
                adj_z, att_z, vtype_z = target3

                adj_x, adj_y, adj_z = adj_x.to(
                    device), adj_y.to(device), adj_z.to(device)
                att_x, att_y, att_z = att_x.to(
                    device), att_y.to(device), att_z.to(device)
                vtype_x, vtype_y, vtype_z = vtype_x.to(
                    device), vtype_y.to(device), vtype_z.to(device)

                score1 = self.model(adj_x, att_x, vtype_x,
                                    adj_y, att_y, vtype_y)  # B*1
                score2 = self.model(adj_x, att_x, vtype_x,
                                    adj_z, att_z, vtype_z)

                # Triplet loss: the similar pair's score (score1) must beat
                # the dissimilar pair's (score2) by at least `margin`.
                loss = torch.mean(torch.relu(score2 - score1 + margin))

                # Contrastive-loss alternative (score1 similar, score2 dissimilar):
                # loss = torch.mean(torch.pow(1 - score1, 2) +
                #                   torch.pow(torch.clamp(margin - 1 + score2, min=0.0), 2))

                loss.backward()
                self.optimizer.step()
                # Accumulate a detached scalar; the original kept the loss
                # tensor itself, retaining autograd state for the whole epoch.
                temploss += loss.item()
            temp_auc = self.test_model_auc(valid_dataloader, validValues)
            if show_test_auc:
                # NOTE(review): `test_dataloader` / `testvalues` are module
                # globals set in __main__ — this path only works when train()
                # is invoked from this script; confirm before reusing.
                temp_auc1 = self.test_model_auc(test_dataloader, testvalues)
                write_to_logs("epoch:{} auc:{} test auc:{} loss:{}".format(
                    e, temp_auc, temp_auc1, temploss))
            else:
                write_to_logs("epoch:{} auc:{} loss:{}".format(
                    e, temp_auc, temploss))
            if temp_auc > best_auc:
                best_auc = temp_auc
                if quick_train:
                    write_to_logs("quick train，不保存模型")
                else:
                    self._save_model()
                    write_to_logs("模型已保存")
            totalLoss += str(float(temploss)) + "-"
        if is_adjust_params:
            with open("/home/cyw/projects/function_sim_project/all_data/indicators/lossdata/functionSimLoss.txt", 'a+') as file:
                file.write("模型参数{}  参数值{}    loss情况如下：\n".format(
                    tar_params_name, tar_params_value))
                file.write(totalLoss+"\n")
            write_to_logs(
                "训练loss参数已保存   /home/cyw/projects/function_sim_project/all_data/indicators/lossdata/functionSimLoss.txt")
        print("训练完成")

    def _save_model(self):
        """Persist the full model to the path chosen by the global config flags."""
        base = r'/home/cyw/projects/function_sim_project/all_data/models/'
        if is_adjust_params:
            path = base + 'functionSim_model_{}_{}.pth'.format(
                tar_params_name, tar_params_value)
        elif is_disassemble_experiment:
            path = base + '{}_model.pth'.format(disassemble_tool)
        elif use_cross_interaction and use_heterogeous:
            path = base + 'functionSim_model_{}.pth'.format(cross_interaction_name)
        elif use_cross_interaction:
            path = base + 'functionSim_with_cross_model.pth'
        elif use_heterogeous:
            path = base + 'functionSim_with_hete_model.pth'
        else:
            path = base + 'functionSim_zero_model.pth'
        torch.save(self.model, path)

    def test_model_auc(self, dataloader, values):
        """Return the ROC-AUC of model scores against ground-truth ``values``."""
        return roc_auc_score(values, self.get_model_score(dataloader))

    def get_model_score(self, dataloader):
        """Score every (query, candidate) pair yielded by ``dataloader``.

        In eval mode only the first two targets carry data; target3 is a
        training-only slot and is ignored here.

        Returns:
            A flat Python list of similarity scores, in loader order.
        """
        self.model.eval()
        res = []
        with torch.no_grad():
            for query, target1, target2, target3 in dataloader:
                torch.cuda.empty_cache()
                adj_x, att_x, vtype_x = target1
                adj_y, att_y, vtype_y = target2
                adj_x, att_x, vtype_x = adj_x.to(device), att_x.to(
                    device), vtype_x.to(device)
                adj_y, att_y, vtype_y = adj_y.to(device), att_y.to(
                    device), vtype_y.to(device)
                # B*1 similarity scores for this batch.
                res.append(self.model(adj_x, att_x, vtype_x,
                                      adj_y, att_y, vtype_y))
        return torch.cat(res).tolist()


# def search_params():


"""
        TODO notes:
        1. Add more trainable parameters
            W matrices / fully-connected layers are not reused
            ------> FC layers were added where parts were summed directly; W matrices seem to be present everywhere
        2. Re-tune the hyper-parameters
            this will likely be slow
        3. Change the heterogeneous aggregation (R-GCN)
            use the newer HetGNN neighbor- and type-aggregation (BiLSTM)
        4. Produce graph-level embeddings
            do not simply aggregate function-node features; use set2set

"""
if __name__ == "__main__":
    """
        nohup python /home/cyw/projects/function_sim_project/function_sim/functionSim_train.py
    """
    datapre = dataPre()
    if not is_disassemble_experiment:
        data_pair_infs = datapre.get_pair_infs()  # prepared train/valid/test sample pairs
    else:
        # Disassembly experiment: load the pair infos built from the
        # alternate disassembler's output.
        data_pair_infs = datapre.get_pair_infs(
            selectPath=r"/home/cyw/projects/function_sim_project/all_data/EECG_pair_infs/dis_pair_infs")

    # Index 2 of each pair record holds the ground-truth similarity label.
    testvalues = [pair[2] for pair in data_pair_infs["test"]]
    validvalues = [pair[2] for pair in data_pair_infs["valid"]]

    if is_adjust_params:
        write_to_logs("functionSim  开始搜索参数")
        params = {}
        # Grid-search history (chosen values noted at the line ends):
        # params["lr"]=[1,0.1,0.03,0.01,0.003,0.001,0.0003,0.0001,0.00003,0.00001]
        # params["epoch"]=[2,4,8,16,64,128,32]
        # params["laySize"]=[1,3,4,5,2] 1
        # params["depth"]=[1,3,4,5,2]  3
        # params["embSize"]=[32,64,256,128]
        # Fine-tuning round:
        # params["lr"]=[0.001,0.0003,0.0001,0.00003]
        # params["laySize"]=[1,3,2]
        # params["depth"]=[1,3,2]
        params["margin"] = [0.4, 0.7, 1, 1.3]

        for param in params.keys():
            tar_params_name = param
            print("正在搜索{}参数".format(tar_params_name))
            for value in params[param]:
                tar_params_value = value
                # Rebind the module-level hyper-parameter; the wrapper's
                # __init__ reads these globals when building the model.
                if tar_params_name == "lr":
                    lr = value
                elif tar_params_name == "epoch":
                    epoch = value
                elif tar_params_name == "embSize":
                    embSize = value
                elif tar_params_name == "laySize":
                    laySize = value
                elif tar_params_name == "depth":
                    depth = value
                elif tar_params_name == "margin":
                    margin = value
                funcTrainer = functionModelWarpper()
                # NOTE(review): the search trains on the "valid" split and
                # evaluates on "test" — presumably a deliberate shortcut for
                # fast parameter search; confirm before reusing.
                validdataloader = funcTrainer.get_model_dataloader(
                    data_pair_infs["valid"], "valid", is_train=True)
                testdataloader = funcTrainer.get_model_dataloader(
                    data_pair_infs["test"], "test", is_train=False, is_shuffle=False)
                funcTrainer.train(validdataloader, testdataloader, testvalues)
    else:
        if quick_train:
            print("正在快速验证模型性能")
            funcTrainer = functionModelWarpper()
            # Quick sanity run: train on the (smaller) test split instead of
            # the full training set.
            train_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["test"], "test", is_train=True)
            valid_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["valid"], "valid", is_train=False, is_shuffle=False)
            test_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["test"], "test", is_train=False, is_shuffle=False)
            funcTrainer.train(train_dataloader, valid_dataloader, validvalues)
        elif is_disassemble_experiment:
            # Disassembly experiment: same pipeline, data produced by the
            # configured disassembler.
            print("反汇编实验")
            print("funtionSim基于{}工具处理数据，训练模型\n".format(disassemble_tool))
            funcTrainer = functionModelWarpper()
            train_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["train"], "train", is_train=True)
            valid_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["valid"], "valid", is_train=False, is_shuffle=False)
            test_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["test"], "test", is_train=False, is_shuffle=False)
            funcTrainer.train(train_dataloader, valid_dataloader, validvalues)
        else:
            print("funtionSim 正在训练模型")
            funcTrainer = functionModelWarpper()
            # test_dataloader is read as a module global by train() when
            # show_test_auc is enabled, so it must be built here.
            test_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["test"], "test", is_train=False, is_shuffle=False)
            train_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["train"], "train", is_train=True)
            valid_dataloader = funcTrainer.get_model_dataloader(
                data_pair_infs["valid"], "valid", is_train=False, is_shuffle=False)
            funcTrainer.train(train_dataloader, valid_dataloader, validvalues)
