from HGMSimDataSet import HGMSimDataset
from torch.utils.data import  DataLoader
import sys
sys.path.append(r"/home/cyw/projects/malware_detected/HGMSim/basic_script")
from basic_script.dataPre import dataPre
from functionSimModel import functionSim
import torch  
from tqdm import tqdm
from datetime import datetime
from sklearn.metrics import roc_auc_score
from torch import nn
from functionSim_config import *
import pynvml
import random 
import numpy as np
import time
import psutil
from memory_profiler import profile


def write_to_logs(inf):
    """Append a timestamped message to the shared training log file and echo it to stdout."""
    entry = f"{datetime.now()}\t{inf}\n"
    with open("/home/cyw/projects/function_sim_project/all_logs/functionSIm_train_logs.txt", 'a+') as file:
        file.write(entry)
        print(entry)
        
# @profile
class functionModelWarpper():
    """Wrapper around the functionSim model.

    Builds (or adopts) a functionSim instance plus an Adam optimizer and a
    BCE loss, and provides a training loop and AUC-based evaluation helpers.
    Hyper-parameters (lr, epoch, laySize, depth, embSize, device, ...) come
    from the `functionSim_config` star-import at file level.
    """

    def __init__(self, model=None) -> None:
        """Create the wrapper.

        Args:
            model: an already-constructed model to wrap; when None, a fresh
                functionSim is built from the config globals and the run's
                hyper-parameters are written to the training log.
        """
        if model is None:  # `is None` rather than `== None` (PEP 8; avoids __eq__ surprises)
            self.model = functionSim(embSize=embSize, wordSize=wordsize, layerSize=laySize, depth=depth)
            write_to_logs("functionSim      use_hete:{}    use_cross:{}\n".format(use_heterogeous, str(use_cross_interaction)+" "+cross_interaction_name))
            write_to_logs("模型参数：lr:{}    epoch:{}    laysize:{}  depth:{}    embsize:{}    margin:{}\n".format(lr, epoch, laySize, depth, embSize, margin))
        else:
            self.model = model

        self.optimizer = torch.optim.Adam([{'params': self.model.parameters()}], lr=lr)
        self.loss = nn.BCELoss()

    def train(self, dataloader, testDataloader):
        """Train for `epoch` epochs; every 5th epoch evaluate AUC on both
        loaders and checkpoint the model only when the test AUC improves.

        Args:
            dataloader: training DataLoader yielding (label, sample) pairs.
            testDataloader: held-out DataLoader used for model selection.
        """
        print("----------------开始训练 use hete:{} use cross:{}----------------".format(use_heterogeous, use_cross_interaction))
        best_auc = -float("inf")
        cnt = -1
        for e in tqdm(range(epoch)):
            self.model.train()
            temploss = 0
            for i, (label, sample) in enumerate(dataloader):
                torch.cuda.empty_cache()
                self.optimizer.zero_grad()
                adj_x, att_x, vtype_x = sample[0], sample[1], sample[2]
                adj_x, att_x, vtype_x = adj_x.to(device), att_x.to(device), vtype_x.to(device)
                label = label.to(device)
                score = self.model(adj_x, att_x, vtype_x)
                loss = self.loss(score, label.unsqueeze(1))
                # .item() extracts a detached Python float; accumulating the
                # loss tensor itself would keep every batch's autograd graph
                # alive for the whole epoch (memory leak).
                temploss += loss.item()
                loss.backward()
                self.optimizer.step()
            cnt += 1
            print(temploss)
            if cnt % 5 == 0:
                print("train:")
                self.test_model_auc(dataloader)
                print("test:")
                tmp_auc = self.test_model_auc(testDataloader)
                # Checkpoint only on improvement — implements the original
                # "add a save condition" TODO (previously saved every time).
                if tmp_auc > best_auc:
                    best_auc = tmp_auc
                    torch.save(self.model, r'/home/cyw/projects/malware_detected/saveData/model/HGMSim.pth')
                    write_to_logs("模型已保存")
        print("训练完成")

    def test_model_auc(self, dataloader):
        """Score `dataloader` and report AUC, FN/FP counts, recall and
        (1 - precision) at a fixed 0.5 decision threshold.

        Returns:
            The ROC-AUC score (float).
        """
        ans, gt = self.get_model_score(dataloader)
        auc_score = roc_auc_score(gt, ans)
        preds = [1 if value > 0.5 else 0 for value in ans]
        num_ones = sum(gt)
        FN = sum(1 for pred, actual in zip(preds, gt) if pred == 0 and actual == 1)
        FP = sum(1 for pred, actual in zip(preds, gt) if pred == 1 and actual == 0)
        print("FN:{:.2f}  FP:{:.2f} auc:{:.2f}".format(FN, FP, auc_score))
        TP = num_ones - FN
        # Guard the degenerate cases (no positive labels / no positive
        # predictions) instead of raising ZeroDivisionError.
        recall = TP / num_ones if num_ones else 0.0
        precision = TP / (TP + FP) if (TP + FP) else 0.0
        print("recall:{:.2f}  1- precision:{:.2f}".format(recall, 1 - precision))

        return auc_score

    def get_model_score(self, dataloader):
        """Run the model in eval mode over `dataloader` and collect scores.

        Only the first three elements of each sample are used (adjacency,
        attributes, vertex types); any extra element would indicate a bug in
        the non-training data path.

        Returns:
            (ans, gt): flat Python lists of predicted scores and labels.
        """
        self.model.eval()
        with torch.no_grad():
            res = []
            labels = []
            for i, (label, sample) in enumerate(dataloader):
                torch.cuda.empty_cache()
                adj_x, att_x, vtype_x = sample[0], sample[1], sample[2]
                adj_x, att_x, vtype_x = adj_x.to(device), att_x.to(device), vtype_x.to(device)
                label = label.to(device)
                score = self.model(adj_x, att_x, vtype_x)
                # squeeze(1) (not squeeze()) keeps batch-size-1 outputs 1-D so
                # torch.cat below behaves for any batch size.
                res.append(score.squeeze(1))
                labels.append(label)
            ans = torch.cat(res).tolist()
            gt = torch.cat(labels).tolist()
        return ans, gt

# Temporary helper written to probe memory usage; delete once profiling is done.
# @profile
def detect_memory_use(dataloader):
    """Run one untrained functionSim inference pass over `dataloader` to
    measure memory/time cost of the scoring path.

    Mirrors functionModelWarpper.get_model_score: only the first three sample
    elements are used. Inference now runs under torch.no_grad() — previously
    every appended score kept its autograd graph alive, which both inflated
    the memory numbers this function exists to measure and diverged from the
    real eval path.

    Returns:
        (ans, gt): flat Python lists of predicted scores and labels.
    """
    model = functionSim(embSize=embSize, wordSize=wordsize, layerSize=laySize, depth=depth)
    res = []
    labels = []
    model.eval()
    torch.cuda.empty_cache()
    with torch.no_grad():
        for i, (label, sample) in enumerate(dataloader):
            adj_x, att_x, vtype_x = sample[0], sample[1], sample[2]
            adj_x, att_x, vtype_x = adj_x.to(device), att_x.to(device), vtype_x.to(device)
            label = label.to(device)
            score = model(adj_x, att_x, vtype_x)
            # squeeze(1) keeps batch-size-1 outputs 1-D so torch.cat works.
            res.append(score.squeeze(1))
            labels.append(label)
    ans = torch.cat(res).tolist()
    gt = torch.cat(labels).tolist()
    return ans, gt
    
def hyperParamsAdjust(trainDataloader, testDataloader):
    """Hyper-parameter sweep: train one model per candidate value.

    Bug fixed: the original unconditionally indexed tars["learning_rate"],
    which is commented out, so every call raised KeyError before any training
    started. The optimizer is now only rebuilt when a learning-rate grid is
    actually configured; otherwise the config default `lr` from __init__ is kept.

    NOTE(review): the laySize/depth values printed here are not actually fed
    into functionModelWarpper (it reads the config globals) — confirm whether
    the sweep is meant to mutate those globals per iteration.
    """
    tars = {}
    # tars["learning_rate"] = [0.00001,0.000003,0.000001]
    tars["laySize"] = [1, 2, 3, 4, 5]
    tars["depth"] = [1, 2, 3, 4, 5]

    for ind in range(len(tars["laySize"])):
        funcTrainer = functionModelWarpper()
        print("调整层数：{}".format(tars["laySize"][ind]))
        if "learning_rate" in tars:
            funcTrainer.optimizer = torch.optim.Adam(
                [{'params': funcTrainer.model.parameters()}],
                lr=tars["learning_rate"][ind])
        funcTrainer.train(trainDataloader, testDataloader)
# @profile
def run():
    """Script entry point: load the test dataset, build a model wrapper, and
    time one detect_memory_use pass over the test data.

    NOTE(review): the training path, hyper-parameter sweep and psutil memory
    probes are kept below as commented-out variants of this experiment; they
    reference `trainDataloader`, which only exists once the training-data
    section is re-enabled.
    """
    print("开始加载测试数据")
    # Alternative dataset layout: separate benign/malware folders with per-folder labels.
    # inputPaths = ["/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/test_final/benign",
    #               "/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/test_final/malware"]
    # is_malware = [False,True]
    inputPaths = ["/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/test_final/test"]
    is_malware = [True]  # single folder: every sample is labelled malware
    test_dataset = HGMSimDataset(inputPaths,is_malware,"test")
    testDataloader = DataLoader(test_dataset, batch_size=batchSize, shuffle = True, num_workers=40,\
                        collate_fn=test_dataset.adjust_samples_to_same_dimension)
    
    # Training data loading (disabled for this memory/timing run).
    # print("开始加载训练数据")
    # inputPaths = ["/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/train_final/benign",
    #               "/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/train_final/malware"]
    # is_malware = [False,True]
    # train_dataset = HGMSimDataset(inputPaths,is_malware,"train")
    # trainDataloader = DataLoader(train_dataset, batch_size=batchSize, shuffle = True, num_workers=40,\
    #                         collate_fn=train_dataset.adjust_samples_to_same_dimension)
    

    # NOTE(review): funcTrainer is only needed by the commented-out train /
    # get_model_score variants below; unused in the current configuration.
    funcTrainer = functionModelWarpper()
    # funcTrainer.train(trainDataloader,testDataloader)
    a = time.time()  # wall-clock timing of the inference pass
    detect_memory_use(testDataloader)
    b=time.time()
    print(b-a)
    # hyperParamsAdjust(trainDataloader,testDataloader)

    # # process = psutil.Process()
    # # info = process.memory_full_info()
    # # print(f"模型计算前内存占用: {info.uss / 1024 ** 2:.2f} MB")
    # a = time.time()
    # funcTrainer.get_model_score(testDataloader)
    # # info = process.memory_full_info()
    # # print(f"模型计算后的内存占用: {info.uss / 1024 ** 2:.2f} MB")
    # b= time.time()
    # print("检测{}个样本，累计耗时：{}".format(len(test_dataset),b-a))
    # print("内存消耗")
    # print("测试所使用的硬件情况")

# Standard script guard: run the experiment only when executed directly.
if __name__=="__main__":
    run()