# To do:
# 1. Extract the data (node_feature, label) from the .npy files
# 2. Train with the GP algorithm and record recall and top10~90 accuracy metrics
# 3. Test
import numpy as np
import os
import re
import matplotlib.pyplot as plt
# from sklearn.utils import check_random_state
import joblib
import csv
import torch
from SR_model import SPL
from sklearn.linear_model import Ridge
from numpy import *
from sklearn.metrics import mean_squared_error
import time
import sys
sys.path.append('../GNN')
from o5_get_score import get_score
from o5_get_score_resub import get_resub_score
# from pydotplus.graphviz import graph_from_dot_data
# TODO:
# 1. Record est_gp results in the multi-domain setting
# 2. Record est_gp results in the single-domain setting
# 3. Record est_gp_lut results in the multi-domain setting
# 4. Record est_gp_lut results in the single-domain setting
# 5. For est_gp_no_lut, est_gp_lut, and est_gp_no_lut + 0.25*est_gp_lut, store both the top-k and the 0.5-threshold classification results
class Trainer():
    """Train and evaluate symbolic-regression-style scoring models.

    Loads (node_feature, label) pairs from directories of .npy files, fits a
    model with the configured search method (MCTS / Random / SVD /
    SVD_symbolic), and reports top-k accuracy plus 0.5-threshold
    confusion-matrix metrics (see train / evaluate / get_information).
    All constructor arguments are optional and are simply stored on the
    instance; the individual methods read them lazily.
    """
    def __init__(
        self,
        seed = None,
        npy_data_train_path = None,
        npy_data_test_path = None,
        search_method = None, #MCTS/DSR/GPLearn (train also handles Random/SVD/SVD_symbolic)
        normalize = None, #True/False: column-wise min-max scaling of features
        feature_selection = None, # lut/no_lut/all
        loss_type = None,
        GNN_enhanced = None,
        gcnmodel = None,
        # SPL
        num_run = None,
        transplant_step = None,
        motif_library = None,
        max_len = None,
        epsilon = None,
        # DSR
        n_samples = None,
        batch_size = None,
        # SVD
        SVD_symbolic_enhanced = None,
        SVD_selected_features = None,
        # heuristics operator
        operator = None
    ):
        # Store every configuration knob verbatim.
        self.seed = seed
        self.npy_data_train_path = npy_data_train_path
        self.npy_data_test_path = npy_data_test_path
        self.search_method = search_method
        self.normalize = normalize
        self.feature_selection = feature_selection
        self.loss_type = loss_type
        self.GNN_enhanced = GNN_enhanced
        self.gcnmodel = gcnmodel
        self.num_run = num_run
        self.transplant_step = transplant_step
        self.motif_library = motif_library
        self.max_len = max_len
        self.epsilon = epsilon
        self.n_samples = n_samples
        self.batch_size = batch_size
        self.SVD_symbolic_enhanced = SVD_symbolic_enhanced
        self.SVD_selected_features = SVD_selected_features
        self.operator = operator

    def train(self):
        """Fit a model on the training set, persist it, and evaluate on the test set.

        Side effects: prints dataset statistics, dumps the fitted model with
        joblib under ../../../models/baseline/..., and (via self.evaluate ->
        get_information -> save_csv) appends metrics to a CSV file.

        Raises:
            ValueError: if self.search_method or self.operator is not one of
                the values handled here (previously this surfaced as a
                NameError on an unbound local).
        """
        if self.GNN_enhanced: # only supported for the no_lut feature set
            x_train, y_train = self.get_GNN_enhanced_data(self.npy_data_train_path, self.gcnmodel)
        else:
            x_train, y_train = self.get_data(self.npy_data_train_path)
        print('the raw x_train is', x_train[0,:])
        self.npy_file_name = self.get_npy_file_name(self.npy_data_test_path)
        x_test, y_test = self.get_data(self.npy_data_test_path)
        if self.operator == 'mfs2':
            # mfs2 features: columns [:5] are structural, [5:] are LUT columns.
            if self.feature_selection == 'no_lut':
                x_train = x_train[:,:5]
                x_test = x_test[:,:5]
            elif self.feature_selection == 'lut':
                x_train = x_train[:, 5:]
                x_test = x_test[:, 5:]
                x_train = self.process(x_train)  # binarize LUT columns
                x_test = self.process(x_test)
            elif self.feature_selection == 'all':
                pass
        print('train_positive_num', len(np.where(y_train>0)[0]))
        print('train_negative_num', len(np.where(y_train==0)[0]))
        print('test_positive_num', len(np.where(y_test>0)[0]))
        print('test_negative_num', len(np.where(y_test==0)[0]))
        print('x_train', x_train[0:5,:])
        print('x_test', x_test[0:5,:])
        print('y_train', y_train)
        print('y_test', y_test)
        print('the max number of y_train is', np.max(y_train))
        # Symbolic-regression search.
        start_time = time.time()
        if self.search_method == 'MCTS':
            spl = SPL(x_train, y_train, x_test, y_test, task = self.npy_file_name, num_run=self.num_run, transplant_step=self.transplant_step, motif_library = self.motif_library, max_len = self.max_len, loss_type=self.loss_type, epsilon=self.epsilon, feature_selection=self.feature_selection)
            SR_agents, error_rates, success_rate = spl.process()
            print('the training error rates are ', error_rates)
            print('the success rate is', success_rate)
            print('SR agents are', SR_agents)
            for SR_agent in SR_agents:
                self.evaluate(SR_agent, x_test, y_test, x_train)
            # On ties for the minimum error rate, keep the earliest agent.
            SR_agent = SR_agents[error_rates.index(min(error_rates))]
            print('The index of the best MCTS_agent is', error_rates.index(min(error_rates)))
        elif self.search_method == 'Random':
            np.random.seed(self.seed)
            SR_agent = np.random.rand(len(y_test))
        elif self.search_method == 'SVD':
            SR_agent = self.SVD_model(x_train)
        elif self.search_method == 'SVD_symbolic':
            SR_agent = self.SVD_model(x_train)
        else:
            raise ValueError(f'unsupported search_method: {self.search_method}')
        end_time = time.time()
        print('The all training time is', end_time - start_time)
        if self.operator == 'mfs2':
            save_dir = f'../../../models/baseline/seed{self.seed}'
        elif self.operator == 'resub':
            save_dir = f'../../../models/baseline/resub'
        else:
            raise ValueError(f'unsupported operator: {self.operator}')
        # exist_ok avoids the exists()/makedirs() race of the old code.
        os.makedirs(save_dir, exist_ok=True)
        joblib.dump(SR_agent, os.path.join(save_dir, f'{self.search_method}_model_{self.npy_file_name}_loss_{self.loss_type}_seed{self.seed}.pkl'))
        self.evaluate(SR_agent, x_test, y_test, x_train)

    def get_npy_file_name(self, npy_data_test_path):
        """Extract the design name from files named 'save_data_total_<name>.*'.

        Scans the directory and returns the <name> captured from the last
        matching file (all files in one directory are expected to share the
        same name, so which match wins does not normally matter).

        Raises:
            FileNotFoundError: if no file in the directory matches the
                pattern (the old code raised NameError/AttributeError when
                the directory was empty or the last file did not match).
        """
        pattern = re.compile(r'save_data_total_(.*?)\.')
        match = None
        for file in os.listdir(npy_data_test_path):
            found = pattern.search(file)
            if found is not None:
                match = found  # keep the last match, as before
        if match is None:
            raise FileNotFoundError(
                f'no file matching save_data_total_<name>.* in {npy_data_test_path}')
        return match.group(1)

    def print_evaluation(self, y_true, y_pred, method_type, loss_type):
        """Print the evaluation loss of the given method; only 'mse' is handled."""
        if loss_type != 'mse':
            return
        mse = mean_squared_error(y_true, y_pred)
        print(f'the {loss_type} loss of {method_type} is', mse)
        
    def get_data(self, npy_data_path):
        """Load and stack features/labels from every .npy file in a directory.

        Each file holds a pickled dict with 'features_list' and 'labels_list';
        only the first entry of each list is used.

        Returns:
            (x, y): float32 feature matrix and flattened float32 label vector.
        """
        x = []
        y = []
        for npy_file in os.listdir(npy_data_path):
            npy_file_path = os.path.join(npy_data_path, npy_file)
            data = np.load(npy_file_path, allow_pickle=True).item()
            x_data = data['features_list'][0]
            if self.normalize:
                # Fix: x_data used to be assigned only inside this branch, so
                # normalize=False raised NameError (or appended stale data).
                x_data = self.normalization(x_data)
            x.append(x_data)
            y.append(data['labels_list'][0])
        x = np.vstack(x).astype(np.float32)
        y = np.vstack(y).astype(np.float32).reshape(-1,)
        return x, y

    def get_GNN_enhanced_data(self, npy_data_path, gcnmodel):
        """Build GNN-enhanced training data: features plus label+GNN-score targets.

        Delegates feature/score extraction to the operator-specific scorer
        (get_score / get_resub_score); the regression target is
        labels + predict_scores, i.e. the ground-truth label shifted by the
        GNN's predicted score.

        Note: the old version also loaded and stacked every .npy file first,
        then discarded that result — that dead I/O loop has been removed.

        Raises:
            ValueError: for an unknown self.operator (previously an unbound
                local NameError).
        """
        if self.operator == 'mfs2':
            features, labels, predict_scores = get_score(
                npy_data_path, gcnmodel=gcnmodel, save_path=None,
                normalize_type=self.normalize, feature_selection=self.feature_selection)
        elif self.operator == 'resub':
            features, labels, predict_scores = get_resub_score(
                npy_data_path, gcnmodel=gcnmodel, save_path=None, normalize_type=False)
            if self.normalize:
                features = self.normalization(features)
        else:
            raise ValueError(f'unsupported operator: {self.operator}')
        print('the max number of predict_scores is', np.max(predict_scores))
        x = features.astype(np.float32)
        # Target = label + GNN score, so the searcher regresses both signals.
        y = labels.reshape(-1,) + predict_scores.reshape(-1,)
        return x, y

    def evaluate(self, SR_agent, x_test, y_test, x_train):
        """Score x_test with the fitted agent and report top-k / 0.5-threshold metrics.

        x_train is accepted for interface compatibility but is not used here.

        Raises:
            ValueError: for an unsupported self.search_method (previously this
                fell through with y_prediction unbound and raised NameError).
        """
        if self.search_method == 'MCTS':
            y_prediction = self.score_for_MCTS(SR_agent, x_test)
            print('y_prediction is', y_prediction)
            if self.feature_selection != 'lut':
                y_prediction = self.sigmoid(y_prediction)
        elif self.search_method == 'Random':
            y_prediction = SR_agent
        elif self.search_method == 'SVD':
            y_prediction = self.SVD_sort(SR_agent, x_test)
            y_prediction = self.sigmoid(y_prediction)
        elif self.search_method == 'SVD_symbolic':
            x_test = x_test[:,self.SVD_selected_features]
            y_prediction = self.SVD_sort(SR_agent, x_test)
            y_prediction = self.sigmoid(y_prediction)
        else:
            raise ValueError(f'unsupported search_method: {self.search_method}')
        y_prediction_indexes = np.argsort(y_prediction, kind = 'stable')
        y_prediction_sort = np.sort(y_prediction)  # predictions in ascending order
        print('y_prediction_sort:', y_prediction_sort)
        print('y_prediction_indexes:', y_prediction_indexes)
        print('positive num', len(np.where(y_test)[0]))
        print('SR equation:', SR_agent)
        total_num = len(y_prediction)
        self.get_information(SR_agent, total_num, y_prediction_indexes, y_test, y_prediction)
    
    @classmethod
    def score_for_MCTS(cls, eq, x_test):
        """Evaluate an equation string over the columns of x_test.

        The equation refers to column i as 'x_i'; in the project module,
        numpy functions are additionally visible through module globals
        (via `from numpy import *`).

        WARNING: eq is eval()'d — it must come from a trusted source.

        Returns:
            float32 array of per-row equation values.
        """
        num_var = len(x_test[0])
        # Bind the column variables in a local namespace instead of mutating
        # globals(), which leaked x_i names across calls.
        local_vars = {f'x_{i}': x_test[:, i] for i in range(num_var)}
        f_pred = eval(eq, globals(), local_vars)
        return f_pred.astype(np.float32)

    # def normalize(self, vector):
    #     max_vals = np.max(vector, axis=0)
    #     min_vals = np.min(vector, axis=0)
    #     normalized_vector = (vector - min_vals) / \
    #         (max_vals - min_vals + 1e-3)
    #     return normalized_vector 

    @classmethod
    def normalization(cls, x):
        """Column-wise min-max scale x, skipped when every entry is already <= 1."""
        if np.all(x <= 1):
            # Already in range (e.g. binarized LUT features) — return unchanged.
            return x
        col_min = np.min(x, axis=0)
        col_max = np.max(x, axis=0)
        # The 1e-3 keeps constant columns from dividing by zero.
        return (x - col_min) / (col_max - col_min + 1e-3)

    # def normalization_no_lut(self, vector):
    #     if np.min(vector) < 0:
    #         vector = vector - np.min(vector)
    #         vector = vector / vector.max(axis=0)
    #     else:
    #         vector = vector / vector.max(axis=0)
    #     return vector
    def SVD_model(self, x_train):
        """Fit a truncated-SVD reference model from the training features.

        Keeps the top principal directions (rows of Vh) together with their
        singular values renormalized to sum-to-one weights; SVD_sort consumes
        the resulting dict.

        Returns:
            dict with 'n_components', 'svd_s' (weights), 'svd_vd' (components).
        """
        if self.SVD_symbolic_enhanced == True:
            x_train = x_train[:, self.SVD_selected_features]
        n_components = 3
        svd_u, svd_s, svd_vh = np.linalg.svd(x_train)
        svd_vd = svd_vh[:n_components, :]
        svd_s = svd_s / np.sum(svd_s)  # singular values -> normalized weights
        svd_s = svd_s[:n_components]
        # Fix: the key was misspelled 'n_conponents' and hard-coded to 3.
        return {
            'n_components': n_components,
            'svd_s': svd_s,
            'svd_vd': svd_vd,
        }

    @classmethod
    def SVD_sort(cls, svd_model, x_test):
        """Score rows of x_test by weighted negative distance to the SVD components.

        Each row receives -sum_i(w_i * ||row - v_i||): the closer a row lies
        to the principal directions, the larger (less negative) its score, so
        lower scores rank earlier in an ascending sort.
        """
        components = svd_model['svd_vd']
        weights = svd_model['svd_s']
        scores = np.zeros(x_test.shape[0])
        for idx in range(components.shape[0]):
            distances = np.linalg.norm(x_test - components[idx], axis=1)
            scores -= weights[idx] * distances
        return scores

    def process(self, x_train):
        """Binarize LUT features in place: any non-zero entry becomes 1."""
        nonzero_mask = np.not_equal(x_train, 0)
        x_train[nonzero_mask] = 1
        return x_train

    def sigmoid(self, x):
        """Numerically stable logistic function 1 / (1 + exp(-x)).

        Implemented via logaddexp (log(1 + e^-x) computed stably), so large
        negative inputs no longer trigger overflow RuntimeWarnings in exp.
        """
        return np.exp(-np.logaddexp(0, -x))
    
    def get_information(self, SR_agent, total_num, y_prediction_indexes, y_test, y_prediction):
        """Report top-10%..90% metrics plus 0.5-threshold metrics, then log to CSV.

        y_prediction_indexes must be an ascending argsort of y_prediction;
        each top-k selection therefore takes the highest-scored samples from
        the tail. The printed 'accuracy' is tp / (tp + fn), i.e. the recall
        of the positives within each top-k selection.

        The nested helper functions of the old version were byte-for-byte
        duplicates of self.get_confusion_matrix / self.get_num_50_confusion_matrix;
        those shared methods are reused instead.
        """
        top_k_acc = []
        top_k_tp = []
        top_k_tn = []
        top_k_fp = []
        top_k_fn = []
        for k in range(9):
            percent = (k+1)*0.1
            sel_num = int(percent * total_num)
            sel_indexes = y_prediction_indexes[total_num-sel_num:]
            non_sel_indexes = y_prediction_indexes[:total_num-sel_num]
            tp_topk, tn_topk, fp_topk, fn_topk = self.get_confusion_matrix(sel_indexes, non_sel_indexes, y_test)
            accuracy = tp_topk / (tp_topk+fn_topk)
            print(f'tp,tn,fp,fn:', tp_topk, tn_topk, fp_topk, fn_topk)
            print(f'tp+fp', tp_topk+fp_topk)
            print(f'top{k+1}0% accuracy:', accuracy)
            top_k_acc.append(accuracy)
            top_k_tp.append(tp_topk)
            top_k_tn.append(tn_topk)
            top_k_fp.append(fp_topk)
            top_k_fn.append(fn_topk)
        tp_50, tn_50, fp_50, fn_50 = self.get_num_50_confusion_matrix(y_prediction, y_test)
        accuracy_50 = (tp_50+tn_50)/total_num
        recall_50 = tp_50/(tp_50+fn_50)
        positive_ratio = (tp_50+fp_50)/total_num
        print('tp_50,tn_50,fp_50,fn_50:', tp_50, tn_50, fp_50, fn_50)
        print('accuarcy_50', accuracy_50)  # (sic) label kept for log compatibility
        print('recall_50', recall_50)
        print('positive_ratio', positive_ratio)
        self.save_csv(top_k_acc, top_k_tp, top_k_tn, top_k_fp, top_k_fn, tp_50, tn_50, fp_50, fn_50, total_num, SR_agent)

    def save_csv(self, top_k_acc, top_k_tp, top_k_tn, top_k_fp, top_k_fn, tp_50, tn_50, fp_50, fn_50, total_num, SR_agent):
        """Append a header plus nine top-k rows to the run's CSV log.

        The 0.5-threshold summary and the equation are only written on the
        first (top10%) row; the remaining cells stay empty (DictWriter fills
        missing fields with '').
        """
        SR_agent = str(SR_agent)
        csv_path = f'./{self.npy_file_name}_{self.search_method}_normalize_{self.normalize}_feature_selection_{self.feature_selection}.csv'
        fieldnames = ['Top_k', 'Top_k_acc', 'Top_k_tp',
                    'Top_k_tn', 'Top_k_fp', 'Top_k_fn', 
                    'num_50_acc', 'num_50_recall', 'num_50_tp', 'num_50_tn', 'num_50_fp', 
                    'num_50_fn', 'equation']
        with open(csv_path, 'a', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for k in range(9):
                row = {'Top_k': f'top{k+1}0%',
                       'Top_k_acc': f'{top_k_acc[k]}',
                       'Top_k_tp': f'{top_k_tp[k]}',
                       'Top_k_tn': f'{top_k_tn[k]}',
                       'Top_k_fp': f'{top_k_fp[k]}',
                       'Top_k_fn': f'{top_k_fn[k]}'}
                if k == 0:
                    # Summary columns appear only on the first row.
                    row.update({'num_50_acc': f'{(tp_50+tn_50)/total_num}',
                                'num_50_recall': f'{tp_50/(tp_50+fn_50)}',
                                'num_50_tp': f'{tp_50}',
                                'num_50_tn': f'{tn_50}',
                                'num_50_fp': f'{fp_50}',
                                'num_50_fn': f'{fn_50}',
                                'equation': f'{SR_agent}'})
                writer.writerow(row)

    def get_num_50_confusion_matrix(self, y_prediction, y_test):
        """Confusion matrix for a fixed 0.5 decision threshold.

        Positives are samples whose label is exactly 1; predictions >= 0.5
        count as positive. Returns (tp, tn, fp, fn).
        """
        label_pos = set(np.where(y_test == 1)[0])
        label_neg = set(np.where(y_test == 0)[0])
        pred_pos = np.where(y_prediction >= 0.5)[0]
        pred_neg = np.where(y_prediction < 0.5)[0]
        tp = len(label_pos.intersection(pred_pos))
        fp = len(pred_pos) - tp
        tn = len(label_neg.intersection(pred_neg))
        fn = len(pred_neg) - tn
        return tp, tn, fp, fn
    
    def get_confusion_matrix(self, sel_indexes, non_sel_indexes, y_test):
        """Confusion matrix for a selection-based split.

        sel_indexes are the predicted positives (any non-zero label counts as
        a true positive); non_sel_indexes are the predicted negatives.
        Returns (tp, tn, fp, fn).
        """
        label_pos = set(np.nonzero(y_test)[0])
        label_neg = set(np.nonzero(y_test == 0)[0])
        tp = len(label_pos.intersection(sel_indexes))
        fp = len(sel_indexes) - tp
        tn = len(label_neg.intersection(non_sel_indexes))
        fn = len(non_sel_indexes) - tn
        return tp, tn, fp, fn

    @classmethod
    def evaluate_baseline(cls, SR_agent, x_test, y_test, search_method, feature_selection, SVD_selected_features=None):
        """Score a pre-trained baseline model on x_test and print top-k metrics.

        Args:
            SR_agent: equation string (MCTS), fitted estimator (GPLearn/DSR/
                sklearn-style models), score vector (Random), or SVD model
                dict, depending on search_method.
            SVD_selected_features: column indices for 'SVD_symbolic'. New
                optional parameter — the old code read the non-existent class
                attribute cls.SVD_selected_features and crashed.

        Returns:
            (top_k_acc, y_prediction)

        Raises:
            ValueError: for an unsupported search_method (previously an
                unbound-local NameError), or when 'SVD_symbolic' is requested
                without SVD_selected_features.
        """
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))
        def score_for_MCTS(eq, x_test):
            # Bind x_i column variables locally instead of mutating globals();
            # in the project module, numpy names resolve via module globals
            # (from numpy import *).
            local_vars = {f'x_{i}': x_test[:, i] for i in range(len(x_test[0]))}
            f_pred = eval(eq, globals(), local_vars).astype(np.float32)
            if feature_selection != 'lut':
                f_pred = sigmoid(f_pred)
            return f_pred
        if search_method == 'MCTS':
            y_prediction = score_for_MCTS(SR_agent, x_test)
        elif search_method == 'GPLearn':
            y_prediction = SR_agent._program.execute(x_test)
            y_prediction = sigmoid(y_prediction)
        elif search_method == 'DSR':
            y_prediction = SR_agent.predict(x_test)
            y_prediction = sigmoid(y_prediction)
            SR_agent = SR_agent.result
        elif search_method in ['DT', 'LR', 'SVM', 'RidgeLR', 'xgboost', 'lightGBM']:
            y_prediction = SR_agent.predict(x_test)
        elif search_method == 'Random':
            y_prediction = SR_agent
        elif search_method == 'SVD':
            y_prediction = sigmoid(cls.SVD_sort(SR_agent, x_test))
        elif search_method == 'SVD_symbolic':
            if SVD_selected_features is None:
                raise ValueError('SVD_symbolic requires SVD_selected_features')
            x_test = x_test[:, SVD_selected_features]
            y_prediction = sigmoid(cls.SVD_sort(SR_agent, x_test))
        else:
            raise ValueError(f'unsupported search_method: {search_method}')
        y_prediction_indexes = np.argsort(y_prediction, kind='stable')
        y_prediction_sort = np.sort(y_prediction)  # predictions in ascending order
        print('y_prediction_sort:', y_prediction_sort)
        print('y_prediction_indexes:', y_prediction_indexes)
        print('positive num', len(np.where(y_test)[0]))
        print('SR equation:', SR_agent)
        total_num = len(y_prediction)
        top_k_acc = cls.get_information_baseline(total_num, y_prediction_indexes, y_test, y_prediction)
        return top_k_acc, y_prediction

    @classmethod
    def get_information_baseline(cls, total_num, y_prediction_indexes, y_test, y_prediction):
        """Print top-10%..90% accuracy plus the 0.5-threshold summary; return the accuracies.

        y_prediction_indexes must be an ascending argsort of y_prediction, so
        each top-k selection is the tail of the index array. Top-k 'accuracy'
        is tp / (tp + fn), i.e. the recall of the selected fraction. For the
        top-k split any non-zero label counts as positive; for the 0.5 split
        the label must equal exactly 1.
        """
        nonzero_labels = set(np.nonzero(y_test)[0])
        zero_labels = set(np.nonzero(y_test == 0)[0])

        top_k_acc = []
        for k in range(9):
            sel_num = int((k + 1) * 0.1 * total_num)
            selected = y_prediction_indexes[total_num - sel_num:]
            rejected = y_prediction_indexes[:total_num - sel_num]
            tp = len(nonzero_labels.intersection(selected))
            tn = len(zero_labels.intersection(rejected))
            fn = len(rejected) - tn
            accuracy = tp / (tp + fn)
            print(f'top{k+1}0% accuracy:', accuracy)
            top_k_acc.append(accuracy)

        # 0.5-threshold split: the label must be exactly 1 to count positive.
        ones = set(np.where(y_test == 1)[0])
        pred_pos = np.where(y_prediction >= 0.5)[0]
        pred_neg = np.where(y_prediction < 0.5)[0]
        tp_50 = len(ones.intersection(pred_pos))
        fp_50 = len(pred_pos) - tp_50
        tn_50 = len(zero_labels.intersection(pred_neg))
        fn_50 = len(pred_neg) - tn_50
        print('accuarcy_50', (tp_50 + tn_50) / total_num)  # (sic) label preserved
        print('recall_50', tp_50 / (tp_50 + fn_50))
        print('positive_ratio', (tp_50 + fp_50) / total_num)
        return top_k_acc
