'''
    Collect the GNN's input-output pairs on the training and test sets,
    for distilling into a symbolic learning model.
'''
import o2_data_loader as data_loader
import os 
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import utils.gcn_policy as gcn_policy
import torch
import numpy as np
NPY_TYPE = 'train' #train or test
# DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
DEVICE = torch.device('cpu')
GCNPOLICY_CLASS = 'GCNPolicy'
BATCH_SIZE = 10240
POLICY_KWARGS = {
    'mean_max': 'mean',
    'emd_size': 128,
    'out_size': 2,
    'num_pivot_node_features': 20,
    'num_children_node_features': 20
}

def get_resub_score(npy_data_path, gcnmodel, save_path, normalize_type, save_bool=False):
    """Run the trained GCN over every .npy domain file and collect
    (feature, label, prediction) triples for distillation.

    Parameters
    ----------
    npy_data_path : str
        Directory containing one raw .npy data file per domain.
    gcnmodel : str or None
        Path to the GCN checkpoint to restore; ``None`` keeps random weights.
    save_path : str
        Directory the stacked arrays are dumped to when ``save_bool`` is True.
    normalize_type : bool
        Forwarded to the data loader's ``normalize`` flag.
    save_bool : bool, optional
        When True, dump the stacked arrays via :func:`save`.

    Returns
    -------
    tuple of np.ndarray
        ``(features, labels, predictions)`` — row-stacked pivot-node
        features, binarized labels, and per-sample mean prediction scores
        over all batches of all domains.
    """
    loaders = []
    for npy_file in os.listdir(npy_data_path):
        print(npy_file)
        cur_npy_file_path = os.path.join(npy_data_path, npy_file)
        if NPY_TYPE not in ('train', 'test'):
            continue
        # BUG FIX: the original 'test' branch *assigned* a single loader to
        # the list variable on every file, so only the last file survived and
        # the enumerate() below iterated the loader object itself.  Both
        # splits now append; the only difference is the train_type flag.
        loaders.append(data_loader.GraphResubDataLoader(
            npy_data_path=cur_npy_file_path,
            save_dir='v2_processed_data',
            train_type=NPY_TYPE,
            batch_size=512,
            max_batch_size=BATCH_SIZE,
            fanin_nodes_type='remove',
            normalize=normalize_type,
        ))
    graph_policy = load_graph_model(gcnmodel)
    features_list = []
    labels_list = []
    prediction_list = []
    for domain_loader in loaders:
        for batch in domain_loader.graph_data_loader:
            batch_pivot_node_features = batch.pivot_node_features.to(DEVICE)
            batch_children_node_features = batch.children_node_features.to(DEVICE)
            batch_edge_indexes = batch.edge_index.to(DEVICE)
            labels = batch.label.float().numpy()
            with torch.no_grad():
                predict_scores = graph_policy(
                    batch_pivot_node_features,
                    batch_edge_indexes,
                    batch_children_node_features
                )
            predict_scores = predict_scores.cpu().detach().numpy()
            # Collapse the two output logits into one score per sample.
            predict_scores = np.mean(predict_scores, axis=1, keepdims=True)
            labels = process(labels)  # binarize: any positive label -> 1
            features_list.append(batch_pivot_node_features.cpu().numpy())
            labels_list.append(labels)
            prediction_list.append(predict_scores)
    features = np.vstack(features_list)
    labels = np.vstack(labels_list)
    predict_scores = np.vstack(prediction_list)
    if save_bool:
        save_data = {
            'features': features,
            'labels': labels,
            'prediction': predict_scores,
        }
        save(save_data, 0, save_path)
    return features, labels, predict_scores

def process(y_test):
    """Binarize labels in place: every strictly-positive entry becomes 1.

    Zero and negative entries are left untouched.  The input is mutated in
    place (matching the original element-wise loop) and also returned.
    Expects a numpy array — callers pass ``batch.label.float().numpy()``.
    """
    # Vectorized replacement for the original per-element Python loop.
    y_test[y_test > 0] = 1
    return y_test

def correct(labels, predict_scores):
    """Lift mispredicted true-positive scores into the top-k score range.

    Scores are ranked ascending; any sample in the bottom ``1-k`` fraction
    whose label is positive gets a new score drawn uniformly from
    [k-quantile score, current max score].  ``predict_scores`` is mutated
    in place and returned.

    NOTE(review): currently only referenced from commented-out code in
    get_resub_score; kept for parity with the original file.
    """
    # set for O(1) membership tests inside the loop (was an ndarray scan)
    true_indexes = set(np.where(labels > 0)[0].tolist())
    order = np.argsort(predict_scores.reshape(-1))  # ascending
    k = 0.2
    cutoff = int((1 - k) * len(order))
    # renamed from min/max: do not shadow the builtins
    low = predict_scores[order[cutoff]]   # score at the top-k boundary
    high = predict_scores[order[-1]]      # current maximum score
    for index in order[:cutoff]:
        if index in true_indexes:
            predict_scores[index] = np.random.uniform(low, high)
    return predict_scores

def save(data, i, save_path):
    """Dump one domain's distillation data to ``<save_path>/train_domain_<i+1>.npy``.

    Parameters
    ----------
    data : dict
        Arrays to persist (pickled by ``np.save``).
    i : int
        Zero-based domain index; the file name uses ``i + 1``.
    save_path : str
        Output directory, created (with parents) if missing.
    """
    # makedirs (not mkdir) so missing parent directories are not fatal;
    # exist_ok avoids the exists()/mkdir() race of the original.
    os.makedirs(save_path, exist_ok=True)
    file_path = os.path.join(save_path, f'train_domain_{i+1}.npy')
    np.save(file_path, data)

    
def load_graph_model(gcnmodel):
    """Build the GCN policy network and optionally restore its weights.

    ``gcnmodel`` is a checkpoint path or ``None`` (random init).  The
    network is moved to ``DEVICE`` and switched to eval mode.
    """
    policy_cls = getattr(gcn_policy, GCNPOLICY_CLASS)
    graph_policy = policy_cls(**POLICY_KWARGS).to(DEVICE)
    if gcnmodel is not None:
        state_dict = torch.load(gcnmodel, map_location=DEVICE)
        graph_policy.load_state_dict(state_dict)
    graph_policy.eval()
    return graph_policy


if __name__ == "__main__":
    name_list = ['des_perf', 'ethernet', 'vga_lcd', 'wb_conmax']
    normalize = False
    # The two original branches were copy-paste duplicates differing only in
    # the model / output directory names and the normalize flag — derive
    # those from `normalize` and keep a single loop.
    model_dir = 'normalize_model' if normalize else 'non_normalize_model'
    out_dir = ('multi_domain_normalize_domain' if normalize
               else 'multi_domain_non_normalize_domain')
    for name in name_list:
        npy_data_path = f'/yqbai/GLENORE/TPAMI/npy_data/training_data_for_GNN/iwls2005_{name}_test/multi_domains/train'
        gcnmodel = f'/yqbai/GLENORE/TPAMI/npy_data/GNN/iwls2005_{name}_test/{model_dir}/itr_1999.pkl'
        save_path = f'/yqbai/GLENORE/TPAMI/npy_data/GNN/iwls2005_{name}_test/{out_dir}'
        score = get_resub_score(npy_data_path, gcnmodel, save_path, normalize)