import copy
import os
import numpy as np
import torch
import argparse
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.utils.data import DataLoader, Dataset
from torch_geometric import loader
from utils.utils import GraphDataset
from gplearn.genetic import SymbolicRegressor

class GraphDataLoader(DataLoader):
    """Build graph-sample data loaders from preprocessed .npy dumps.

    Loads one or more .npy files produced by the feature-extraction step,
    turns each node into a ``(pivot_node_features, label)`` training sample
    (optionally oversampling the positive class), caches the processed sample
    list to disk, and wraps it in torch_geometric ``DataLoader``s for
    train / test use.
    """

    def __init__(
        self,
        npy_data_path=None,         # a single .npy file or a directory of them
        save_dir=None,              # cache subdirectory for the processed samples
        processed_npy_path=None,    # skip processing, load cached samples from here
        sample_rate=2,              # NOTE(review): stored but never used — confirm before removing
        sample_bool=False,          # whether to oversample positive samples
        train_type='train',         # 'train' or 'test'
        load_type='default_order',  # 'default_order' or 'all_order'
        batch_size=128,             # NOTE(review): unused; batching is driven by max_batch_size
        max_batch_size=10240,
        depth=1,                    # 1 = direct children only, 2 = grandchildren too
        # debug
        debug_log=False,
        # domain attribute
        label_dict=None,            # {path-substring: domain label}; None -> use labels from file
        # fanin_nodes
        fanin_nodes_type='remove',  # out-of-graph fan-in handling: 'zero'|'one'|'remove'
        name = None,
        normalize_type = None       # None|'no_normalize'|'MMN'|'MAX'|'Z'
    ):
        self.npy_data_path = npy_data_path
        self.sample_rate = sample_rate
        self.load_type = load_type
        self.sample_bool = sample_bool
        self.train_type = train_type
        self.save_dir = save_dir
        self.depth = depth
        self.debug_log = debug_log
        self.label_dict = label_dict
        self.fanin_nodes_type = fanin_nodes_type
        self.max_batch_size = max_batch_size
        self.name = name
        self.normalize_type = normalize_type

        # Per-file scratch state, overwritten by every load_data() call.
        self.dataset = {
            "samples": [],
            "default_order_features": None,
            "default_order_label": None,
            "default_order_children_features": None,
            "default_traverse_id": None,
            "first_traverse_id": None
        }
        self.end_to_end_stats = None
        self.features = None
        self.labels = None
        self.children = None
        # Running counters accumulated over every loaded file.
        self.num_samples = 0
        self.num_positive_samples = 0
        self.num_negative_samples = 0

        samples_list = []
        if processed_npy_path is None:
            if npy_data_path.endswith('.npy'):
                # Single preprocessed file.
                self.load_data(npy_data_path)
                samples, _ = self.process_data_up_sample()
                samples_list.extend(samples)
            else:
                # Directory of preprocessed files; files without any positive
                # sample are skipped entirely.
                for npy_file in os.listdir(npy_data_path):
                    npy_file_path = os.path.join(npy_data_path, npy_file)
                    self.load_data(npy_file_path)
                    samples, skip_file = self.process_data_up_sample()
                    if skip_file:
                        continue
                    samples_list.extend(samples)
            # Cache the processed samples for later runs.
            # Fix: makedirs(exist_ok=True) — os.mkdir failed when the parent
            # was missing or the directory already existed.
            os.makedirs(f'/yqbai/GP/npy_data/{self.save_dir}', exist_ok=True)
            np.save(f"/yqbai/GP/npy_data/{self.save_dir}/graph_samples_train_type_{train_type}_sample_bool_{self.name}.npy", samples_list)
        else:
            samples_list = np.load(processed_npy_path, allow_pickle=True)

        num_samples = len(samples_list)
        # NOTE(review): batch_size == num_samples puts everything in a single
        # batch; an empty samples_list would make this 0 — confirm upstream
        # always yields at least one sample.
        self.data_loader = loader.DataLoader(samples_list, num_samples, shuffle=False)

        self.graph_dataset = GraphDataset(samples_list)
        total_num = len(samples_list)
        if self.train_type == 'train':
            if total_num <= self.max_batch_size:
                # Everything fits into one batch.
                self.graph_data_loader = loader.DataLoader(
                    self.graph_dataset, total_num, shuffle=False)
            else:
                # Random batches of at most max_batch_size samples.
                sampler = BatchSampler(
                    SubsetRandomSampler(range(total_num)),
                    self.max_batch_size,
                    drop_last=False)
                self.graph_data_loader = loader.DataLoader(
                    self.graph_dataset, batch_sampler=sampler)
        elif self.train_type == 'test':
            if total_num <= self.max_batch_size:
                # Fix: use total_num as the batch size; the old code passed
                # self.num_samples, which only reflects the LAST loaded file
                # when several files were processed.
                self.graph_data_loader = loader.DataLoader(
                    self.graph_dataset, total_num, shuffle=False)
            else:
                self.graph_data_loader = loader.DataLoader(
                    self.graph_dataset, self.max_batch_size, shuffle=False)

    def _normalize(self):
        """Normalize ``self.features`` in place according to ``normalize_type``.

        Supported types: ``None`` / ``'no_normalize'`` (identity), ``'MMN'``
        (min-max), ``'MAX'`` (divide by per-column max), ``'Z'`` (z-score).

        Raises:
            ValueError: for an unrecognized normalize_type. (The original
            code fell through all branches and crashed with NameError —
            including for the default ``normalize_type=None``.)
        """
        if self.normalize_type in (None, 'no_normalize'):
            return
        if self.normalize_type == 'MMN':
            max_vals = np.max(self.features, axis=0)
            min_vals = np.min(self.features, axis=0)
            self.features = (self.features - min_vals) / (max_vals - min_vals)
        elif self.normalize_type == 'MAX':
            max_vals = np.max(self.features, axis=0)
            self.features = self.features / max_vals
        elif self.normalize_type == 'Z':
            avg_vals = np.average(self.features, axis=0)
            std_vals = np.std(self.features, axis=0)
            self.features = (self.features - avg_vals) / std_vals
        else:
            raise ValueError(f"unknown normalize_type: {self.normalize_type}")

    def _get_label(self, npy_file_path):
        """Return the domain label whose key appears in ``npy_file_path``.

        Keeps the original semantics that the LAST matching key of
        ``self.label_dict`` wins when several keys match.

        Raises:
            KeyError: when no key of label_dict occurs in the path (the old
            code raised an opaque UnboundLocalError here).
        """
        label_value = None
        matched = False
        for k in self.label_dict:
            if k in npy_file_path:
                label_value = self.label_dict[k]
                matched = True
        if not matched:
            raise KeyError(
                f"no label_dict key found in path: {npy_file_path}")
        return label_value

    def load_data(self, npy_file_path):
        """Load one preprocessed .npy file into features/labels/children.

        The file holds a pickled dict with keys 'end_to_end_stats',
        'features_list', 'labels_list' and 'children'; index 0 of each list
        is the default traversal order.
        """
        npy_data = np.load(npy_file_path, allow_pickle=True).item()
        self.end_to_end_stats = npy_data['end_to_end_stats']
        self.dataset['default_order_features'] = npy_data['features_list'][0]
        self.dataset['default_order_label'] = npy_data['labels_list'][0]
        # Column 4 of the feature matrix carries the traversal node id.
        self.dataset['default_traverse_id'] = list(
            self.dataset['default_order_features'][:, 4])
        self.dataset['default_order_children_features'] = self.process_children(
            [npy_data['children'][0]])
        self.dataset['first_traverse_id'] = self.dataset['default_traverse_id'][0]

        if self.load_type == 'all_order':
            self.features = np.vstack(npy_data['features_list'])
            self.labels = np.vstack(npy_data['labels_list'])
            self.children = self.process_children(npy_data['children'])
        elif self.load_type == 'default_order':
            self.features = npy_data['features_list'][0]
            if self.label_dict is None:
                self.labels = npy_data['labels_list'][0]
                self._normalize()
            else:
                # Domain-classification mode: every node of this file gets the
                # same label, derived from the file path.
                label_value = self._get_label(npy_file_path)
                self.labels = np.ones(
                    (self.features.shape[0], 1)) * label_value
                self.labels = self.labels.astype(np.int64)
            self.children = self.dataset['default_order_children_features']
            self.num_samples = self.labels.shape[0]
            if self.debug_log:
                print(f"debug log features: {self.features}")
                print(f"debug log labels: {self.labels}")
                print(
                    f"debug log default_traverse_id: {self.dataset['default_traverse_id']}")
                print(f"debug log children: {self.children}")
                print(f"debug log npy file: {npy_file_path}")
                print(
                    f"debug log first_traverse_id: {self.dataset['first_traverse_id']}")

    def process_children(self, children):
        """Parse '/'-separated child-id strings into lists of int node ids.

        Args:
            children: list of 1-D string arrays; element i of an array holds
                the '/'-joined child ids of node i.

        Returns:
            A flat list (one entry per node across all arrays) of child-id
            lists. With ``self.depth == 2`` each list also contains the
            grandchildren reachable through in-graph children.
        """
        processed_children = []
        for child_list in children:
            for i in range(child_list.shape[0]):
                child_ids = []
                for token in child_list[i].split("/"):
                    # Tokens may be non-numeric placeholders — skip those.
                    # (This is why the original wrapped int() in try/except.)
                    try:
                        child_ids.append(int(token))
                    except ValueError:
                        continue
                    if self.depth == 2:
                        # Pull in grandchildren when the child is inside this
                        # graph; .index raises ValueError for out-of-graph
                        # fan-in nodes.
                        try:
                            actual_index = self.dataset['default_traverse_id'].index(
                                int(token))
                            for grand_token in child_list[actual_index].split("/"):
                                try:
                                    child_ids.append(int(grand_token))
                                except ValueError:
                                    continue
                        except (ValueError, IndexError):
                            pass
                processed_children.append(child_ids)
                if self.debug_log:
                    # Fix: per-node print used to run unconditionally.
                    print(f"{i} th children index: {child_ids}")
        return processed_children

    def get_children(self, i):
        """Stack the feature rows of node ``i``'s children into one array.

        Children whose id precedes the first traversal id are fan-in nodes
        from outside this graph; they are zero-filled, one-filled, or dropped
        according to ``self.fanin_nodes_type``. If every child is dropped, a
        single all-ones row is returned so callers always get >= 1 row.
        """
        feature_matrix = self.dataset['default_order_features']
        # Generalization: derive the feature width from the loaded features
        # instead of hard-coding 69 (keep 69 as fallback when nothing loaded).
        feature_width = feature_matrix.shape[1] if feature_matrix is not None else 69
        children_features = []
        for child_id in self.children[i]:
            # Traversal ids are assumed contiguous, so position = id - first id
            # — TODO confirm this invariant against the preprocessing step.
            index = child_id - self.dataset['first_traverse_id']
            if index >= 0:
                children_features.append(feature_matrix[index:index + 1, :])
            elif self.fanin_nodes_type == 'zero':
                children_features.append(
                    np.zeros((1, feature_width), dtype=np.int32))
            elif self.fanin_nodes_type == 'one':
                children_features.append(
                    np.ones((1, feature_width), dtype=np.int32))
            # 'remove' (and, fix: any unknown type) drops the fan-in node;
            # the old code appended a stale/undefined row for unknown types.

        if not children_features:
            children_features.append(np.ones((1, feature_width), dtype=np.int32))
        return np.vstack(children_features)

    def get_edge_indexes(self, num_children):
        """Return a (2, num_children) star-graph edge index.

        Row 0 is all zeros (every edge starts at the pivot node) and row 1
        enumerates the child positions 0..num_children-1.
        """
        pivot_row = np.zeros((1, num_children), dtype=np.int32)
        child_row = np.expand_dims(
            np.arange(num_children).astype(np.int32), axis=0)
        return np.vstack([pivot_row, child_row])

    def process_data_up_sample(self):
        """Turn the currently loaded file into (features, label) samples.

        When ``self.sample_bool`` is set, each positive sample (label != 0)
        is repeated ``num_negative // num_positive`` extra times so the
        classes are roughly balanced.

        Returns:
            (samples, skip): the sample list, and True when the file has no
            positive samples and should be skipped by the caller.
        """
        positive_indexes = np.nonzero(self.labels)[0]
        negative_indexes = np.nonzero(self.labels == 0)[0]
        num_positive_sample = len(positive_indexes)
        num_negative_sample = len(negative_indexes)
        self.num_positive_samples += num_positive_sample
        self.num_negative_samples += num_negative_sample
        if num_positive_sample <= 0:
            return [], True
        upsample_rate = int(num_negative_sample / num_positive_sample)
        # Fix: O(1) set membership — `i in positive_indexes` on an ndarray
        # made the loop below quadratic.
        positive_index_set = set(int(idx) for idx in positive_indexes)

        print(f"children shape: {len(self.children)}")
        print(f"total samples: {num_positive_sample+num_negative_sample}")
        samples = []
        for i in range(num_positive_sample + num_negative_sample):
            pivot_node_features = torch.FloatTensor(self.features[i:i+1, :])
            label = torch.LongTensor(self.labels[i:i+1, :])
            # A sample is just (pivot features, label); the child sub-graph is
            # currently only materialized for debug logging (see below).
            sample = (pivot_node_features, label)
            samples.append(sample)
            if self.sample_bool and i in positive_index_set:
                # Repeat positive samples to balance the classes.
                samples.extend([sample] * upsample_rate)
            if self.debug_log:
                # Perf fix: children/edge tensors used to be built for every
                # sample although only this debug block consumed them.
                children_features = self.get_children(i)  # (num_children, width)
                edge_indexes = self.get_edge_indexes(children_features.shape[0])
                print(
                    f"debug log sample {i}, pivot_node_features: {pivot_node_features}")
                print(f"debug log sample {i}, label: {label}")
                print(
                    f"debug log sample {i}, children_features: {children_features}")
                print(f"debug log sample {i}, edge_indexes: {edge_indexes}")

        self.dataset['samples'] = samples
        return samples, False

if __name__ == '__main__':
    # Smoke test for the graph data loader on single/multi-domain
    # binary-class training data.
    parser = argparse.ArgumentParser(description='test scripts')
    string_defaults = {
        '--npy_data_path': '/yqbai/GP/npy_data/iwls2005_5000/iwls2005_ethernet_test/train',
        '--train_type': 'train',
        '--load_type': 'default_order',
        '--name': 'ethernet',
    }
    for flag, default in string_defaults.items():
        parser.add_argument(flag, type=str, default=default)
    parser.add_argument('--sample_bool', action='store_true', default=False)
    # Pass args=[] here to ignore the command line and use the defaults only.
    args = parser.parse_args()

    for value in (args.npy_data_path, args.train_type,
                  args.load_type, args.name, args.sample_bool):
        print(value)

    data_loader = GraphDataLoader(
        npy_data_path=args.npy_data_path,
        sample_bool=args.sample_bool,
        train_type=args.train_type,
        load_type=args.load_type,
        name=args.name,
    )
