'''
Imitation-learning based training of the node classifier.
The training parameters can be modified from models.setting.
'''
import gzip
import os.path
import pickle
import shutil
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
from torch_geometric.loader import DataLoader
from tqdm import tqdm

from models.setting import *
from models.gnn_policy import MyGNN
from models.gnn_dataset import GraphNodeDataset

from models.dagger_collect_data_multiprocess import DataCollect

torch.set_num_threads(1)


class TrainMyGNN(object):
    """
    Imitation-learning (DAgger-style) trainer for the node-pruning GNN classifier.

    Problem size and training hyper-parameters come from a ``TrainParameters``
    instance; global knobs (hidden size, layer count, dataset sizes) come from
    ``models.setting``.
    """

    def __init__(self, root=ROOT, train_parameters: TrainParameters = None):
        """
        Create the data/model/result folder layout, the GNN and the optimizer.
        @params:
            root: project root under which ``train_mygnn/{data,model,result}`` live.
            train_parameters: problem size (N, M) and training hyper-parameters.
        """
        # extract train parameters
        self.train_parameters = train_parameters
        self.N, self.M = train_parameters.N, train_parameters.M
        self.batch_size = train_parameters.batch_size
        self.train_epochs = train_parameters.train_epochs

        # create folders, one sub-folder per problem size
        subdir = f'M{self.M}_N{self.N}'
        data_path = os.path.join(root, 'train_mygnn', 'data', subdir)
        model_path = os.path.join(root, 'train_mygnn', 'model', subdir)
        result_path = os.path.join(root, 'train_mygnn', 'result', subdir)
        # clean all model files so checkpoints from earlier runs are not mixed
        # with the ones this run produces
        if os.path.isdir(model_path):
            shutil.rmtree(Path(model_path))
        Path(data_path).mkdir(parents=True, exist_ok=True)
        Path(model_path).mkdir(parents=True, exist_ok=True)
        Path(result_path).mkdir(parents=True, exist_ok=True)

        self.data_path = data_path
        self.model_path = model_path
        self.result_path = result_path

        self.train_filepath = os.path.join(data_path, 'train_data')
        self.test_filepath = os.path.join(data_path, 'test_data')
        Path(self.train_filepath).mkdir(exist_ok=True)
        Path(self.test_filepath).mkdir(exist_ok=True)

        self.prune_model = MyGNN(hidden_channels=HIDDEN_CHANNEL, num_layers=NUM_LAYER)
        self.optimizer = torch.optim.RMSprop(self.prune_model.parameters(), lr=1e-3, weight_decay=2e-3, momentum=0.8)

        self.train_loss_list = []
        self.valid_loss_list = []
        # best min(true_positive_ratio, true_negative_ratio) seen so far; a
        # checkpoint is saved whenever both ratios exceed this value
        self.min_accuracy = 0.5

        # The ratio of the number of non-pruned nodes to the number of all nodes.
        self.non_pruned_node_ratio = None
        self.preprocess_train_filepath = None
        self.preprocess_test_filepath = None

    def generate_data(self):
        """Collect expert (train + test) demonstrations into the data folders."""
        train_data_collector = DataCollect(train_parameters=self.train_parameters,
                                           train_filepath=self.train_filepath)

        test_data_collector = DataCollect(train_parameters=self.train_parameters,
                                          train_filepath=self.test_filepath)

        # data collection stage
        train_data_collector.collect_data(num_instances=NUM_TRAIN_EXAMPLES)
        test_data_collector.collect_data(num_instances=NUM_TEST_EXAMPLES)

    @staticmethod
    def _split_files_by_label(folder):
        """Partition the 'sample_*.pkl' files in *folder* by their stored label.

        Each file is a gzip'ed pickle of ``(features, label)``; a truthy label
        marks a pruned node. Returns ``(prune_node_files, non_prune_node_files)``.
        """
        prune_node_files = []
        non_prune_node_files = []
        for file in (str(path) for path in Path(folder).glob('sample_*.pkl')):
            with gzip.open(file, 'rb') as f:
                (_features, label) = pickle.load(f)
            if label:
                prune_node_files.append(file)
            else:
                non_prune_node_files.append(file)
        return prune_node_files, non_prune_node_files

    @staticmethod
    def _copy_balanced(prune_node_files, non_prune_node_files, dest_path):
        """Copy an equal number of prune / non-prune samples into *dest_path*.

        ``zip`` truncates to the smaller class, so the destination folder ends
        up class-balanced; the source files are left untouched.
        """
        Path(dest_path).mkdir(exist_ok=True)
        for prune_file, non_prune_file in zip(prune_node_files, non_prune_node_files):
            shutil.copy(prune_file, os.path.join(dest_path, os.path.basename(prune_file)))
            shutil.copy(non_prune_file, os.path.join(dest_path, os.path.basename(non_prune_file)))

    @staticmethod
    def _balance_by_deletion(prune_node_files, non_prune_node_files):
        """Delete random surplus samples of the larger class (in place on disk)."""
        if len(prune_node_files) > len(non_prune_node_files):
            larger = prune_node_files
            surplus = len(prune_node_files) - len(non_prune_node_files)
        else:
            larger = non_prune_node_files
            surplus = len(non_prune_node_files) - len(prune_node_files)
        for idx in np.random.choice(len(larger), surplus, replace=False):
            os.remove(larger[idx])

    def preprocess_data2(self):
        """Build class-balanced copies of the train/test sets (non-destructive).

        Returns ``(preprocess_train_data_path, preprocess_test_data_path)``.
        """
        preprocess_train_data_path = os.path.join(self.data_path, 'preprocess_train_data')
        prune_files, non_prune_files = self._split_files_by_label(self.train_filepath)
        self._copy_balanced(prune_files, non_prune_files, preprocess_train_data_path)

        preprocess_test_data_path = os.path.join(self.data_path, 'preprocess_test_data')
        prune_files, non_prune_files = self._split_files_by_label(self.test_filepath)
        self._copy_balanced(prune_files, non_prune_files, preprocess_test_data_path)
        return preprocess_train_data_path, preprocess_test_data_path

    def preprocess_data(self):
        """Class-balance the train and test sets in place by deleting random
        samples of the over-represented class."""
        self._balance_by_deletion(*self._split_files_by_label(self.train_filepath))
        self._balance_by_deletion(*self._split_files_by_label(self.test_filepath))

    def test(self, epoch):
        """Evaluate on the test set; checkpoint the model when both TPR and TNR
        beat the best min(TPR, TNR) seen so far.

        @params:
            epoch: current training epoch (only used for logging).
        """
        # load dataset
        test_files = [str(path) for path in Path(self.test_filepath).glob('sample_*.pkl')]
        test_data = GraphNodeDataset(test_files)
        test_loader = DataLoader(test_data, batch_size=self.batch_size, shuffle=True)

        # Open the test mode of the model
        self.prune_model.eval()
        # test stage
        bce_loss = nn.BCELoss()
        mean_loss = 0
        n_samples_processed = 0
        # confusion counts: rows = true label, cols = predicted label
        accuracy_statistics = np.zeros((2, 2))
        # FIX: evaluation needs no gradients; avoids building the autograd graph
        with torch.no_grad():
            for data in test_loader:
                out = self.prune_model(data.x_dict, data.edge_index_dict, data.num_graphs)
                loss = bce_loss(out, data['graph_label'])

                predicted_bestindex = (out > 0.5) * 1
                batch_size = len(out)
                for idx_batch in range(batch_size):
                    accuracy_statistics[int(data['graph_label'][idx_batch]), int(predicted_bestindex[idx_batch])] += 1

                mean_loss += loss.item() * batch_size
                n_samples_processed += batch_size

        accuracy = (accuracy_statistics[0, 0] + accuracy_statistics[1, 1]) / np.sum(accuracy_statistics)
        # NOTE(review): these divide by zero (-> nan, so no checkpoint is saved)
        # if one class is entirely absent from the test set
        true_negative_ratio = accuracy_statistics[0, 0] / (accuracy_statistics[0, 0] + accuracy_statistics[0, 1])
        true_positive_ratio = accuracy_statistics[1, 1] / (accuracy_statistics[1, 0] + accuracy_statistics[1, 1])
        mean_loss /= n_samples_processed

        if (true_negative_ratio > self.min_accuracy and true_positive_ratio > self.min_accuracy):
            self.min_accuracy = min(true_positive_ratio, true_negative_ratio)
            # save model
            prune_model_file = os.path.join(self.model_path, 'gnn_{:5f}.pth'.format(self.min_accuracy))
            torch.save(self.prune_model.state_dict(), prune_model_file)
            print("\n\n Save Epoch: {} test_loss: {:<.5f}, acc: {:<.5f}, TNR:{:<.5f}, TPR:{:<.5f}\n\n".format(epoch,
                                                                                                    mean_loss, accuracy,
                                                                                                    true_negative_ratio,
                                                                                                    true_positive_ratio))

        self.valid_loss_list.append(mean_loss)

        print("Epoch: {} test_loss: {:<.5f}, acc: {:<.5f}, TNR:{:<.5f}, TPR:{:<.5f}".format(epoch,
                                                                                            mean_loss, accuracy,
                                                                                            true_negative_ratio,
                                                                                            true_positive_ratio))

    def train(self):
        """Train for ``self.train_epochs`` epochs, evaluating after each one."""
        # load dataset
        train_files = [str(path) for path in Path(self.train_filepath).glob('sample_*.pkl')]
        train_data = GraphNodeDataset(train_files)
        train_loader = DataLoader(train_data, batch_size=self.batch_size, shuffle=True)

        # training stage
        bce_loss = nn.BCELoss()
        for epoch in range(self.train_epochs):
            # FIX: self.test() leaves the model in eval mode, so training mode
            # must be restored every epoch (matters for dropout/batch-norm)
            self.prune_model.train()
            mean_loss = 0
            n_samples_processed = 0
            # confusion counts: rows = true label, cols = predicted label
            accuracy_statistics = np.zeros((2, 2))
            for data in train_loader:
                self.optimizer.zero_grad()
                out = self.prune_model(data.x_dict, data.edge_index_dict, data.num_graphs)
                loss = bce_loss(out, data['graph_label'])
                loss.backward()
                self.optimizer.step()

                predicted_bestindex = (out > 0.5) * 1
                batch_size = len(out)
                for idx_batch in range(batch_size):
                    accuracy_statistics[int(data['graph_label'][idx_batch]), int(predicted_bestindex[idx_batch])] += 1

                mean_loss += loss.item() * batch_size
                n_samples_processed += batch_size

            accuracy = (accuracy_statistics[0, 0] + accuracy_statistics[1, 1]) / np.sum(accuracy_statistics)
            true_negative_ratio = accuracy_statistics[0, 0] / (accuracy_statistics[0, 0] + accuracy_statistics[0, 1])
            true_positive_ratio = accuracy_statistics[1, 1] / (accuracy_statistics[1, 0] + accuracy_statistics[1, 1])
            mean_loss /= n_samples_processed
            self.train_loss_list.append(mean_loss)
            self.test(epoch)

    def get_non_pruned_node_ratio(self):
        """Return the ratio of non-pruned nodes (label == 0) to all nodes
        in the training set."""
        # load dataset
        train_files = [str(path) for path in Path(self.train_filepath).glob('sample_*.pkl')]
        train_data = GraphNodeDataset(train_files)
        train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
        num_non_pruned_node = 0
        num_total_node = 0
        for data in train_loader:
            labels = data['graph_label']
            num_total_node += labels.numel()
            # count labels equal to 0 (non-pruned) in one vectorized pass
            num_non_pruned_node += int((labels == 0).sum())
        return num_non_pruned_node / num_total_node

    def get_loss_weight(self, batch, target) -> torch.Tensor:
        """Per-sample inverse-frequency loss weights for a class-imbalanced batch.

        @params:
            batch: a batched graph object providing ``num_graphs``.
            target: index/mask of the samples belonging to the pruned class.
        """
        # FIX: identity comparison with None ('is', not '==')
        if self.non_pruned_node_ratio is None:
            # computed lazily once, then cached on the instance
            self.non_pruned_node_ratio = self.get_non_pruned_node_ratio()
        batch_size = batch.num_graphs
        # loss weights: inverse class frequency
        wts = np.ones(batch_size) / self.non_pruned_node_ratio
        wts[target] = 1 / (1 - self.non_pruned_node_ratio)
        return torch.tensor(wts)

    def draw(self):
        """Plot train/valid loss curves and save them under the result folder."""
        x = list(range(1, len(self.train_loss_list) + 1))
        # create figure
        fig, ax = plt.subplots(figsize=(5, 2.7), layout='constrained')
        ax.plot(x, self.train_loss_list, '-k', label='train_loss')
        ax.plot(x, self.valid_loss_list, '-r', label='valid_loss')
        plt.title('Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        # save figure
        plt.savefig(os.path.join(self.result_path, 'loss_png'), dpi=300)
        plt.show()


def is_empty_directory(path):
    """Return True if *path* contains no regular files (subdirectories are ignored)."""
    for entry in os.listdir(path):
        if os.path.isfile(os.path.join(path, entry)):
            return False
    return True


if __name__ == '__main__':
    # Problem size: N items, M = N + 1.
    N = 5
    M = N + 1
    parameter_grid = [TrainParameters(N=N, M=M, batch_size=128, train_epochs=400)]

    for param in parameter_grid:
        trainer = TrainMyGNN(root=ROOT, train_parameters=param)
        # Only (re)generate and balance the dataset when none exists yet.
        if is_empty_directory(trainer.train_filepath):
            trainer.generate_data()
            trainer.preprocess_data()
            print(f'non_pruned_node_ratio:{trainer.get_non_pruned_node_ratio()}')
        trainer.train()
        trainer.draw()
