# BUGFIX: original line read `n__author__ = 'dk'` — the stray leading `n`
# meant the conventional `__author__` module attribute was never set.
__author__ = 'dk'
## Training script

import numpy as np
import matplotlib.pylab as plt
import tqdm
from tqdm import tqdm
from tqdm import trange
import os
import dataset_builder
import argparse
import logger_wrappers

import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from model_serialization import save,load
import spapp_classifier

import sys
from model_base import abs_model
from config import raw_dataset_base, min_flow_len
import select_gpu
# Pick the least-busy GPU when CUDA is available, otherwise fall back to CPU.
device_id = select_gpu.get_free_gpu_id()
use_gpu = torch.cuda.is_available()
device = "cuda:{0}".format(device_id) if use_gpu else "cpu"

class model(abs_model):
    """FGNet experiment driver.

    Builds (or reloads) the flow-graph dataset for ``dataset``, then trains
    and evaluates one ``spapp_classifier.App_Classifier`` per hyper-parameter
    setting in ``self.parameter``, checkpointing each under
    ``<model_path>/saved_model_<name>/``.
    """

    def __init__(self, dataset, randseed, splitrate, max_len=200):
        """
        :param dataset: dataset name; raw graphs are read from ./dataset/<dataset>
        :param randseed: random seed forwarded to abs_model
        :param splitrate: fraction of samples reserved for the test split
        :param max_len: currently unused; kept for interface compatibility
        """
        super(model, self).__init__('fgnet', randseed=randseed)

        self.dataset = dataset
        # self.database is provided by abs_model; make sure the cache root exists
        if not os.path.exists(self.database):
            os.makedirs(self.database, exist_ok=True)
        self.splitrate = splitrate

        self.model_path = self.database + self.name + '_' + self.dataset + '_model'
        print(f"model_path {self.model_path}!") # TODO: path for caching trained model
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        self.data_path = self.database + self.name + '_' + self.dataset
        print(f"data_path {self.data_path}!") # TODO: path for caching parsed data
        if not os.path.exists(self.data_path):
            os.makedirs(self.data_path)

        # Cached, pre-parsed dataset dump (created by the loader on first run)
        self.dumpFilename = self.data_path + "/dataset_builder.pkl.gzip"
        if not os.path.exists(self.dumpFilename):
            print(f"dumpFilename {self.dumpFilename} does not exist!")
        print("dumpFilename:", self.dumpFilename)

        self.gen_config(home=self.model_path, data_dir=self.data_path)
        self.parse_raw_data()

    def gen_config(self, home, data_dir, mode='train'):
        """Populate the hyper-parameter grid.

        Each entry in ``self.parameter`` is one experiment: the classifier's
        latent feature length and number of layers. ``self.finished`` lists
        experiments to skip. ``home``/``data_dir``/``mode`` are currently
        unused but kept for interface compatibility.
        """
        self.parameter = {
            'E1':  {'latent_feature_length': 40,  'nb_layer': 2},
            'E7':  {'latent_feature_length': 200, 'nb_layer': 2},
            'E10': {'latent_feature_length': 100, 'nb_layer': 4},
            'E2':  {'latent_feature_length': 60,  'nb_layer': 2},
            'E3':  {'latent_feature_length': 80,  'nb_layer': 2},
            'E4':  {'latent_feature_length': 100, 'nb_layer': 2},
            'E5':  {'latent_feature_length': 120, 'nb_layer': 2},
            'E6':  {'latent_feature_length': 140, 'nb_layer': 2},
            'E8':  {'latent_feature_length': 100, 'nb_layer': 1},
            'E9':  {'latent_feature_length': 100, 'nb_layer': 3},
        }
        self.finished = {'E1'}

    def parse_raw_data(self):
        """Build the graph data loader from ./dataset/<dataset>.

        :raises FileNotFoundError: if the raw dataset directory is missing.
        """
        self.full_rdata = os.path.join(".", "dataset", self.dataset)
        if not os.path.exists(self.full_rdata):
            print(f"data_path {self.full_rdata} does not exist!")
            # BUGFIX: original code did `raise 1`, which is invalid — raising a
            # non-exception object itself raises a TypeError. Raise a proper,
            # descriptive exception instead.
            raise FileNotFoundError(
                "raw dataset directory not found: {0}".format(self.full_rdata))
        print("log_directory:", self.full_rdata)

        # Build the graph loader (dumps/reuses the parsed dataset on disk)
        self.data_loader = dataset_builder.FlowContainerJSONDataset(mode='clear',
                                                    dumpData=True,usedumpData=True,
                                                    dumpFilename=self.dumpFilename,
                                                    cross_version=False,
                                                    test_split_rate=self.splitrate,
                                                    graph_json_directory=self.full_rdata)

    def train(self):
        """Train every unfinished hyper-parameter setting.

        For each setting: (re)load its checkpoint, train for ``max_epoch``
        epochs, validate once per epoch, checkpoint after every epoch, and
        plot the loss/accuracy curves. Finally report the setting with the
        highest accumulated validation accuracy.
        """
        # BUGFIX: hoisted out of the loop — it was defined inside the
        # per-experiment loop but read after it, which raised NameError when
        # every setting was already in self.finished.
        max_epoch = 40
        best_model_acc = 0
        best_model = None  # BUGFIX: was 0, which is not a key of self.parameter
        for each in self.parameter:
            if each in self.finished:
                continue
            now_model_acc = 0
            print('#' * 100)
            print('Now begin new experiment with the following model parameter!')
            print(self.parameter[each])
            self.model = spapp_classifier.App_Classifier(len(self.data_loader.labelNameSet),
                                                         use_gpu=use_gpu, device=device, layer_type='GAT',
                                                         latent_feature_length=self.parameter[each]['latent_feature_length'],
                                                         nb_layers=self.parameter[each]['nb_layer'])
            self.loss_func = nn.CrossEntropyLoss()
            self.optimizer = optim.Adam(params=self.model.parameters(), lr=5e-5)
            # Resume from this experiment's checkpoint if one exists
            self.model = load(self.model, self.optimizer,
                              checkpoint_path=self.model_path + "/saved_model_{0}/".format(each))
            if use_gpu:
                self.model = self.model.cuda(device)
                self.loss_func = self.loss_func.cuda(device)

            # Training loop
            self.model.train()
            epoch_losses = []
            epoch_acces = []
            self.batch_size = 2
            self.data_loader.epoch_over = False
            for epoch in trange(max_epoch):
                epoch_loss = 0
                num_batches = 0  # renamed from `iter`, which shadowed the builtin
                # NOTE(review): this relies on the loader advancing `epoch_over`
                # once per epoch (False == 0 matches the first epoch) — confirm
                # against dataset_builder before changing.
                while self.data_loader.epoch_over == epoch:
                    graphs, labels = self.data_loader.next_train_batch(self.batch_size)
                    if use_gpu:
                        graphs = graphs.to(torch.device(device))
                        labels = labels.to(torch.device(device))
                    predict_label = self.model(graphs)
                    loss = self.loss_func(predict_label, labels)
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                    # .item() already copies the scalar to host memory, so the
                    # original GPU/CPU branch (with/without .cpu()) was redundant.
                    lv = loss.detach().item()
                    epoch_loss += lv
                    num_batches += 1
                # Tiny epsilon guards against division by zero on an empty epoch
                epoch_loss /= (num_batches + 0.0000001)
                info = 'Epoch {}, loss: {:.4f}'.format(epoch, epoch_loss)
                logger_wrappers.warning(info)
                epoch_losses.append(epoch_loss)
                # Quick validation on a single batch
                graphs, labels = self.data_loader.next_valid_batch(batch_size=self.batch_size)
                if use_gpu:
                    graphs = graphs.to(torch.device(device))
                    labels = labels.to(torch.device(device))
                predict_labels = self.model(graphs)
                predict_labels = F.softmax(predict_labels, 1)
                argmax_labels = torch.argmax(predict_labels, 1)
                print(argmax_labels)
                print(labels)
                acc = (labels == argmax_labels).float().sum().item() / len(labels) * 100
                now_model_acc += acc
                info = 'Accuracy of argmax predictions on the valid set: {:4f}%'.format(
                    acc)
                epoch_acces.append(acc)
                logger_wrappers.info(info)
                # Checkpoint after every epoch
                save(self.model, self.optimizer,
                     checkpoint_path=self.model_path + "/saved_model_{0}/".format(each))
            if epoch_acces and epoch_losses:
                # BUGFIX: start a fresh figure per experiment so curves from
                # earlier parameter settings do not accumulate in the plot;
                # the label= kwargs were also useless without plt.legend().
                plt.figure()
                plt.title('loss and accuracy across epochs')
                plt.plot(epoch_losses, label='loss')
                plt.plot(epoch_acces, label='accuracy')
                plt.legend()
                plt.savefig("./epoch_losses.png")
                plt.close()
            if now_model_acc > best_model_acc:
                best_model = each
                best_model_acc = now_model_acc
        if best_model is not None:
            print('The Best Model is:')
            print(self.parameter[best_model])
            print('The acc is:')
            print(best_model_acc / max_epoch)

    def test(self):
        """Run the full evaluation over the held-out test split for every
        unfinished hyper-parameter setting, reloading each checkpoint."""
        for each in self.parameter:
            if each in self.finished:
                continue
            print('#' * 100)
            print('Now begin eval with the following model parameter!')
            print(self.parameter[each])
            self.model = spapp_classifier.App_Classifier(len(self.data_loader.labelNameSet),
                                                         use_gpu=use_gpu, device=device, layer_type='GAT',
                                                         latent_feature_length=self.parameter[each]['latent_feature_length'],
                                                         nb_layers=self.parameter[each]['nb_layer'])
            self.optimizer = optim.Adam(params=self.model.parameters(), lr=5e-5)
            self.model = load(self.model, self.optimizer,
                              checkpoint_path=self.model_path + "/saved_model_{0}/".format(each))
            if use_gpu:
                self.model = self.model.cuda(device)
                # BUGFIX: self.loss_func only exists after train(); guard so
                # test() can run standalone without an AttributeError. (The
                # loss is not used during evaluation anyway.)
                if hasattr(self, 'loss_func'):
                    self.loss_func = self.loss_func.cuda(device)

            self.model.eval()
            self.batch_size = 8
            acc_list = []
            # no_grad: evaluation does not need autograd bookkeeping
            with torch.no_grad():
                for subset in range(len(self.data_loader.test_index) // self.batch_size):
                    graphs, labels = self.data_loader.next_test_batch(batch_size=self.batch_size)
                    if use_gpu:
                        graphs = graphs.to(torch.device(device))
                        labels = labels.to(torch.device(device))
                    predict_labels = self.model(graphs)
                    predict_labels = F.softmax(predict_labels, 1)
                    argmax_labels = torch.argmax(predict_labels, 1)
                    print(argmax_labels)
                    print(labels)
                    acc = (labels == argmax_labels).float().sum().item() / len(labels) * 100
                    acc_list.append(acc)
                    info = 'Accuracy of argmax predictions on the test subset{1}: {0:4f}%'.format(acc, subset)
                    logger_wrappers.info(info)
            info = 'Average Accuracy on test set:{:0.4f}%'.format(np.mean(acc_list))
            logger_wrappers.info(info)


if __name__ == "__main__":
    # CLI: only the dataset name is configurable.
    cli = argparse.ArgumentParser(description='fgnet model')
    cli.add_argument('--dataset', '-d', type=str, help='dataset name', required=True, dest='dataset')
    cli_args = cli.parse_args()

    dataset = cli_args.dataset
    print("using dataset", dataset)

    # NOTE: fraction of samples held out for the test split; forwarded to the
    # model as `splitrate`.
    test_split_rate = 0.1
    print("test_rate is", test_split_rate)

    fgnet_model = model(dataset, randseed=128, splitrate=test_split_rate)
    fgnet_model.train()
    fgnet_model.test()






