import argparse
import torch
import numpy as np
import random
from gnn_transformer.own_dataset import MyDataset
from gnn_transformer.own_dataset import MySimpleDataset
from torch_geometric.data import DataLoader
from gnn_transformer.own_net import GNNNet
from gnn_transformer.utils import DataProcesser
import torch.nn as nn
import os
import time
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"


def get_parameter():
    """Parse command-line hyperparameters for training.

    Returns:
        argparse.Namespace with fields:
            dataset_root (str): root directory of the dataset.
            batch_size (int): mini-batch size.
            learning_rate (float): Adam learning rate.
            num_epochs (int): number of training epochs.
            node_feature_dimension (int): per-node input feature size.
            training_set_proportion (float): fraction of data used for training.
    """
    parser = argparse.ArgumentParser(description="Demo of argparse")
    parser.add_argument('-dr', '--dataset_root', default='my_dataset')
    # type= is required on the numeric options: argparse only uses the
    # default's Python type when the flag is OMITTED; a value given on the
    # command line would otherwise arrive as a string and break
    # range(num_epochs), Adam(lr=...), and the train/test split.
    parser.add_argument('-bs', '--batch_size', type=int, default=32)
    parser.add_argument('-lr', '--learning_rate', type=float, default=0.01)
    parser.add_argument('-ne', '--num_epochs', type=int, default=500)
    parser.add_argument('-nfd', '--node_feature_dimension', type=int, default=1)
    parser.add_argument('-tsp', '--training_set_proportion', type=float, default=0.7)

    args = parser.parse_args()
    return args

if __name__ == '__main__':
    # --- Reproducibility: fix every RNG seed so runs are repeatable ---
    seed=6
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    np.random.seed(seed)  # Numpy module.
    random.seed(seed)  # Python random module.
    # NOTE(review): duplicate of torch.manual_seed above; harmless but redundant.
    torch.manual_seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Per-worker seeding hook for DataLoader workers.
    # NOTE(review): defined but never passed as worker_init_fn to any loader
    # in the visible code, so it currently has no effect — confirm intent.
    def _init_fn(worker_id):
        np.random.seed(int(seed))

    args=get_parameter()
    # dataset = MyDataset(root=args.dataset_root)
    # dataset = MySimpleDataset().get_dataset()
    # NOTE(review): dataset type/network are hard-coded here; args.dataset_root
    # is ignored — verify this is intentional.
    dataset = MyDataset(type='multi_flow_multi_link_mixed',network='network_2')
    # Step 2: build the DataLoaders (English translation of the note below).
    """ 2.使用 DataLoader 构建train_loader """
    # data_loader = DataLoader(dataset=dataset,
    #                           batch_size=32,
    #                           shuffle=True,
    #                           num_workers=0)
    # Split the dataset into a training loader and a validation/test loader.
    train_dataLoader,test_dataLoader = DataProcesser.get_data_loader(dataset,args)


    # Prefer GPU when available; everything below is moved to this device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    # model = GNNNet(input_feature=args.node_feature_dimension)
    model = GNNNet()
    model = model.to(device)
    criterion = nn.MSELoss()  # regression objective: mean squared error
    criterion = criterion.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    # flow_label =dataset['flow_label']
    # Curves for the final plots: epoch index, train loss, test loss.
    line_x=[]
    line_y1=[]
    line_y2 = []
    for epoch in range(args.num_epochs):
        start_time=time.time()
        model.train()
        ep_train_loss=0
        for i, data in enumerate(train_dataLoader, 0):
            # print(data)
            # Forward pass
            # data = data.to(device)
            outputs = model(data)
            # data[3] is assumed to hold the regression targets — TODO confirm
            # against MyDataset's item layout.
            targets = data[3].to(device)
            loss = criterion(outputs, targets)
            ep_train_loss = ep_train_loss+loss.item()
            # Backward and optimizer
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        end_time = time.time()
        if (epoch + 1) % 100 == 0:
            # Prints predictions/targets of the LAST training batch only.
            print(outputs, targets)

        # Evaluation pass on the held-out loader, gradients disabled.
        model.eval()
        with torch.no_grad():
            ep_test_loss=0
            for i,data in  enumerate(test_dataLoader,0):
                outputs = model(data)
                targets = data[3].to(device)
                if (epoch + 1) % 10 == 0 and i==9:
                    # print(outputs[0],targets[0])
                    pass
                loss = criterion(outputs, targets)
                ep_test_loss = ep_test_loss+loss.item()

        # Plot data
        line_x.append(epoch)
        # Average loss per flow (translated from the original Chinese comment).
        # NOTE(review): the divisor 4 is hard-coded — presumably the number of
        # batches/flows per epoch; verify it matches the actual loader length,
        # otherwise the reported loss scale is wrong.
        ep_train_loss=ep_train_loss/4
        ep_test_loss=ep_test_loss/4
        line_y1.append(ep_train_loss)
        line_y2.append(ep_test_loss)
        if (epoch + 1) % 10 == 0:
            # print(outputs, flow_label)
            print('Epoch: [{}/{}],  Train loss: {}, Test loss: {}, Running time:{}'
                  .format(epoch + 1, args.num_epochs, ep_train_loss, ep_test_loss,end_time-start_time))
    # NOTE(review): the label below reads "validation set set" (duplicated word)
    # in the rendered chart — runtime string, left unchanged here.
    DataProcesser.Line_chart_generator(line_x,line_y1,title='train loss')
    DataProcesser.Line_chart_generator(line_x, line_y2,title='test loss',label=u'loss of validation set set')