import torch
import numpy as np
import dgl
import torch.nn.functional as F
import random
import pdb
import time
import argparse
import os
from sklearn.metrics import r2_score
import tee
import matplotlib.pyplot as plt

from data_graph import data_train, data_test
from model import TimingGCN
from config import device, batch_size, train_times, learning_rate, if_split, if_create_slack
from sklearn.metrics import mean_squared_error

# Pin CUDA to the device selected in config.py (must be set before CUDA init).
os.environ["CUDA_VISIBLE_DEVICES"] = device

# Optionally cap the allocator split size to reduce fragmentation on big graphs.
if if_split:
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"

# Example invocations:
#python3 train_gnn.py --checkpoint 2_22--17_40
#python3 train_gnn.py --test_iter 15799 --checkpoint paper_base

parser = argparse.ArgumentParser()
# Default False so a plain run (no --test_iter) takes the training path below.
parser.set_defaults(test_iter=False)
parser.add_argument(
    '--test_iter', type=int,
    help='If specified, test a saved model instead of training')
parser.add_argument(
    "--test_end", type = int
)
parser.add_argument(
    '--checkpoint', type=str,
    help='If specified, the log and model would be saved to/loaded from that checkpoint directory')
# All three supervision signals are enabled unless explicitly disabled below.
parser.set_defaults(netdelay=True, celldelay=True, groundtruth=True)
parser.add_argument(
    '--no_netdelay', dest='netdelay', action='store_false',
    help='Disable the net delay training supervision (default enabled)')
parser.add_argument(
    '--no_celldelay', dest='celldelay', action='store_false',
    help='Disable the cell delay training supervision (default enabled)')
parser.add_argument(
    '--no_groundtruth', dest='groundtruth', action='store_false',
    help='Disable ground-truth breakdown in training (default enabled)')


# Single global model instance shared by train/test entry points.
model = TimingGCN()
model.cuda()

def test(model):    # arrival time (at)
    """Report per-design and average R2 of the predicted arrival times.

    Runs the model without ground-truth teacher forcing, compares the first
    four columns of the at/slew head against ``g.ndata['n_atslew'][:, :4]``,
    and — when ``if_create_slack`` is set — dumps endpoint hold/setup slacks
    to ./checkpoints/slacks/<design>.npz for later scatter plots.

    Args:
        model: a TimingGCN already on the GPU; switched to eval mode here.
    """
    print('###arrive time###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            r2_arrival_time_sum = 0

            for k, (g, ts) in data.items():
                torch.cuda.synchronize()
                time_s = time.time()

                # Final propagated prediction; columns 0-3 are arrival times.
                pred_nf2 = model(g, ts, groundtruth=False)[2]
                pred = pred_nf2[:, :4].cpu()

                torch.cuda.synchronize()
                time_t = time.time()
                truth = g.ndata['n_atslew'][:, :4].cpu()

                ##################################################
                # Dump slacks for scatter plots.
                # Hold slack  = data arrival time  - data required time
                # Setup slack = data required time - data arrival time
                ##################################################
                if if_create_slack:
                    endpoints_nodes = ts['endpoints'].data
                    rats = g.ndata['n_rats']
                    # Reuse pred_nf2 rather than running two extra full
                    # forward passes just to slice two column ranges.
                    # Hold slacks (columns 0-1): arrival - required.
                    se_truth = (g.ndata['n_atslew'][:, :2][endpoints_nodes].cpu()
                                - rats[:, :2][endpoints_nodes].cpu()).numpy().reshape(-1)
                    se = (pred_nf2[:, :2][endpoints_nodes].cpu()
                          - rats[:, :2][endpoints_nodes].cpu()).numpy().reshape(-1)
                    # Setup slacks (columns 2-3): required - arrival.
                    sl_truth = (rats[:, 2:4][endpoints_nodes].cpu()
                                - g.ndata['n_atslew'][:, 2:4][endpoints_nodes].cpu()).numpy().reshape(-1)
                    sl = (rats[:, 2:4][endpoints_nodes].cpu()
                          - pred_nf2[:, 2:4][endpoints_nodes].cpu()).numpy().reshape(-1)

                    os.makedirs('./checkpoints/slacks/', exist_ok=True)
                    file_name = f'./checkpoints/slacks/{k}.npz'
                    np.savez(file_name, arr_0=se, arr_1=sl, arr_2=se_truth, arr_3=sl_truth)
                ###################################################

                r2 = r2_score(truth.numpy().reshape(-1),
                              pred.numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(k, r2, time_t - time_s))

                r2_arrival_time_sum = r2 + r2_arrival_time_sum
            r2_arrival_time_average = r2_arrival_time_sum/len(data)
            print(f'arrive_time_Avg : {r2_arrival_time_average}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def test_endpoint_arrival_time(model):
    """Report R2 of predicted arrival times restricted to endpoint nodes."""
    print('###endpoint arrival time###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            scores = []
            for name, (graph, topo) in data.items():
                torch.cuda.synchronize()
                start = time.time()

                # Compare only the endpoint rows of the first four columns.
                endpoint_idx = topo['endpoints'].data
                target = graph.ndata['n_atslew'][:, :4][endpoint_idx].cpu()
                prediction = model(graph, topo, groundtruth=False)[2][:, :4][endpoint_idx].cpu()

                torch.cuda.synchronize()
                elapsed = time.time() - start

                score = r2_score(target.numpy().reshape(-1),
                                 prediction.numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(name, score, elapsed))
                scores.append(score)
            print(f'endpoint arrival time : {sum(scores) / len(data)}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def test_netdelay(model):    # net delay
    """Report per-design and average R2 of the net-delay head (output 0)."""
    print('###net delay###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            scores = []
            for name, (graph, topo) in data.items():
                torch.cuda.synchronize()
                start = time.time()
                prediction = model(graph, topo, groundtruth=False)[0]
                torch.cuda.synchronize()
                elapsed = time.time() - start

                target = graph.ndata['n_net_delays_log']
                score = r2_score(target.cpu().numpy().reshape(-1),
                                 prediction.cpu().numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(name, score, elapsed))
                scores.append(score)
            print(f'net_delay_Avg : {sum(scores) / len(data)}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def test_celldelay(model):    # cell delay
    """Report per-design and average R2 of the cell-delay head (output 1)."""
    print('###cell delay###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            scores = []
            for name, (graph, topo) in data.items():
                torch.cuda.synchronize()
                start = time.time()
                prediction = model(graph, topo, groundtruth=False)[1].cpu()
                torch.cuda.synchronize()
                elapsed = time.time() - start

                target = graph.edges['cell_out'].data['e_cell_delays']
                score = r2_score(target.cpu().numpy().reshape(-1),
                                 prediction.numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(name, score, elapsed))
                scores.append(score)
            print(f'cell_delay_Avg : {sum(scores) / len(data)}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def test_slew(model):    # slew
    """Report per-design and average R2 of the slew columns (4:) of the at/slew head."""
    print('###slew###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            scores = []
            for name, (graph, topo) in data.items():
                torch.cuda.synchronize()
                start = time.time()
                prediction = model(graph, topo, groundtruth=False)[2][:, 4:].cpu()
                torch.cuda.synchronize()
                elapsed = time.time() - start

                target = graph.ndata['n_atslew'][:, 4:].cpu()
                score = r2_score(target.numpy().reshape(-1),
                                 prediction.numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(name, score, elapsed))
                scores.append(score)
            print(f'slew_Avg : {sum(scores) / len(data)}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def test_require_time(model):    # require time
    """Report R2 between the model's slew output and g.ndata['n_rats'][:, 4:].

    NOTE(review): this looks like a copy-paste of test_slew. `pred` is the
    slew slice of the at/slew head ([2][:, 4:]), not a required-time
    prediction, and elsewhere in this file 'n_rats' is only ever indexed up
    to column 4 (see the slack code in test()), so 'n_rats'[:, 4:] may be
    an empty slice. Confirm the intended slices before trusting these
    numbers. This function is not called from __main__.
    """
    print('###require time###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            r2_require_time_sum = 0
            r2_require_time_average=0
            for k, (g, ts) in data.items():
                torch.cuda.synchronize()
                time_s = time.time()
                # Slew columns of the at/slew head -- see NOTE in docstring.
                pred = model(g, ts, groundtruth=False)[2][:, 4:].cpu()
                torch.cuda.synchronize()
                time_t = time.time()
                truth = g.ndata['n_rats'][:, 4:].cpu()

                r2 = r2_score(truth.cpu().numpy().reshape(-1),
                              pred.cpu().numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(k, r2, time_t - time_s))
                r2_require_time_sum = r2 + r2_require_time_sum
            r2_require_time_average = r2_require_time_sum/len(data)
            print(f'require_time_Avg : {r2_require_time_average}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def draw_loss(train_loss_tot_ats_list: list, epochs: list, title: str, save_path: str):
    """Plot a loss curve over epochs and save it to an image file.

    Args:
        train_loss_tot_ats_list: loss values (y axis), one per recorded sample.
        epochs: epoch index for each loss value (x axis); same length as losses.
        title: used both as the figure title and the curve's legend label.
        save_path: destination image path; parent directories are created.
    """
    # Create the target directory up front so savefig cannot fail on a missing
    # checkpoint/log folder (e.g. when training runs without --checkpoint).
    os.makedirs(os.path.dirname(save_path) or '.', exist_ok=True)

    plt.figure()
    plt.plot(epochs, train_loss_tot_ats_list, label=title)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title(title)
    plt.legend()  # the label= above is only rendered with an explicit legend

    plt.savefig(save_path)
    plt.close()

def train(model, args):
    """Train the timing GCN with one optimizer step per epoch.

    Each epoch samples `batch_size` designs from data_train, accumulates
    gradients over the sampled designs, then steps Adam once. Supervision on
    net delays / cell delays / ground-truth breakdown is toggled by args.
    Every 20 epochs the test set is evaluated; checkpoints (if args.checkpoint
    is set) are saved every 200 epochs or when the propagated at-loss drops
    below 6 after epoch 6000.

    Args:
        model: TimingGCN on the GPU.
        args: parsed CLI namespace (netdelay, celldelay, groundtruth, checkpoint).
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    train_loss_tot_ats_list = []
    epoch_list = []
    # Hoisted out of the epoch loop: the training dict does not change.
    data_train_list = list(data_train.items())

    for e in range(train_times):
        model.train()
        train_loss_tot_net_delays, train_loss_tot_cell_delays, train_loss_tot_ats = 0, 0, 0

        optimizer.zero_grad()

        for k, (g, ts) in random.sample(data_train_list, batch_size):
            pred_net_delays, pred_cell_delays, pred_atslew = model(g, ts, groundtruth=args.groundtruth)
            loss_net_delays, loss_cell_delays = 0, 0

            if args.netdelay:
                loss_net_delays = F.mse_loss(pred_net_delays, g.ndata['n_net_delays_log'])
                train_loss_tot_net_delays += loss_net_delays.item()

            if args.celldelay:
                loss_cell_delays = F.mse_loss(pred_cell_delays, g.edges['cell_out'].data['e_cell_delays'])
                train_loss_tot_cell_delays += loss_cell_delays.item()
            else:
                # Workaround for a dgl bug: if a forward output is excluded from
                # the backward graph, GPU memory blows up. Keep a zero-weight
                # gradient path through the cell-delay head so it contributes
                # nothing to the gradient but stays in the graph.
                loss_cell_delays = torch.sum(pred_cell_delays) * 0.0

            loss_ats = F.mse_loss(pred_atslew, g.ndata['n_atslew'])
            train_loss_tot_ats += loss_ats.item()

            # Record the running at-loss for plotting.
            # NOTE(review): the divisor 100 looks like a stale hard-coded batch
            # size; presumably it should be batch_size -- confirm before changing.
            train_loss_tot_ats_list.append(train_loss_tot_ats / 100)
            epoch_list.append(e)

            if e % 200 == 0:
                draw_loss(train_loss_tot_ats_list, epoch_list, 'train_loss_tot_ats',
                          f'./checkpoints/{args.checkpoint}/log/{k}_train_loss_tot_ats.png')

            # Accumulate gradients; optimizer.step() runs once per epoch below.
            (loss_net_delays + loss_cell_delays + loss_ats).backward()

        optimizer.step()

        # Periodic evaluation on the held-out designs.
        if e == 0 or e % 20 == 19:
            with torch.no_grad():
                model.eval()
                test_loss_tot_net_delays, test_loss_tot_cell_delays, test_loss_tot_ats = 0, 0, 0
                test_loss_tot_ats_prop = 0

                for k, (g, ts) in data_test.items():
                    # Evaluate both teacher-forced and fully propagated modes.
                    pred_net_delays, pred_cell_delays, pred_atslew = model(g, ts, groundtruth=True)
                    _, _, pred_atslew_prop = model(g, ts, groundtruth=False)

                    if args.netdelay:
                        test_loss_tot_net_delays += F.mse_loss(pred_net_delays, g.ndata['n_net_delays_log']).item()
                    if args.celldelay:
                        test_loss_tot_cell_delays += F.mse_loss(pred_cell_delays, g.edges['cell_out'].data['e_cell_delays']).item()
                    test_loss_tot_ats += F.mse_loss(pred_atslew, g.ndata['n_atslew']).item()
                    test_loss_tot_ats_prop += F.mse_loss(pred_atslew_prop, g.ndata['n_atslew']).item()

                print('Epoch {}, net delay {:.6f}/{:.6f}, cell delay {:.6f}/{:.6f}, at {:.6f}/({:.6f}, {:.6f})'.format(
                    e,
                    train_loss_tot_net_delays / batch_size,
                    test_loss_tot_net_delays / len(data_test),
                    train_loss_tot_cell_delays / batch_size,
                    test_loss_tot_cell_delays / len(data_test),
                    train_loss_tot_ats / batch_size,
                    test_loss_tot_ats / len(data_test),
                    test_loss_tot_ats_prop / len(data_test)))

            if e == 0 or e % 200 == 199 or (e > 6000 and test_loss_tot_ats_prop / len(data_test) < 6):
                if args.checkpoint:
                    save_path = './checkpoints/{}/{}.pth'.format(args.checkpoint, e)
                    torch.save(model.state_dict(), save_path)
                    print('saved model to', save_path)
                try:
                    test(model)
                    test_netdelay(model)
                except ValueError as err:  # was `as e`, which shadowed the epoch counter
                    print(err)
                    print('Error testing, but ignored')

if __name__ == '__main__':
    args = parser.parse_args()

    if args.test_iter:
        # Evaluate a saved checkpoint on every prediction head.
        assert args.checkpoint, 'no checkpoint dir specified'
        model.load_state_dict(torch.load('./checkpoints/{}/{}.pth'.format(args.checkpoint, args.test_iter)))
        test(model)
        test_netdelay(model)
        test_celldelay(model)
        test_slew(model)

    elif args.test_end:
        # Evaluate only endpoint arrival times of a saved checkpoint.
        assert args.checkpoint, 'no checkpoint dir specified'
        model.load_state_dict(torch.load('./checkpoints/{}/{}.pth'.format(args.checkpoint, args.test_end)))
        test_endpoint_arrival_time(model)

    else:
        # Training path; optionally tee stdout/stderr into the checkpoint dir.
        if args.checkpoint:
            print('saving logs and models to ./checkpoints/{}'.format(args.checkpoint))
            # Deliberately no exist_ok: refuse to clobber an existing run.
            os.makedirs('./checkpoints/{}/log'.format(args.checkpoint))
            stdout_f = './checkpoints/{}/log/stdout.log'.format(args.checkpoint)
            stderr_f = './checkpoints/{}/log/stderr.log'.format(args.checkpoint)
            with tee.StdoutTee(stdout_f), tee.StderrTee(stderr_f):
                train(model, args)
        else:
            print('No checkpoint is specified. abandoning all model checkpoints and logs')
            train(model, args)