import torch
import numpy as np
import dgl
import torch.nn.functional as F
import random
import pdb
import time
import argparse
import os
from sklearn.metrics import r2_score
import tee
import matplotlib.pyplot as plt

from data_graph import data_train, data_test

from model import TimingGCN, TimingGat
from config import device, batch_size, train_times, learning_rate, if_split, if_create_slack


# Restrict which GPU(s) CUDA may use; `device` is a string from config.py
# (must be set before any CUDA context is created).
os.environ["CUDA_VISIBLE_DEVICES"] = device

if if_split:
    # Cap the CUDA caching allocator's split size to reduce memory
    # fragmentation when training on large graphs (config-controlled).
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"

# Example invocation:
# nohup python3 train.py --checkpoint change_mlp_net_delay_gat_global_feature --model gat &

# Command-line interface: evaluation vs. training is selected by --test_iter.
parser = argparse.ArgumentParser()
parser.set_defaults(test_iter=False)
parser.add_argument(
    '--test_iter', type=int,
    help='If specified, test a saved model instead of training')
parser.add_argument(
    '--checkpoint', type=str,
    help='If specified, the log and model would be saved to/loaded from that checkpoint directory')
parser.add_argument(
    '--model', type=str, default='gcn', choices=['gcn', 'gat'],
    help='Specify the model to use: TimingGCN or TimingGat')

# Parse command-line arguments and instantiate the requested architecture.
# argparse's `choices` guarantees args.model is one of the factory keys.
args = parser.parse_args()
_model_factories = {'gcn': TimingGCN, 'gat': TimingGat}
model = _model_factories[args.model]()
print(f"choose model {args.model}")

model.cuda()

def test_netdelay(model):
    """Report per-design net-delay R2 scores and inference time.

    Runs `model` in eval mode with gradients disabled, compares predicted
    net delays against the `n_net_delays_log` node ground truth, prints the
    R2 score and wall-clock inference time for every design in the training
    and test dictionaries, then prints the average R2 per dictionary.

    Args:
        model: timing GNN whose forward(g, ts, groundtruth=False) returns
               per-node net-delay predictions (same shape as the truth
               tensor once flattened).
    """
    print('###net delay###')
    model.eval()
    with torch.no_grad():
        def test_dict(data):
            # Running sum of per-design R2 so we can report the average.
            r2_netdelay_sum = 0
            for k, (g, ts) in data.items():
                # Synchronize around the forward pass so the reported time
                # measures completed GPU work, not just kernel launches.
                torch.cuda.synchronize()
                time_s = time.time()
                pred = model(g, ts, groundtruth=False)
                torch.cuda.synchronize()
                time_t = time.time()

                truth = g.ndata['n_net_delays_log']
                r2 = r2_score(truth.cpu().numpy().reshape(-1),
                              pred.cpu().numpy().reshape(-1))
                print('{:15} r2 {:1.5f}, time {:2.5f}'.format(k, r2, time_t - time_s))
                r2_netdelay_sum += r2
            # Guard the average: an empty dataset previously raised
            # ZeroDivisionError here.
            if data:
                r2_netdelay_average = r2_netdelay_sum / len(data)
                print(f'net_delay_Avg : {r2_netdelay_average}')
        print('======= Training dataset ======')
        test_dict(data_train)
        print('======= Test dataset ======')
        test_dict(data_test)

def train(model, args):
    """Train `model` on `data_train` with periodic evaluation/checkpointing.

    Each epoch accumulates gradients over a random mini-batch of designs
    and takes a single Adam step. Every 20 epochs (and at epoch 0) the test
    loss is computed; checkpoints are saved at epoch 0, every 200 epochs,
    or late in training once the test loss drops below 2.

    Args:
        model: the timing GNN (TimingGCN/TimingGat), already on GPU.
        args:  parsed CLI arguments; `args.checkpoint` (if set) names the
               directory under ./checkpoints/ to save weights into.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Hoisted out of the epoch loop: the item list is loop-invariant.
    data_train_list = list(data_train.items())

    for e in range(train_times):
        model.train()
        train_loss_tot_net_delays = 0

        optimizer.zero_grad()

        # NOTE(review): random.sample raises ValueError if
        # batch_size > len(data_train); assumed to hold via config.
        for k, (g, ts) in random.sample(data_train_list, batch_size):
            pred_net_delays = model(g, ts)
            loss_net_delays = F.mse_loss(pred_net_delays, g.ndata['n_net_delays_log'])
            train_loss_tot_net_delays += loss_net_delays.item()
            # Accumulate gradients across the batch; single step below.
            loss_net_delays.backward()

        optimizer.step()

        # Periodic evaluation: epoch 0 and every 20th epoch thereafter.
        if e == 0 or e % 20 == 19:
            with torch.no_grad():
                model.eval()
                test_loss_tot_net_delays = 0

                for k, (g, ts) in data_test.items():
                    pred_net_delays = model(g, ts)
                    test_loss_tot_net_delays += F.mse_loss(
                        pred_net_delays, g.ndata['n_net_delays_log']).item()

                # Fixed: original format string had an unbalanced ')'.
                print('Epoch {}, net delay {:.6f}/{:.6f}'.format(
                    e,
                    train_loss_tot_net_delays / batch_size,
                    test_loss_tot_net_delays / len(data_test)))

            if e == 0 or e % 200 == 199 or (e > 2000 and test_loss_tot_net_delays / len(data_test) < 2):
                if args.checkpoint:
                    save_path = './checkpoints/{}/{}.pth'.format(args.checkpoint, e)
                    torch.save(model.state_dict(), save_path)
                    print('saved model to', save_path)
                try:
                    test_netdelay(model)
                # Renamed from `e`: the old name shadowed the epoch loop
                # variable (and Python deletes it after the handler).
                except ValueError as err:
                    print(err)
                    print('Error testing, but ignored')

if __name__ == '__main__':
    # Re-parse so running as a script always reflects the current argv.
    args = parser.parse_args()

    if args.test_iter:
        # Evaluation-only mode: restore a saved checkpoint and score it.
        assert args.checkpoint, 'no checkpoint dir specified'
        ckpt = './checkpoints/{}/{}.pth'.format(args.checkpoint, args.test_iter)
        model.load_state_dict(torch.load(ckpt))
        test_netdelay(model)
    elif args.checkpoint:
        # Training mode with persistence: mirror stdout/stderr to log files.
        print('saving logs and models to ./checkpoints/{}'.format(args.checkpoint))
        os.makedirs('./checkpoints/{}/log'.format(args.checkpoint))  # exist not ok
        stdout_f = './checkpoints/{}/log/stdout.log'.format(args.checkpoint)
        stderr_f = './checkpoints/{}/log/stderr.log'.format(args.checkpoint)
        with tee.StdoutTee(stdout_f), tee.StderrTee(stderr_f):
            train(model, args)
    else:
        # Training mode without persistence.
        print('No checkpoint is specified. abandoning all model checkpoints and logs')
        train(model, args)