import torch
from torch_geometric.datasets import TUDataset
from torch_geometric.data import Data
from torch_geometric import utils
import torch.nn.functional as F
import argparse
import os
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
from torch.utils.data import random_split, Subset, ConcatDataset
import pdb
import sys
import src.datasets as datasets
import src.models as models
from src.logger import Logger
from src.datasets import ISPD15Dataset, MyDataset, CNNDataset
from src.loader import DataLoader

# Command-line configuration.  Note the train/eval code below reads `args`
# as a module-level global.
parser = argparse.ArgumentParser()

parser.add_argument('--seed', type=int, default=777, help='seed')
parser.add_argument('--device', type=str, default='cuda:2', help='device')
parser.add_argument('--model', type=str, default='ANet', help='which model to use')
parser.add_argument('--batch_size', type=int, default=1, help='batch size')
parser.add_argument('--test_batch_size', type=int, default=1, help='test batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--step_size', type=int, default=50, help='learning rate decay step')
parser.add_argument('--lr_decay', type=float, default=0.9, help='learning rate decay')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay')
parser.add_argument('--nhid', type=int, default=128, help='hidden size')
parser.add_argument('--layers', type=int, default=3, help='conv layers')
parser.add_argument('--pooling_ratio', type=float, default=0.1, help='pooling ratio')
parser.add_argument('--dropout_ratio', type=float, default=0.5, help='dropout ratio')
parser.add_argument('--dataset_path', type=str, default='data')
parser.add_argument('--dataset', type=str, default='DesignSet')
parser.add_argument('--epochs', type=int, default=2000, help='maximum number of epochs')
parser.add_argument('--patience', type=int, default=1000, help='patience for earlystopping')
parser.add_argument('--save_dir', type=str, default='save')
parser.add_argument('--goon', action='store_true', help='continue training')
parser.add_argument('--checkp', type=str, default='test.pth')
parser.add_argument('--classes', type=int, default=20, help='num classes')
parser.add_argument('--design', type=str, default='all', help='which design to train')
parser.add_argument('--loss', type=str, default='CROSS', help='loss func')
parser.add_argument('--acc', type=str, default='eq', help='accuracy func')
# NOTE(review): 'store_true' combined with default=True makes --graph_reg
# always on and the flag itself a no-op; confirm whether the default should
# be False (kept as-is to preserve current behavior).
parser.add_argument('--graph_reg', action='store_true', default=True, help='regression')
parser.add_argument('--cell_reg', action='store_true', help='regression')
parser.add_argument('--label', type=str, default='wl', help='label')
parser.add_argument('--use_real_pos', action='store_true', help='whether use real pos')
parser.add_argument('--attention', action='store_true', help='whether use attention')
args = parser.parse_args()
torch.manual_seed(args.seed)

# Regression modes override the loss/accuracy selection.
if args.graph_reg:
    args.loss = 'MAE'
    args.acc = 'rel'

if args.cell_reg:
    args.loss = 'CMSE'
    args.acc = 'Crel'

def build_loss(args):
    """Return a loss callable ``f(out, data) -> Tensor`` selected by ``args.loss``.

    Every closure reads the target tensor from ``data`` via
    ``getattr(data, args.label)``.  Supported names: 'MAE', 'MSE', 'CMSE'
    (cell-level), 'CROSS'.

    Raises:
        ValueError: if ``args.loss`` names no known loss function (the
            original printed a message and returned None, which crashed
            later with an opaque "NoneType is not callable").
    """
    def mae_loss(out, data):
        y = getattr(data, args.label)
        return F.l1_loss(out.view(-1), y.view(-1))

    def mse_loss(out, data):
        y = getattr(data, args.label)
        return F.mse_loss(out.view(-1), y.view(-1))

    def cmse_loss(out, data):
        # NOTE(review): despite the 'CMSE' name this computes a summed L1
        # (absolute error), not a squared error — confirm this is intended.
        y = getattr(data, args.label)
        return F.l1_loss(out, y, reduction='sum')

    def cross_entropy_loss(out, data):
        y = getattr(data, args.label)
        return F.cross_entropy(out, y.view(-1,))

    # Dispatch table instead of an if/elif chain.
    losses = {
        'MAE': mae_loss,
        'MSE': mse_loss,
        'CMSE': cmse_loss,
        'CROSS': cross_entropy_loss,
    }
    try:
        return losses[args.loss]
    except KeyError:
        raise ValueError('Invalid loss function: {}'.format(args.loss)) from None


def build_acc(args):
    """Return an accuracy callable ``f(out, data) -> Tensor`` selected by ``args.acc``.

    Supported names: 'rel' (mean relative accuracy ``1 - |1 - out/y|``),
    'Crel' (same with a small epsilon on the denominator), 'eq'
    (classification exact-match rate).

    Raises:
        ValueError: if ``args.acc`` names no known accuracy function (the
            original printed a message and returned None).
    """
    def rel_acc(out, data):
        # No epsilon here — assumes the labels are never zero; TODO confirm.
        y = getattr(data, args.label)
        return torch.mean(1 - torch.abs(1 - out.view(-1) / (y.view(-1))))

    def crel_acc(out, data):
        y = getattr(data, args.label)
        return torch.mean(1 - torch.abs(1 - out.view(-1) / (y.view(-1) + 0.00001)))

    def eq_acc(out, data):
        y = getattr(data, args.label)
        return torch.eq(torch.argmax(out, dim=1).view(-1), y.view(-1)).float().mean()

    accs = {'rel': rel_acc, 'Crel': crel_acc, 'eq': eq_acc}
    try:
        return accs[args.acc]
    except KeyError:
        raise ValueError('Invalid acc function: {}'.format(args.acc)) from None


def build_loader(design, train_ratio=0.8):
    """Construct train/test DataLoaders for one design, or for all designs.

    When ``design == 'all'``, the dataset's predefined train/test design
    lists are concatenated; otherwise the single design's samples are split
    randomly by ``train_ratio``.  Also mutates the global ``args`` with
    ``num_features`` / ``num_classes`` as a side effect.
    """
    dataset_cls = getattr(datasets, args.dataset)
    dataset = dataset_cls(args.dataset_path)
    args.num_features = dataset.num_features

    # Regression targets override the classification class count.
    if args.graph_reg:
        args.num_classes = 1
    if args.cell_reg:
        args.num_classes = 2

    if design == 'all':
        def subset_for(name):
            # Each design occupies a contiguous index range [ptr, ptr + file_num).
            begin = dataset.ptr[name]
            return Subset(dataset, range(begin, begin + dataset.file_num[name]))

        train_names = dataset.train_file_names
        test_names = dataset.test_file_names
        train_set = ConcatDataset([subset_for(name) for name in train_names])
        test_set = ConcatDataset([subset_for(name) for name in test_names])
        num_training = sum(dataset.file_num[name] for name in train_names)
        num_testing = sum(dataset.file_num[name] for name in test_names)
    else:
        num_training = int(dataset.file_num[design] * train_ratio)
        num_testing = dataset.file_num[design] - num_training
        begin = dataset.ptr[design]
        design_set = Subset(dataset, range(begin, begin + dataset.file_num[design]))
        train_set, test_set = random_split(design_set, [num_training, num_testing])

    print("Total %d training data, %d testing data."%(num_training,num_testing),flush=True)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True)
    return train_loader, test_loader


def build_model():
    """Instantiate ``args.model`` on ``args.device`` together with its Adam
    optimizer and StepLR learning-rate scheduler."""
    model_cls = getattr(models, args.model)
    net = model_cls(args).to(args.device)
    print(net)
    opt = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=args.step_size, gamma=args.lr_decay)
    return net, opt, sched


def build_log():
    """Create a timestamped save directory, redirect stdout into it, and
    return the ``(best_model_path, last_model_path)`` checkpoint paths.

    Side effects: mutates ``args.save_dir``, creates the directory, and
    replaces ``sys.stdout`` with a ``Logger`` writing into it.
    """
    st = time.strftime("%b:%d:%X",time.localtime())
    if args.graph_reg:
        predict_type = 'graph'
    elif args.cell_reg:
        predict_type = 'cell'
    else:
        predict_type = 'cla'
    args.save_dir = os.path.join(args.save_dir,'{}_{}_{}_{}'.format(args.model,predict_type,args.design,st))
    os.makedirs(args.save_dir, exist_ok=True)
    # Redirect stdout so every subsequent print lands in the log file too.
    sys.stdout = Logger(path=args.save_dir)
    # Record the full configuration at the top of the log.
    print(args)
    # Fixed checkpoint names; the timestamp already lives in the directory
    # name (the original called '.format(st)' on placeholder-free strings,
    # which was a no-op).
    best_model_path = os.path.join(args.save_dir,'best.pth')
    last_model_path = os.path.join(args.save_dir,'last.pth')
    return best_model_path, last_model_path


# preparing
# Build the logging dir first so subsequent prints are captured by Logger,
# then the data loaders, model/optimizer/scheduler, and the loss/accuracy
# closures selected by the command-line arguments.
best_model_path, last_model_path = build_log()
train_loader, test_loader = build_loader(args.design)
model, optimizer, schedule = build_model()
criterion = build_loss(args)
accuracy = build_acc(args)

start = 0        # first epoch index (overwritten when resuming with --goon)
min_loss = 1e10  # best validation loss seen so far
patience = 0     # epochs since the last validation improvement

def test(model, loader):
    """Evaluate ``model`` over all batches in ``loader``.

    Returns ``(mean_accuracy, mean_loss)`` averaged per batch, computed in
    eval mode with gradients disabled.  Uses the module-level ``accuracy``
    and ``criterion`` closures and ``args.device``.
    """
    total_acc = 0.
    total_loss = 0.
    model.eval()
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(args.device)
            pred = model(batch)
            total_acc += accuracy(pred, batch).item()
            total_loss += criterion(pred, batch).item()
    n_batches = len(loader)
    return total_acc / n_batches, total_loss / n_batches


def test_design(model, design):
    """Evaluate ``model`` on every sample of a single ``design``.

    ``train_ratio=0.`` routes the whole design subset into the test loader.
    """
    loader = build_loader(design, train_ratio=0.)[1]
    return test(model, loader)


# Optionally resume training from a checkpoint produced by a previous run.
if args.goon:
    checkp = torch.load(args.checkp)
    model.load_state_dict(checkp['model'])
    optimizer.load_state_dict(checkp['optimizer'])
    min_loss = checkp['min_loss']
    start = checkp['epoch'] + 1
    patience = checkp['patience']
    print('load model from {}, saved at epoch {}'.format(args.checkp,start - 1))
    # BUG FIX: test() returns exactly (acc, loss); the original unpacked
    # four values here, raising ValueError on every resume.
    val_acc, val_loss = test(model, test_loader)
    print("Val loss:\t{}\tVal accuracy:\t{}\t".format(val_loss,val_acc,),flush=True)


# Main training loop: per-epoch validation, last/best checkpointing, and
# patience-based early stopping.
for epoch in range(start, args.epochs):
    model.train()
    tt = time.time()
    Ave_loss = 0.
    Ave_cor = 0.

    for i, data in enumerate(train_loader):
        data = data.to(args.device)
        out = model(data)
        loss = criterion(out, data)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            Ave_loss += loss.mean().item()
            Ave_cor += accuracy(out, data).item()

    schedule.step()
    val_acc, val_loss = test(model, test_loader)

    print("[Epoch\t{}]\tTrain loss:\t{:.4f}\tTrain accuracy:\t{:.4f}\tVal loss:\t{:.4f}\tVal accuracy:\t{:.4f}\tTotal time:{:.2f}\tlr:{:.5f}".format(
        epoch,Ave_loss/len(train_loader),Ave_cor/len(train_loader),val_loss,val_acc,time.time()-tt,optimizer.param_groups[0]['lr']),flush=True)

    # BUG FIX: update the early-stopping state BEFORE building the checkpoint.
    # The original saved the stale pre-update 'min_loss'/'patience', so a run
    # resumed from either checkpoint carried an outdated best loss.
    improved = val_loss < min_loss
    if improved:
        min_loss = val_loss
        patience = 0
    else:
        patience += 1

    state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(),
             'epoch': epoch, 'patience': patience, 'min_loss': min_loss}
    torch.save(state, last_model_path)  # always keep the latest epoch

    if improved:
        torch.save(state, best_model_path)
        print("Model saved at epoch{}".format(epoch),flush=True)

    if patience > args.patience:
        break


