import torch
from torch_geometric.datasets import TUDataset
from torch_geometric.data import Data
from torch_geometric import utils
import torch.nn.functional as F
import argparse
import os
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
from torch.utils.data import random_split, Subset, ConcatDataset
import pdb
import src.models as models
from src.models import SAGPoolh, SAGPoolh_comb, CNN
from src.datasets import ISPD15Dataset, MyDataset, CNNDataset, DesignSet
from src.loader import DataLoader

# Command-line configuration for evaluation. Defaults mirror the training
# setup so a saved checkpoint can be reloaded with a compatible model shape.
parser = argparse.ArgumentParser()

# Reproducibility / hardware.
parser.add_argument('--seed', type=int, default=2021, help='seed')
parser.add_argument('--device', type=str, default='cuda:2',help='device')
# Optimization hyper-parameters (unused during pure evaluation, but the
# model constructor may read them from `args`).
parser.add_argument('--batch_size', type=int, default=1,help='batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--step_size', type=int, default=50, help='learning rate decay step')
parser.add_argument('--lr_decay', type=float, default=0.5, help='learning rate decay')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay')
# Model architecture knobs consumed by the model class looked up below.
parser.add_argument('--nhid', type=int, default=128, help='hidden size')
parser.add_argument('--layers',type=int,default=3,help='conv layers')
parser.add_argument('--pooling_ratio', type=float, default=0.1,help='pooling ratio')
parser.add_argument('--dropout_ratio', type=float, default=0.5,help='dropout ratio')
parser.add_argument('--dataset_path', type=str, default='data')
parser.add_argument('--epochs', type=int, default=200,help='maximum number of epochs')
parser.add_argument('--patience', type=int, default=1000,help='patience for earlystopping')
parser.add_argument('--save_dir', type=str, default='save')
parser.add_argument('--goon', action='store_true',help='continue training')
# Checkpoint path; its directory name encodes the model class name
# (e.g. save/ANet_all_<timestamp>/best.pth -> model "ANet").
parser.add_argument('--checkp', type=str, default='save/ANet_all_Sep:15:01:16:59/best.pth')
parser.add_argument('--design', type=str,default='all',help='which design to test')
parser.add_argument('--use_real_pos', action='store_true', help='whether use real pos')
parser.add_argument('--attention', action='store_true', help='whether use attention')

args = parser.parse_args()
torch.manual_seed(args.seed)


def build_loader(design, train_ratio=0.):
    """Build train/test DataLoaders for one design or for all designs.

    Args:
        design: design name, or 'all' to split by design (the dataset's
            predefined train designs vs. test designs).
        train_ratio: fraction of a single design's files used for training
            (only meaningful when ``design != 'all'``).

    Returns:
        (train_loader, test_loader) tuple of project DataLoaders.

    Side effects: sets ``args.num_features`` / ``args.num_classes`` and
    prints the split sizes.
    """
    dataset = DesignSet(args.dataset_path)
    args.num_features = dataset.num_features
    args.num_classes = 1  # single regression target (HPWL)

    def _design_subset(name):
        # Contiguous slice of `dataset` holding every file of one design;
        # dataset.ptr gives the starting offset, dataset.file_num the count.
        start = dataset.ptr[name]
        return Subset(dataset, range(start, start + dataset.file_num[name]))

    if design == 'all':
        # Cross-design split: train designs and test designs are disjoint.
        train_set = ConcatDataset([_design_subset(d) for d in dataset.train_file_names])
        test_set = ConcatDataset([_design_subset(d) for d in dataset.test_file_names])
        num_training = sum(dataset.file_num[d] for d in dataset.train_file_names)
        num_testing = sum(dataset.file_num[d] for d in dataset.test_file_names)
    else:
        # Intra-design split: random train/test partition of one design.
        num_training = int(dataset.file_num[design] * train_ratio)
        num_testing = dataset.file_num[design] - num_training
        train_set, test_set = random_split(_design_subset(design),
                                           [num_training, num_testing])
    print("Total %d training data, %d testing data."%(num_training,num_testing),flush=True)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False,drop_last=True)
    test_loader = DataLoader(test_set,batch_size=args.batch_size,shuffle=False,drop_last=True)
    return train_loader, test_loader

def getset(dataset, design, train_ratio=0.):
    """Split `dataset` into train/test sets for one design or all designs.

    Args:
        dataset: design dataset exposing ``ptr`` (design -> start index),
            ``file_num`` (design -> file count), ``train_file_names`` and
            ``test_file_names``.
        design: design name, or 'all' to split by design.
        train_ratio: fraction of a single design's files used for training
            (only used when ``design != 'all'``; defaults to 0 to preserve
            the previous hard-coded behavior of an all-test split).

    Returns:
        (train_set, test_set, num_training, num_testing).
    """
    def _design_subset(name):
        # Contiguous slice of `dataset` holding every file of one design.
        start = dataset.ptr[name]
        return Subset(dataset, range(start, start + dataset.file_num[name]))

    if design == 'all':
        # Cross-design split: predefined train designs vs. test designs.
        train_set = ConcatDataset([_design_subset(d) for d in dataset.train_file_names])
        test_set = ConcatDataset([_design_subset(d) for d in dataset.test_file_names])
        num_training = sum(dataset.file_num[d] for d in dataset.train_file_names)
        num_testing = sum(dataset.file_num[d] for d in dataset.test_file_names)
    else:
        # Intra-design split: random train/test partition of one design.
        num_training = int(dataset.file_num[design] * train_ratio)
        num_testing = dataset.file_num[design] - num_training
        train_set, test_set = random_split(_design_subset(design),
                                           [num_training, num_testing])
    print("Total %d training data, %d testing data."%(num_training,num_testing),flush=True)
    return train_set, test_set, num_training, num_testing

# Recover the model class name from the checkpoint path convention
# save/<ModelName>_<design>_<timestamp>/best.pth.
log_dir = args.checkp.split('/')[1]
model_name = log_dir.split('_')[0]


# Look up the model class by name in src.models (e.g. "ANet" -> models.ANet).
Model = getattr(models,model_name)

# initialize model and optimizer 
train_loader, test_loader = build_loader(args.design)

def test(model, loader):
    """Evaluate `model` on every batch in `loader` and return the mean
    relative accuracy.

    Per batch, accuracy is 1 - |1 - pred/real| where `real` is the batch's
    HPWL label (`data.hpwl`); 1.0 means a perfect prediction. Prints the
    predicted and real value for each batch as it goes.

    Note: batches are moved to `args.device`; the loader is assumed
    non-empty (an empty loader would divide by zero, as before).
    """
    model.eval()
    cnt = 0
    correct = 0.
    with torch.no_grad():
        for data in loader:
            cnt += 1
            data = data.to(args.device)
            pred = model(data)
            # Relative-error metric; .item() on pred assumes batch_size 1.
            correct += torch.sum(1 - torch.abs(1 - pred / data.hpwl)).mean().item()
            print('predict:\t{:4f}\treal:\t{:4f}'.format(pred.item(), data.hpwl.item()))
    return correct / cnt

# Build the model, restore weights, and report accuracy on both splits.
model = Model(args).to(args.device)
# map_location lets a checkpoint saved on another GPU load onto args.device.
checkpoint = torch.load(args.checkp, map_location=args.device)
model.load_state_dict(checkpoint['model'])
test_acc = test(model, test_loader)
train_acc = test(model, train_loader)
# Bug fix: the original overwrote test_acc with the train-loader result and
# printed it as the test accuracy; report both values separately instead.
print("Train accuracy:{}".format(train_acc), flush=True)
print("Test accuracy:{}".format(test_acc), flush=True)
