import sys
import argparse
import torch.optim as optim
import torch.nn.functional as F
from dataset.dataloader import *
from network import *
from model.tgcn import TGCNModel
from dataset.dataset import SingleTraceDataset, TaskDataset
from learn2learn.algorithms import MAML
from utils.graph_util import calculate_laplacian_with_self_loop

# Training settings
# CLI configuration for the full pipeline: meta-training (MAML) on all trace
# types except one, few-shot fine-tuning on the held-out type, then testing.
parser = argparse.ArgumentParser()

# --- Data / task construction ---
parser.add_argument('--data_dir', type=str, default="data", help='Directory for dataset.')
parser.add_argument('--dataset' , default="sn", help='sn | tt | aiops | openstack | gaia')
parser.add_argument('--model' , default="v1", help='| v1 | v2: no metric | v3: spatial | gat')
parser.add_argument('--test_idx', type=int, default=4, help='Index of trace type for meta test.')
parser.add_argument('--shot', type=int, default=64, help='Batch size for support set.')
parser.add_argument('--query', type=int, default=64, help='Batch size for query set.')
parser.add_argument('--num_tasks', type=int, default=300, help='Num of tasks for meta-training.')
parser.add_argument('--few_num', type=int, default=200, help='Num of traces of test type.')

# --- Experiment bookkeeping (directories and file names) ---
parser.add_argument('--exp_name', type=str, default="meta", help='Experiment name.')
parser.add_argument('--exp_dir', type=str, default="experiment", help='Directory for Experiment.')
parser.add_argument('--model_fname', type=str, default="model.pt", help='Model file name.')
parser.add_argument('--log_fname', type=str, default="log.txt", help='Log file name.')
parser.add_argument('--result_fname', type=str, default="results.txt", help='Result file name.')

# --- Runtime switches ---
parser.add_argument('--no_cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fast_mode', action='store_true', default=False, help='Validate during training pass.')

# --- Optimization schedule ---
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs for meta-training.')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size.')
parser.add_argument('--adapt_step', type=int, default=1, help='Step to fast adaptation.')
parser.add_argument('--ft_epochs', type=int, default=10, help='Number of epochs for fine-tuning.')

# --- Learning rates / regularization ---
# lr drives the outer (meta) optimizer; meta_lr is the MAML inner-loop rate.
parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
parser.add_argument('--meta_lr', type=float, default=0.01, help='Learning rate for meta-learning.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')

parser.add_argument('--window', type=int, default=14, help='Window size for history traces.')

# --- Model hyper-parameters ---
parser.add_argument('--hidden', type=int, default=64, help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')

args = parser.parse_args()

# Tag the experiment name with a timestamp so repeated runs get distinct
# directories under exp_dir.
time_str = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%dT%H.%M.%S")
args.exp_name = args.exp_name + '_' + time_str
init_dl_programe(args)  # NOTE(review): presumably seeds RNGs and sets args.device -- defined in a wildcard-imported module
args.base_dir = os.path.join(args.exp_dir, args.exp_name)

# os.makedirs creates exp_dir and base_dir in one call, removing the
# check-then-create race of `if not exists: mkdir`. It still raises
# FileExistsError if base_dir itself already exists, matching the old
# os.mkdir behavior for a (timestamp-collision) duplicate run.
os.makedirs(args.base_dir)

# Root logger writes both to the per-experiment log file and to stderr,
# using the same bare-message format for each sink.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
message_format = logging.Formatter("%(message)s")
handlers = (
    logging.FileHandler(os.path.join(args.base_dir, args.log_fname), mode="a", encoding="utf-8"),
    logging.StreamHandler(sys.stderr),
)
for handler in handlers:
    handler.setFormatter(message_format)
    logger.addHandler(handler)


logging.info(f"Start experiment {args.exp_name}")


# Build all datasets. The trace type at `test_idx` is held out of
# meta-training and reused for few-shot fine-tuning (train/val split of its
# first `few_num` rows) and for the final anomaly test set.
if args.dataset == 'sn':
    endpoint_list, train_data_list, test_data_list, adj_list, label_data_list, rc_data_list = load_sn(os.path.join(args.data_dir, args.dataset))
    idx = args.test_idx
    # Meta-training tasks draw from every trace type except the held-out one.
    meta_dataset = TaskDataset(
        [train_data_list[i] for i in range(len(train_data_list)) if i != idx],
        [adj_list[i] for i in range(len(adj_list)) if i != idx],
        args.window,
        args.shot,
        args.query,
    )

    # Normalized graph Laplacian of the held-out type; reused by
    # fine_tune/validate/compute_test below.
    laplacian = calculate_laplacian_with_self_loop(adj_list[idx])
    lap = laplacian.to(args.device)  # NOTE(review): args.device presumably set by init_dl_programe -- confirm
    # Few-shot budget: only the first `few_num` traces of the held-out type.
    test_trace_data = train_data_list[idx][:args.few_num]
    p = int(test_trace_data.shape[0] * 0.7)  # 70/30 split point for train/val

    train_dataset = SingleTraceDataset(test_trace_data[0:p, :], args.window)
    train_loader = DataLoader(train_dataset, batch_size=args.shot)  # TODO: shuffle, num_workers

    val_dataset = SingleTraceDataset(test_trace_data[p:, :], args.window)
    val_loader = DataLoader(val_dataset, batch_size=args.query)

    # Test set carries labels (third item per batch) for anomaly scoring.
    test_dataset = SingleTraceDataset(test_data_list[idx], args.window, label_data_list[idx])
    test_loader = DataLoader(test_dataset, batch_size=1)

    logging.info(f"Meta Test {args.test_idx} | train: {len(train_dataset)} | valid: {len(val_dataset)} | test: {len(test_dataset)}")
else:
    raise ValueError(f"Unknown dataset {args.dataset }.")



# Model selection; only the 'v1' TGCN variant is wired up in this script.
if args.model == 'v1':
    model = TGCNModel(args.hidden).to(args.device)
else:
    raise ValueError(f"Unknown model {args.model}.")

# Split the model: the TGCN encoder is meta-updated directly, while the
# regressor head is wrapped in MAML so it can be cloned and fast-adapted
# per task with the inner-loop rate meta_lr.
feature_extractor = model.tgcn
base_learner = model.regressor
base_learner = MAML(base_learner, lr=args.meta_lr)

# NOTE(review): the assignment below rebinds the imported `torch.optim`
# module name to an optimizer *instance*; the rest of the file relies on
# `optim` being that instance (optim.step()/optim.zero_grad()), so the name
# cannot be changed here without touching those call sites. Consider
# renaming to `optimizer` file-wide.
all_parameters = list(feature_extractor.parameters()) + list(base_learner.parameters())
optim = optim.Adam(
    all_parameters,
    lr=args.lr
)


def fast_adapt(batch, feature_extractor, learner):
    """Adapt `learner` on the support set and return the query-set MSE.

    `batch` is (spt_x, spt_y, qry_x, qry_y, laplacian); every tensor is
    moved to args.device first. Both support and query features are
    extracted once, *before* any adaptation step, so adaptation only
    updates the regressor head, not the features.
    """
    spt_x, spt_y, qry_x, qry_y, laplacian = (t.to(args.device) for t in batch)

    support_feat = feature_extractor(spt_x, laplacian)
    query_feat = feature_extractor(qry_x, laplacian)

    # Inner loop: take adapt_step gradient steps on the support loss.
    for _ in range(args.adapt_step):
        support_pred = learner(support_feat).reshape(spt_y.shape)
        learner.adapt(F.mse_loss(support_pred, spt_y))

    # Outer (meta) objective: adapted learner's error on the query set.
    query_pred = learner(query_feat).reshape(qry_y.shape)
    return F.mse_loss(query_pred, qry_y)


def meta_train(epoch):
    """Run one meta-training epoch over `args.num_tasks` sampled tasks.

    For each task, a clone of the MAML-wrapped regressor is fast-adapted on
    the support set; the query losses' gradients accumulate across tasks
    into the shared parameters, and a single averaged meta-update is applied
    at the end of the epoch.

    Returns the mean (per-query-sample) meta-training error for logging.
    """
    t = time.time()
    model.train()
    meta_train_error = 0
    for i in range(args.num_tasks):
        learner = base_learner.clone()  # task-local copy; adaptation does not touch base
        batch = meta_dataset.sample()
        valid_error = fast_adapt(batch, feature_extractor, learner)
        valid_error.backward()  # accumulates into shared parameters' .grad
        meta_train_error += (valid_error.item() / args.query)

    meta_train_error /= args.num_tasks

    # Average accumulated gradients over tasks before the meta-step.
    # Guard against parameters that received no gradient this epoch
    # (p.grad is None), which would otherwise raise AttributeError.
    for p in all_parameters:
        if p.grad is not None:
            p.grad.data.mul_(1.0 / args.num_tasks)
    optim.step()
    optim.zero_grad()

    logging.info('Meta-train Epoch: {}/{} | '.format(epoch, args.epochs) +
                 'Error: {:.4f} | '.format(meta_train_error) +
                 'Time: {:.4f}s'.format(time.time() - t))

    return meta_train_error


def fine_tune(epoch):
    """Run one fine-tuning epoch on the held-out trace type's train split.

    Returns the epoch's training loss when --fast_mode is set, otherwise
    the validation loss from validate().
    """
    t = time.time()
    model.train()
    loss_train = 0
    for batch_x, batch_y, _ in train_loader:
        bz = batch_x.shape[0]
        batch_x = batch_x.to(args.device)
        batch_y = batch_y.to(args.device)

        out = model(batch_x, lap)  # (B, N_i)
        loss = F.mse_loss(out, batch_y)
        loss.backward()
        optim.step()
        optim.zero_grad()
        loss_train += (loss.item() / bz)

    loss_train /= len(train_loader)

    # Fix: report progress against ft_epochs (the fine-tuning budget driven
    # by the caller's loop), not the meta-training epoch count.
    logging.info('Fine-tune Epoch: {}/{} | '.format(epoch, args.ft_epochs) +
                 'Loss: {:.4f} | '.format(loss_train) +
                 'Time: {:.4f}s'.format(time.time() - t))

    if args.fast_mode:
        return loss_train

    val_loss, _ = validate()
    return val_loss


def validate(worst=False):
    """Evaluate the model on the validation split.

    Returns (mean per-sample loss, worst per-sample loss). The worst loss
    is only computed when worst=True (it is later used as the anomaly
    threshold in compute_test); otherwise 0 is returned in its place.
    """
    t = time.time()
    model.eval()
    with torch.no_grad():
        loss_total = 0
        worst_loss = 0
        for batch_x, batch_y, _ in val_loader:
            bz = batch_x.shape[0]
            batch_x = batch_x.to(args.device)
            batch_y = batch_y.to(args.device)
            out = model(batch_x, lap)
            loss = F.mse_loss(out, batch_y)

            if worst:
                # Per-sample loss; track the maximum over the whole split.
                for i in range(bz):
                    l = F.mse_loss(out[i:i+1], batch_y[i:i+1])
                    # Fix: .item() keeps worst_loss a plain float (the
                    # original could return a 0-dim tensor), so callers'
                    # comparisons and formatting are type-stable.
                    worst_loss = max(l.item(), worst_loss)

            loss_total += (loss.item() / bz)

        loss_total /= len(val_loader)

        if not worst:
            log_str = f"Fine-tune Validate | Loss: {loss_total:.4f} | Time: {time.time()-t:.4f}s"
            logging.info(log_str)

    return loss_total, worst_loss


def compute_test(loader, laplacian, worst_loss):
    """Score the test loader for anomalies and append metrics to the results file.

    A window is flagged anomalous (pred=1) when its reconstruction MSE
    exceeds `worst_loss` (the worst loss seen on the validation split).
    Writes a run summary (command line, args, metrics) to
    exp_dir/result_fname, shared across experiments.
    """
    t = time.time()
    model.eval()
    y_true = []
    y_pred = []
    with torch.no_grad():
        for batch_x, batch_y, batch_label in loader:
            batch_x = batch_x.to(args.device)
            batch_y = batch_y.to(args.device)
            out = model(batch_x, laplacian)
            loss = F.mse_loss(out, batch_y)

            # Threshold on reconstruction error to get a binary prediction.
            pred = 1 if loss.item() > worst_loss else 0
            y_true.append(batch_label.item())
            y_pred.append(pred)

        prec, rec, f1, acc = score_report(y_true, y_pred)

        # Fix: time-per-sample divides by the loader actually iterated,
        # not the module-level test_loader.
        log_str = f"Test | Prec: {prec:.4f} | Rec: {rec:.4f} | F1: {f1:.4f} | Acc: {acc:.4f} | Time per: {(time.time()-t)/len(loader):.4f}s"
        logging.info(log_str)

    result_path = os.path.join(args.exp_dir, args.result_fname)
    with open(result_path, 'a') as f:
        f.write("\n")
        f.write("=" * 80)
        f.write("\n" + get_cmd(sys.argv))
        f.write("\n" + str(args))
        f.write("\n" + args.exp_name)
        f.write("\n" + log_str)
        f.write("\n")


if __name__ == '__main__':

    logging.info(args)
    logging.info("Start training")

    meta_train_error_list = []
    fine_tune_error_list = []

    # Phase 1: meta-training on all trace types except the held-out one.
    t = time.time()
    for epoch in range(1, args.epochs+1):
        meta_train_error = meta_train(epoch)
        meta_train_error_list.append(meta_train_error)
    logging.info("Meta-training end, Total time usage: {:4f}s".format(time.time() - t))

    # Phase 2: fine-tuning -- reset the regressor head, then adapt on the
    # few-shot split of the held-out type.
    t = time.time()
    model.reset_regressor()
    # Fix: run the full ft_epochs budget; the original range(1, ft_epochs)
    # dropped the last epoch, inconsistent with the meta-training loop above.
    for epoch in range(1, args.ft_epochs + 1):
        fine_tune_error = fine_tune(epoch)
        fine_tune_error_list.append(fine_tune_error)
    logging.info("Fine-tuning end, Total time usage: {:4f}s".format(time.time() - t))

    # Phase 3: test -- threshold is the worst validation loss; windows whose
    # reconstruction error exceeds it are flagged as anomalies.
    _, worst_loss = validate(worst=True)
    logging.info(f"Worst Loss: {worst_loss:.4f}")
    compute_test(test_loader, lap, worst_loss)


