import argparse
import torch.optim as optim
from dataset.dataloader import *
from network import *

# Training settings
parser = argparse.ArgumentParser()

parser.add_argument('--data_dir', type=str, default="data", help='Directory for dataset.')
parser.add_argument('--dataset' , default="sn", help='sn | tt | aiops | openstack | gaia')
parser.add_argument('--model' , default="v1", help='| v1 | v2: no metric | v3: spatial | gat')

parser.add_argument('--exp_name', type=str, default="run", help='Experiment name.')
parser.add_argument('--exp_dir', type=str, default="experiment", help='Directory for Experiment.')
parser.add_argument('--model_fname', type=str, default="model.pt", help='Model file name.')
parser.add_argument('--log_fname', type=str, default="log.txt", help='Log file name.')
parser.add_argument('--result_fname', type=str, default="results.txt", help='Result file name.')

parser.add_argument('--no_cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False, help='Validate during training pass.')

parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=1000, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--patience', type=int, default=50, help='Patience')

parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--penalty', type=float, default=0.99, help='Penalty for abnormal traces (class imbalance).')

parser.add_argument('--window', type=int, default=24, help='Metric window length.')
parser.add_argument('--not_embed', action='store_true', default=False, help='Do not use metric embedding.')
parser.add_argument('--embed_size', type=int, default=64, help='Metric embedding size.')

parser.add_argument('--aggr_layer', type=int, default=1, help='Number of Aggr layers.')
parser.add_argument('--aggr_hidden', type=int, default=32, help='Number of Aggr hidden units.')

parser.add_argument('--rnn', type=str, default="lstm", help='RNN backbone for temporal dependency modeling.')
parser.add_argument('--rnn_layer', type=int, default=2, help='Number of Temporal model layers.')
parser.add_argument('--rnn_hidden', type=int, default=32, help='Number of Temporal model hidden units.')

parser.add_argument('--gnn', type=str, default="gcn", help='GNN backbone for spatial dependency modeling.')
parser.add_argument('--gnn_hidden', type=int, default=128, help='Number of hidden units.')
parser.add_argument('--pooling_ratio', type=float, default=0.5, help='Ratio of Graph Pooling.')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')

args = parser.parse_args()

# Tag the experiment name with a timestamp so repeated runs never collide
# on disk, then let the project helper seed RNGs / pick the device.
time_str = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%dT%H.%M.%S")
args.exp_name = args.exp_name + '_' + time_str
init_dl_programe(args)

# Derived feature widths consumed by the model constructors.
args.env_feats = args.aggr_layer * args.aggr_hidden
args.te_feats = args.rnn_hidden
args.st_feats = args.gnn_hidden
args.base_dir = os.path.join(args.exp_dir, args.exp_name)

# Create exp_dir and base_dir in one race-free call instead of
# exists-check + mkdir.  Like the original unguarded os.mkdir, this still
# raises FileExistsError if base_dir itself already exists.
os.makedirs(args.base_dir)

# Log everything to a per-experiment file; message-only format keeps the
# log grep-friendly.  Handler is attached to the root logger so plain
# logging.info(...) calls below all land in the file.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(os.path.join(args.base_dir, args.log_fname), mode="a", encoding="utf-8")
handler.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(handler)


logging.info(f"Start experiment {args.exp_name}")


# Build the train/val/test loaders.  Only the "sn" dataset is wired up here;
# model v1 gets the full loader, every other variant the metric-free loader
# (consistent with the "v2: no metric" note in the --model help text).
if args.dataset == 'sn':
    if args.model == 'v1':
        train_loader, val_loader, test_loader = load_sn_full(args)
    else:
        train_loader, val_loader, test_loader = load_sn_wo_metric(args)
else:
    raise ValueError(f"Unknown dataset {args.dataset}.")


embed_model = None
# NOTE(review): this unconditionally overwrites the --embed_size CLI value
# with the dataset's metric feature width (args.metric_feats is presumably
# set by the loader above) -- confirm the flag is intentionally ignored.
args.embed_size = args.metric_feats
if not args.not_embed:
    # TODO: load metric embedding model
    pass

# Instantiate the requested model variant (see the --model help for the menu).
if args.model == 'v1':
    model = STTraceAD(args, embed_model)
elif args.model == 'v2':
    model = STTraceADV2(args)
elif args.model == 'v3':
    model = STTraceADV3(args)
elif args.model == 'gat':
    model = GATAD(args)
else:
    raise ValueError(f"Unknown model {args.model}.")

model.to(args.device)
# TODO: freeze embedding model
# NOTE: this rebinds the name `optim` from the torch.optim module to the
# optimizer instance.  train() below relies on that rebinding, so the name
# is deliberately kept (renaming here alone would break train()).
optim = optim.Adam(
    model.parameters(),
    lr=args.lr,
    weight_decay=args.weight_decay
)

# Validate with an explicit raise instead of `assert` -- asserts are
# stripped under `python -O`, which would let a bad penalty through.
if not 0.0 < args.penalty < 1.0:
    raise ValueError(f"penalty must be in (0, 1), got {args.penalty}")
# Cross-entropy class weights: abnormal class (index 1) gets `penalty`,
# normal class the remainder, to counter class imbalance.
weight = torch.FloatTensor([1-args.penalty, args.penalty]).to(args.device)


def train(epoch, args):
    """Run one optimization epoch over train_loader and return the val loss."""
    t = time.time()
    model.train()

    running_loss = 0.0
    for batch in train_loader:
        batch.to(args.device)
        logits = model(batch)
        batch_loss = F.cross_entropy(logits, batch.y, weight=weight)
        # backward -> step -> zero_grad: zeroing at the end of the iteration
        # leaves the gradients clean for the next batch's backward pass.
        batch_loss.backward()
        optim.step()
        optim.zero_grad()
        running_loss += batch_loss.item()

    mean_loss = running_loss / len(train_loader)

    logging.info('Epoch: {}/{} | '.format(epoch, args.epochs) +
                 'Loss: {:.4f} | '.format(mean_loss) +
                 'Time: {:.4f}s'.format(time.time() - t))

    # Validation loss drives early stopping in the main loop.
    return compute_test(model, val_loader, args)


def compute_test(model, loader, args, test=False):
    """Evaluate `model` on `loader`; log metrics and return the mean loss.

    When `test` is True the summary line is also appended to the shared
    results file under args.exp_dir (one section per experiment).
    """
    t = time.time()
    model.eval()
    targets = []
    predictions = []
    running_loss = 0.0
    with torch.no_grad():
        for batch in loader:
            batch.to(args.device)
            logits = model(batch)
            running_loss += F.cross_entropy(logits, batch.y, weight=weight).item()
            targets.append(batch.y)
            predictions.append(logits.argmax(dim=1))

        # torch.cat is the canonical spelling of torch.concat (same op).
        y_true = torch.cat(targets, dim=0)
        y_pred = torch.cat(predictions, dim=0)
        prec, rec, f1, acc = score_report(y_true.cpu(), y_pred.cpu())

        loss_total = running_loss / len(loader)

        title = 'Test' if test else 'Validate'
        log_str = f"{title} | Loss: {loss_total:.4f} | Prec: {prec:.4f} | Rec: {rec:.4f} | F1: {f1:.4f} | Acc: {acc:.4f} | Time: {time.time()-t:.4f}s"
        logging.info(log_str)

    if test:
        # Append a delimited section: separator, full arg dump, then the
        # experiment name and its test metrics.
        with open(os.path.join(args.exp_dir, args.result_fname), 'a') as f:
            f.write("\n")
            f.write("=" * 80)
            f.write("\n" + str(args))
            f.write("\n" + args.exp_name + "\n" + log_str + "\n")

    return loss_total


if __name__ == '__main__':

    t = time.time()

    logging.info(args)
    logging.info("Start training")

    # Early stopping on validation loss: checkpoint every improvement and
    # stop after `patience` consecutive epochs without one.
    best_loss = float('inf')
    best_epoch = -1
    bad_counter = 0
    for epoch in range(1, args.epochs+1):
        loss = train(epoch, args)

        if loss < best_loss:
            best_loss = loss
            best_epoch = epoch
            bad_counter = 0
            # Save at every improvement so the load() below restores the
            # BEST model.  (Saving once after the loop, as before, would
            # overwrite the best checkpoint with the last epoch's weights.)
            model.save()
        else:
            bad_counter += 1

        if bad_counter >= args.patience:
            logging.info(f'Early stop at epoch {epoch}')
            break

    logging.info("Training end, Total time usage: {:2f}s".format(time.time() - t))

    # test: reload the best checkpoint and evaluate once on the test split.
    model.load()
    compute_test(model, test_loader, args, test=True)
