import sys
import argparse
import torch.optim as optim
import torch.nn.functional as F
from dataset.dataloader import *
from network import *
from model.tgcn import TGCNModel
from dataset.dataset import SingleTraceDataset
from torch.utils.data import DataLoader
from utils.graph_util import calculate_laplacian_with_self_loop

# Training settings
parser = argparse.ArgumentParser()

# --- Data / task selection ---
parser.add_argument('--data_dir', type=str, default="data", help='Directory for dataset.')
parser.add_argument('--dataset' , default="sn", help='sn | tt | aiops | openstack | gaia')
parser.add_argument('--model' , default="v1", help='| v1 | v2: no metric | v3: spatial | gat')
# train_idx/test_idx index trace *types*; when they differ, the script also
# evaluates cross-type transfer on the test_idx type.
parser.add_argument('--train_idx', type=int, default=0, help='Index of trace type for train.')
parser.add_argument('--test_idx', type=int, default=1, help='Index of trace type for test.')
# few_num > 0 enables few-shot training (requires train_idx == test_idx).
parser.add_argument('--few_num', type=int, default=-1, help='Num of traces of train type.')

# --- Experiment bookkeeping (output paths; exp_name gets a timestamp suffix) ---
parser.add_argument('--exp_name', type=str, default="single", help='Experiment name.')
parser.add_argument('--exp_dir', type=str, default="experiment", help='Directory for Experiment.')
parser.add_argument('--model_fname', type=str, default="model.pt", help='Model file name.')
parser.add_argument('--log_fname', type=str, default="log.txt", help='Log file name.')
parser.add_argument('--result_fname', type=str, default="results.txt", help='Result file name.')

# --- Runtime flags ---
parser.add_argument('--no_cuda', action='store_true', default=False, help='Disables CUDA training.')
# fast_mode skips the per-epoch validation pass; early stopping then uses
# the training loss instead (see train()).
parser.add_argument('--fast_mode', action='store_true', default=False, help='Validate during training pass.')

# --- Training hyperparameters ---
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size.')
parser.add_argument('--patience', type=int, default=3, help='Patience.')
# Early stopping is only considered after this many epochs have run.
parser.add_argument('--early_stop', type=int, default=5, help='Epoch to start early stop.')

parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')

# Window size of history traces fed to the model for each prediction.
parser.add_argument('--window', type=int, default=14, help='Window size for history traces.')

# --- Model hyperparameters ---
parser.add_argument('--hidden', type=int, default=64, help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')

args = parser.parse_args()

# --- Experiment bookkeeping: unique run name, output dirs, logging ---

# Timestamp suffix makes each run's output directory unique.
time_str = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%dT%H.%M.%S")
args.exp_name = args.exp_name + '_' + time_str
# Project helper (via the wildcard imports); presumably seeds RNGs and
# sets args.device from --no_cuda -- TODO confirm against its definition.
init_dl_programe(args)
args.base_dir = os.path.join(args.exp_dir, args.exp_name)


# makedirs(exist_ok=True) also creates missing parents and tolerates a
# pre-existing experiment root; the old exists-check + mkdir failed for
# nested --exp_dir paths and raced with concurrent runs.
os.makedirs(args.exp_dir, exist_ok=True)
# base_dir embeds a timestamp, so a collision here is a genuine error:
# keep plain mkdir so it raises instead of silently reusing the directory.
os.mkdir(args.base_dir)

# Root logger: mirror every message to a per-run log file and stderr.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(os.path.join(args.base_dir, args.log_fname), mode="a", encoding="utf-8")
file_handler.setFormatter(logging.Formatter("%(message)s"))
cmd_handler = logging.StreamHandler(sys.stderr)
cmd_handler.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(file_handler)
logger.addHandler(cmd_handler)


logging.info(f"Start experiment {args.exp_name}")


if args.dataset == 'sn':
    # load_sn returns parallel lists indexed by trace type.
    endpoint_list, train_data_list, test_data_list, adj_list, label_data_list, rc_data_list = load_sn(os.path.join(args.data_dir, args.dataset))
    idx = args.train_idx

    if args.few_num > 0:
        # Few-shot mode: train on a prefix of the same type we test on.
        if args.train_idx != args.test_idx:
            logging.info("When using few shot learning, you must set train_idx equal to test_idx")
            # Config error: exit non-zero (was exit(0), which signalled success
            # to any calling script).
            sys.exit(1)
        train_trace_data = train_data_list[idx][:args.few_num]
    else:
        train_trace_data = train_data_list[idx]

    # 70/30 chronological train/validation split (no shuffling: time series).
    p = int(train_trace_data.shape[0] * 0.7)

    # Normalized graph Laplacians for the source (train) and target (test)
    # trace types; moved to the training device once, up front.
    laplacian = calculate_laplacian_with_self_loop(adj_list[idx])
    lap = laplacian.to(args.device)
    target_laplacian = calculate_laplacian_with_self_loop(adj_list[args.test_idx])
    target_lap = target_laplacian.to(args.device)

    train_dataset = SingleTraceDataset(train_trace_data[0:p, :], args.window)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size) # TODO: shuffle, num_workers

    val_dataset = SingleTraceDataset(train_trace_data[p:, :], args.window)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size)

    # Test loaders use batch_size=1: compute_test thresholds per-sample losses.
    test_dataset = SingleTraceDataset(test_data_list[idx], args.window, label_data_list[idx])
    test_loader = DataLoader(test_dataset, batch_size=1)

    target_dataset = SingleTraceDataset(test_data_list[args.test_idx], args.window, label_data_list[args.test_idx])
    target_loader = DataLoader(target_dataset, batch_size=1)

    logging.info(f"Train {args.train_idx} | train: {len(train_dataset)} | valid: {len(val_dataset)} | test: {len(test_dataset)}")
    logging.info(f"Test {args.test_idx} | test: {len(target_dataset)}")
else:
    # Fixed stray space in the message (was "{args.dataset }").
    raise ValueError(f"Unknown dataset {args.dataset}.")



# Model selection: only the v1 TGCN variant is implemented so far.
if args.model == 'v1':
    model = TGCNModel(args.hidden).to(args.device)
else:
    raise ValueError(f"Unknown model {args.model}.")

# NOTE(review): this rebinds the name `optim`, shadowing the `torch.optim`
# module imported at the top of the file. train() steps through this rebound
# name, so it must remain `optim`; renaming it would require touching every
# call site.
optim = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)


def train(epoch):
    """Run one training epoch and return the loss used for early stopping.

    Args:
        epoch: 1-based epoch number (for logging only).

    Returns:
        The epoch's mean training loss when --fast_mode is set, otherwise
        the validation loss from validate().
    """
    t = time.time()
    model.train()
    loss_train = 0
    for batch_x, batch_y, _ in train_loader:
        batch_x = batch_x.to(args.device)
        batch_y = batch_y.to(args.device)

        optim.zero_grad()
        out = model(batch_x, lap)   # (B, N_i)
        loss = F.mse_loss(out, batch_y)
        loss.backward()
        optim.step()
        # F.mse_loss already averages over the batch; the previous code
        # divided by the batch size a second time, under-reporting the loss
        # and over-weighting the final (smaller) batch.
        loss_train += loss.item()

    loss_train /= len(train_loader)

    logging.info('Epoch: {}/{} | '.format(epoch, args.epochs) +
                 'Loss: {:.4f} | '.format(loss_train) +
                 'Time: {:.4f}s'.format(time.time() - t))

    if args.fast_mode:
        return loss_train

    val_loss, _ = validate()
    return val_loss


def validate(worst=False):
    """Evaluate the model on the validation split.

    Args:
        worst: when True, additionally track the largest per-sample MSE seen;
            that value later serves as the anomaly threshold in compute_test.

    Returns:
        (mean_loss, worst_loss) -- worst_loss is 0.0 unless worst=True.
    """
    t = time.time()
    model.eval()
    with torch.no_grad():
        loss_total = 0
        worst_loss = 0.0
        for batch_x, batch_y, _ in val_loader:
            batch_x = batch_x.to(args.device)
            batch_y = batch_y.to(args.device)
            out = model(batch_x, lap)
            loss = F.mse_loss(out, batch_y)

            if worst:
                for i in range(out.shape[0]):
                    # .item() keeps worst_loss a plain float; the previous
                    # max(tensor, float) silently carried a 0-dim tensor into
                    # later formatting and comparisons.
                    l = F.mse_loss(out[i:i+1], batch_y[i:i+1]).item()
                    worst_loss = max(l, worst_loss)

            # mse_loss is already the batch mean; do not divide by the batch
            # size again (the previous code double-normalised).
            loss_total += loss.item()

        loss_total /= len(val_loader)

        if not worst:
            log_str = f"Validate | Loss: {loss_total:.4f} | Time: {time.time()-t:.4f}s"
            logging.info(log_str)

    return loss_total, worst_loss


def compute_test(loader, laplacian, worst_loss):
    """Run anomaly detection on a test loader (expects batch_size == 1).

    A trace is flagged anomalous when its reconstruction MSE exceeds
    worst_loss (the largest per-sample MSE seen on clean validation data).
    Logs precision/recall/F1/accuracy and appends the summary to the shared
    results file.

    Args:
        loader: DataLoader yielding (x, y, label) with batch size 1.
        laplacian: graph Laplacian for this loader's trace type.
        worst_loss: anomaly threshold from validate(worst=True).
    """
    t = time.time()
    model.eval()
    y_true = []
    y_pred = []
    with torch.no_grad():
        for batch_x, batch_y, batch_label in loader:
            batch_x = batch_x.to(args.device)
            batch_y = batch_y.to(args.device)
            out = model(batch_x, laplacian)
            loss = F.mse_loss(out, batch_y)

            # Threshold rule: worse than anything seen on clean validation
            # data => anomaly.
            pred = 1 if loss.item() > worst_loss else 0
            y_true.append(batch_label.item())
            y_pred.append(pred)

        prec, rec, f1, acc = score_report(y_true, y_pred)

        # Bug fix: per-sample timing must use *this* loader's length; the old
        # code always divided by len(test_loader), which was wrong whenever
        # this function was called with target_loader.
        log_str = f"Test | Prec: {prec:.4f} | Rec: {rec:.4f} | F1: {f1:.4f} | Acc: {acc:.4f} | Time per: {(time.time()-t)/len(loader):.4f}s"
        logging.info(log_str)

    # Append the run's command line, args, and scores to the shared results file.
    result_path = os.path.join(args.exp_dir, args.result_fname)
    with open(result_path, 'a') as f:
        f.write("\n")
        f.write("=" * 80)
        f.write("\n" + get_cmd(sys.argv))
        f.write("\n" + str(args))
        f.write("\n" + args.exp_name)
        f.write("\n" + log_str)
        f.write("\n")



if __name__ == '__main__':
    # Driver: fit with early stopping, then threshold-based anomaly testing.
    start = time.time()

    logging.info(args)
    logging.info("Start training")

    loss_history = []
    best_loss, best_epoch = float('inf'), -1
    bad_counter = 0

    model.train()
    for epoch in range(1, args.epochs + 1):
        epoch_loss = train(epoch)
        loss_history.append(epoch_loss)

        # Track the best loss so far and count epochs without improvement.
        if epoch_loss < best_loss:
            best_loss, best_epoch, bad_counter = epoch_loss, epoch, 0
        else:
            bad_counter += 1

        # Early stopping is only considered after args.early_stop epochs.
        if bad_counter >= args.patience and epoch > args.early_stop:
            logging.info(f'Early stop at epoch {epoch}')
            break

    logging.info("Training end, Total time usage: {:4f}s".format(time.time() - start))

    # NOTE(review): the best model is never checkpointed/restored, so testing
    # uses the final-epoch weights rather than those from best_epoch --
    # TODO confirm this is intended.
    # The worst per-sample validation loss becomes the anomaly threshold.
    _, worst_loss = validate(worst=True)
    logging.info(f"Worst Loss: {worst_loss:.4f}")
    compute_test(test_loader, lap, worst_loss)
    # Cross-type evaluation only when a different trace type is targeted.
    if args.train_idx != args.test_idx:
        compute_test(target_loader, target_lap, worst_loss)


