import argparse
import logging
import os
import sys
import time
from datetime import datetime

import torch
import torch.nn.functional as F
import torch.optim as optim

from dataloader import *
from network import *

# Training settings
parser = argparse.ArgumentParser()

# Data
parser.add_argument('--data_dir', type=str, default="data", help='Directory for dataset.')
parser.add_argument('--dataset', default="sn", help='sn | gaia')

# Experiment bookkeeping (exp_name gets a timestamp suffix appended below)
parser.add_argument('--exp_name', type=str, default="putrace", help='Experiment name.')
parser.add_argument('--exp_dir', type=str, default="experiment", help='Directory for Experiment.')
parser.add_argument('--model_fname', type=str, default="model.pt", help='Model file name.')
parser.add_argument('--log_fname', type=str, default="log.txt", help='Log file name.')
parser.add_argument('--result_fname', type=str, default="putrace_results.txt", help='Result file name.')

# Runtime
parser.add_argument('--no_cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fast_mode', action='store_true', default=False, help='Validate during training pass.')

parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=50, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size.')

# Optimization / PU-learning hyperparameters
parser.add_argument('--lr', type=float, default=0.0001, help='Initial learning rate.')
parser.add_argument('--pie', type=float, default=0.15, help='Priority probability of positive data.')
parser.add_argument('--beta', type=float, default=0, help='Hyperparameter which ensures risk non-negative.')

# Model architecture
parser.add_argument('--gnn_hidden', type=int, default=128, help='Number of hidden units.')
parser.add_argument('--pooling_ratio', type=float, default=0.5, help='Ratio of Graph Pooling.')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')

args = parser.parse_args()

# Tag the experiment with a timestamp so repeated runs get distinct directories.
time_str = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%dT%H.%M.%S")
args.exp_name = args.exp_name + '_' + time_str
# Project helper from the star imports; presumably seeds RNGs and sets
# args.device (used below) — TODO confirm against dataloader/network.
init_dl_programe(args)
args.base_dir = os.path.join(args.exp_dir, args.exp_name)

# makedirs creates exp_dir and base_dir in one race-free call; it still raises
# FileExistsError if base_dir itself already exists, matching the original
# unconditional os.mkdir(args.base_dir).
os.makedirs(args.base_dir)

# Route all log records both to a per-experiment file and to stderr,
# message-only (no level/timestamp prefixes).
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_path = os.path.join(args.base_dir, args.log_fname)
for handler in (
    logging.FileHandler(log_path, mode="a", encoding="utf-8"),
    logging.StreamHandler(sys.stderr),
):
    handler.setFormatter(logging.Formatter("%(message)s"))
    logger.addHandler(handler)


logging.info(f"Start experiment {args.exp_name}")

# Dataset selection: only the 'sn' loader is wired up here ('gaia' appears in
# the --dataset help text but has no branch).
if args.dataset == 'sn':
    train_loader, val_loader, test_loader = load_sn(args)
else:
    raise ValueError(f"Unknown dataset {args.dataset}.")

# args.device is presumably set by init_dl_programe above — TODO confirm.
model = PUTraceAD(args).to(args.device)
# NOTE(review): this rebinds the name `optim`, shadowing the torch.optim module
# imported at the top of the file. train() below depends on this global holding
# the Adam optimizer, so renaming it would require touching both places.
optim = optim.Adam(model.parameters(),lr=args.lr)


def train(epoch):
    """Run one training epoch and return the loss used for model selection.

    Logs the epoch-averaged training loss. With --fast_mode the train loss is
    returned directly; otherwise a validation pass runs and its loss is
    returned instead. Uses the module-level `model`, `train_loader`, `optim`,
    and `args`.
    """
    epoch_start = time.time()
    model.train()

    running_loss = 0
    for batch in train_loader:
        batch.to(args.device)
        logits = model(batch)
        batch_loss = F.cross_entropy(logits, batch.y)
        batch_loss.backward()
        # step, then clear gradients ready for the next batch
        optim.step()
        optim.zero_grad()
        running_loss += batch_loss.item()

    running_loss /= len(train_loader)

    logging.info('Epoch: {}/{} | '.format(epoch, args.epochs)
                 + 'Loss: {:.4f} | '.format(running_loss)
                 + 'Time: {:.4f}s'.format(time.time() - epoch_start))

    if args.fast_mode:
        return running_loss
    return compute_test(model, val_loader)


def compute_test(model, loader, test=False):
    """Evaluate `model` over `loader` and return the mean cross-entropy loss.

    Logs loss plus precision/recall/F1/accuracy (via the project's
    score_report helper). When `test` is True the same metrics line, the
    command line, and the full args are also appended to the shared results
    file under args.exp_dir.
    """
    start = time.time()
    model.eval()
    labels = []
    predictions = []
    with torch.no_grad():
        loss_total = 0
        for batch in loader:
            batch.to(args.device)
            logits = model(batch)
            loss_total += F.cross_entropy(logits, batch.y).item()
            labels.append(batch.y)
            predictions.append(logits.argmax(dim=1))

        y_true = torch.concat(labels, dim=0)
        y_pred = torch.concat(predictions, dim=0)
        prec, rec, f1, acc = score_report(y_true.cpu(), y_pred.cpu())

        loss_total /= len(loader)

        title = 'Test' if test else 'Validate'
        log_str = f"{title} | Loss: {loss_total:.4f} | Prec: {prec:.4f} | Rec: {rec:.4f} | F1: {f1:.4f} | Acc: {acc:.4f} | Time: {time.time()-start:.4f}s"
        logging.info(log_str)

    if test:
        # Append this run's summary to the cross-run results file.
        result_path = os.path.join(args.exp_dir, args.result_fname)
        with open(result_path, 'a') as f:
            f.writelines([
                "\n",
                "=" * 80,
                "\n" + get_cmd(sys.argv),
                "\n" + str(args),
                "\n" + args.exp_name,
                "\n" + log_str,
                "\n",
            ])

    return loss_total


if __name__ == '__main__':

    t = time.time()

    logging.info(args)
    logging.info("Start training")

    # Per-epoch selection losses (val loss, or train loss under --fast_mode).
    loss_list = []
    for epoch in range(1, args.epochs + 1):
        loss = train(epoch)
        loss_list.append(loss)

    # Fixed format spec: '{:2f}' meant width 2, not 2 decimal places.
    logging.info("Training end, Total time usage: {:.2f}s".format(time.time() - t))

    # Persist the trained weights (previously --model_fname was never used).
    torch.save(model.state_dict(), os.path.join(args.base_dir, args.model_fname))

    # Final evaluation on the held-out test split; also appends to the
    # results file (test=True).
    compute_test(model, test_loader, test=True)