"""
this script is used to train ConvNetQuake model. train.
"""
import os
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ConvNetQuake_pytorch.constant import EVENT_CLUSTER, TRAIN_PATH
from models import ConvNetQuake
import argparse
from eq_dataset import EarthquakeDatasetCache, EarthquakeDataset
from tqdm import tqdm
import numpy as np
import datetime


def main(args):
    """Train ConvNetQuake for ``args.epochs`` epochs, evaluating and
    checkpointing the model after every epoch.

    Args:
        args: parsed command-line namespace; see the argparse setup in
            ``__main__`` for the expected attributes. ``args.device`` is
            overwritten here based on CUDA availability.
    """
    args.device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"You are using {args.device}......")

    model = ConvNetQuake(args).to(args.device)

    train_dataset_eq = EarthquakeDatasetCache(args.train_path, args.binary_classification)
    train_dataset_loader_eq = DataLoader(dataset=train_dataset_eq, batch_size=args.batch_size,
                                         shuffle=args.shuffle)

    test_dataset_eq = EarthquakeDatasetCache(args.test_path, args.binary_classification)
    test_dataset_loader_eq = DataLoader(dataset=test_dataset_eq, batch_size=args.batch_size, shuffle=args.shuffle)

    # os.mkdir fails when intermediate directories (e.g. ".../backup") do not
    # exist yet; makedirs creates the whole chain and tolerates reruns.
    os.makedirs(args.backup_path, exist_ok=True)
    # SummaryWriter creates its own log directory as needed.
    writer = SummaryWriter(args.tensorboard_path)

    loss_fn = torch.nn.CrossEntropyLoss()
    # Created once, outside the epoch loop, so optimizer state persists
    # across epochs.
    optimizer = torch.optim.SGD(model.parameters(), lr=args.learn_rate)

    for epoch in range(args.epochs):
        # Refill the dataset cache at the start of every epoch.
        train_dataset_eq.init_cache(2)
        batch_len = len(train_dataset_loader_eq)
        model.train()

        with tqdm(total=len(train_dataset_eq)) as train_bar:
            train_bar.set_description(f'[epoch {epoch}], lr={args.learn_rate}')
            for batch_idx, (data, label) in enumerate(train_dataset_loader_eq):
                train_bar.update(len(data))
                data, label = data.to(args.device), label.to(args.device)
                pred = model(data)
                loss = loss_fn(pred, label)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Global step index so curves from different epochs line up.
                writer.add_scalar("train_loss", loss.item(), epoch * batch_len + batch_idx)
                if batch_idx % 20 == 0:
                    tqdm.write(f"loss: {loss.item():>7f}")

        print("testing ...")
        test_acc, test_loss = test(args, test_dataset_loader_eq, model)
        writer.add_scalar("test_loss", test_loss, epoch)
        writer.add_scalar("test_acc", test_acc, epoch)

        # Build the path first so the printed name matches the file written.
        model_save_path = os.path.join(args.backup_path, f'{epoch}_acc{test_acc}.pth')
        print(f"save model {os.path.basename(model_save_path)} ...")
        torch.save(model.state_dict(), model_save_path)


def train(args, data_loader, model, epoch, writer, optimizer=None, loss_fn=None):
    """Run a single training epoch over *data_loader*.

    Args:
        args: parsed command-line namespace (uses ``device``, ``learn_rate``).
        data_loader: DataLoader whose dataset exposes ``init_cache``.
        model: the network to train (moved to ``args.device`` by the caller).
        epoch: current epoch index, used for the progress bar and the
            tensorboard global step.
        writer: tensorboard ``SummaryWriter`` receiving per-batch loss.
        optimizer: optional optimizer to reuse across epochs. When None a
            fresh plain SGD is built, which discards any optimizer state
            accumulated in earlier epochs — pass one in from the caller to
            keep state (momentum, etc.) across epochs.
        loss_fn: optional loss; defaults to ``CrossEntropyLoss``.
    """
    # Refill the dataset cache before iterating.
    data_loader.dataset.init_cache(2)
    if loss_fn is None:
        loss_fn = torch.nn.CrossEntropyLoss()
    if optimizer is None:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learn_rate)

    batch_len = len(data_loader)
    model.train()

    with tqdm(total=len(data_loader.dataset)) as train_bar:
        train_bar.set_description(f'[epoch {epoch}], lr={args.learn_rate}')
        for batch_idx, (data, label) in enumerate(data_loader):
            train_bar.update(len(data))
            data, label = data.to(args.device), label.to(args.device)
            pred = model(data)
            loss = loss_fn(pred, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Global step index so curves from different epochs line up.
            writer.add_scalar("train_loss", loss.item(), epoch * batch_len + batch_idx)
            if batch_idx % 20 == 0:
                tqdm.write(f"loss: {loss.item():>7f}")


def test(args, data_loader, model):
    """Evaluate *model* on *data_loader*.

    Args:
        args: parsed command-line namespace (uses ``device`` and
            ``binary_classification``).
        data_loader: DataLoader whose dataset exposes ``init_cache``.
        model: the network to evaluate.

    Returns:
        ``(accuracy, avg_loss)`` where accuracy is the fraction of correct
        predictions over the whole dataset and avg_loss is the mean
        per-batch cross-entropy loss.
    """
    # Refill the dataset cache before iterating.
    data_loader.dataset.init_cache(2)
    size = len(data_loader.dataset)
    num_batches = len(data_loader)
    loss_fn = torch.nn.CrossEntropyLoss()

    model.eval()
    test_loss, correct = 0.0, 0.0
    # Collect per-batch tensors and concatenate once at the end; repeated
    # torch.cat onto a growing tensor is quadratic and dtype-fragile.
    labels, predictions = [], []
    with torch.no_grad():
        for data, label in data_loader:
            data, label = data.to(args.device), label.to(args.device)
            prediction = model(data)
            predicted_cls = prediction.argmax(dim=1)
            labels.append(label.cpu())
            predictions.append(predicted_cls.cpu())

            test_loss += loss_fn(prediction, label).item()
            correct += (predicted_cls == label).type(torch.float).sum().item()

    test_loss /= num_batches
    acc = correct / size
    print(f"[Test Error] Accuracy: {(100 * acc):>0.1f}%, Avg loss: {test_loss:>8f}")
    # if binary classification, then calculate the events recall
    if args.binary_classification:
        confusion_matrix = np.zeros((2, 2))
        y_true = torch.cat(labels).numpy().astype(np.int32)
        y_pred = torch.cat(predictions).numpy().astype(np.int32)
        for i, j in zip(y_true, y_pred):
            confusion_matrix[i, j] += 1
        positives = confusion_matrix[1, 0] + confusion_matrix[1, 1]
        # Guard against division by zero when the split has no event samples.
        if positives > 0:
            recall = confusion_matrix[1, 1] / positives
            print("The recall of events is {}.".format(recall))
        else:
            print("No event samples in the dataset; recall is undefined.")
    return acc, test_loss


if __name__ == "__main__":
    now_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_path", type=str, default=os.path.join(TRAIN_PATH, "train.csv"))
    parser.add_argument("--test_path", type=str, default=os.path.join(TRAIN_PATH, "test.csv"))
    parser.add_argument("--batch_size", type=int, default=512)
    parser.add_argument("--learn_rate", '-lr', type=float, default=1e-5)
    parser.add_argument("--shuffle", type=bool, default=False)
    parser.add_argument("--device", type=str, default="gpu")
    parser.add_argument("--n_cluster", type=int, default=EVENT_CLUSTER, help="the cluster number of category, default \
                        is 6. the parameter doesn't work when reduce the task to a binary classification ")
    parser.add_argument("--epochs", type=int, default=50)
    parser.add_argument("--binary_classification", type=bool, default=True,
                        help="whether reduce the detection task to a binary classification")
    parser.add_argument("--backup_path", type=str,
                        default=os.path.join(TRAIN_PATH, "backup", now_time),
                        help="path to save the model")
    parser.add_argument("--tensorboard_path", type=str, default=os.path.join(TRAIN_PATH, "tensorboard", now_time))
    args = parser.parse_args()

    with open("../tensorboard.txt", 'w') as handle:
        handle.write(f'tensorboard --logdir={args.tensorboard_path}')

    main(args)
