import datetime
import os
from argparse import ArgumentParser, Namespace
from enum import Enum

import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau as RLRoP
# from torch_geometric.data import Data, DataLoader
from torch_geometric.loader.dataloader import DataLoader
from torch_geometric.logging import log

from datasets import ArchDataset
from early_stopping import EarlyStopping
from models import ArchiteCADNet
from utils import write_array_to_file, read_array_from_file, plot_loss_accuracy_curves

# Locations of the raw dataset and of the training records (masks, logs, checkpoints).
DATASET_DIR = r'/workspace/dataset'
RECORD_DIR = r'/workspace/record'
# Train on the GPU when one is available, otherwise fall back to CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Mask(Enum):
    """How the train/val/test split masks are obtained.

    NONE: use the freshly generated masks without touching disk.
    RECORD: generate masks and also write them to ``mask.txt`` for reuse.
    PREDEFINED: load previously recorded masks from ``mask.txt``.
    """

    # NOTE: the original members had trailing commas (``NONE = 0,``), which
    # made every value a one-element tuple such as ``(0,)`` instead of an int.
    NONE = 0
    RECORD = 1
    PREDEFINED = 2


# Mask-handling strategy for this run; see the Mask enum above.
MASK = Mask.RECORD


def build_args(heads: int, gat_stages: int) -> Namespace:
    """Parse the command-line hyper-parameters for training.

    Args:
        heads: default number of GAT attention heads.
        gat_stages: default number of GAT stages.

    Returns:
        The parsed ``argparse.Namespace`` (the previous ``ArgumentParser``
        return annotation was wrong — ``parse_args`` is called here).
    """
    parser = ArgumentParser()

    # GAT architecture
    parser.add_argument('--heads', type=int, default=heads)
    parser.add_argument('--gat_stages', type=int, default=gat_stages)

    # optimisation
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--dropout_rate', type=float, default=0.2)

    # ReduceLROnPlateau scheduler
    parser.add_argument('--rl_threshold', type=float, default=0.0001)
    parser.add_argument('--rl_patience', type=int, default=4)
    parser.add_argument('--rl_factor', type=float, default=0.6)
    parser.add_argument('--rl_cd', type=int, default=6)
    parser.add_argument('--rl_min_lr', type=float, default=0.0001)

    # early stopping
    parser.add_argument('--es_patience', type=int, default=20)
    parser.add_argument('--es_min_delta', type=float, default=0.001)

    # dataset split proportions
    parser.add_argument('--train_mask', type=float, default=0.8)
    parser.add_argument('--val_mask', type=float, default=0.1)

    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--batch_size', type=int, default=1)

    return parser.parse_args()


def get_output_channels(dataset: 'ArchDataset') -> int:
    """Compute the number of label classes across the whole dataset.

    Scans every sample's integer label tensor ``data.y`` and derives
    ``global_max - global_min + 1``.  When the smallest label is 1 the count
    is bumped by one — presumably to keep index 0 reserved; TODO confirm
    against the dataset's label encoding.

    Args:
        dataset: iterable of graph samples, each exposing a ``.y`` label tensor.

    Returns:
        Number of output channels for the classification head
        (0 for an empty dataset).
    """
    output_channels = 0
    label_max = -np.inf
    label_min = np.inf
    for data in dataset:
        # .item() converts the 0-d result tensors to plain Python numbers
        # (the original awkwardly called the unbound ``torch.IntTensor.item``).
        label_max = max(label_max, torch.max(data.y).item())
        label_min = min(label_min, torch.min(data.y).item())
        output_channels = max(output_channels, label_max - label_min + 1)
    if label_min == 1:
        output_channels += 1
    return output_channels


def generate_mask(num_samples, train: float, val: float,
                  *,
                  proportion=1):
    '''
    Allocate the train / validation / test dataset masks.

    Args:
        num_samples (int): number of samples
        train (float): proportion of train samples
        val (float): proportion of validation samples
        proportion (float): proportion of the total dataset to use in masking

    Returns:
        Tuple of three 0/1 int tensors of length ``num_samples``
        (train, validation, test).
    '''
    # Sample counts for training and validation; whatever is left over
    # after the two cut points below lands in the test split.
    n_train = int(train * num_samples)
    n_val = int(val * num_samples)

    # A single random permutation drives all three splits.
    shuffled = torch.randperm(num_samples)
    cut_a = round(n_train * proportion)
    cut_b = round((n_train + n_val) * proportion)
    sections = (shuffled[:cut_a], shuffled[cut_a:cut_b], shuffled[cut_b:])

    masks = []
    for section in sections:
        mask = torch.zeros(num_samples, dtype=torch.int)
        mask[section] = 1
        masks.append(mask)
    return tuple(masks)


def get_masked_datasets(dataset: ArchDataset, masks: list):
    """Return one sub-dataset per mask, keeping samples whose mask entry is truthy."""
    return [
        [dataset[i] for i in tqdm.tqdm(range(len(dataset))) if mask[i]]
        for mask in masks
    ]


def get_loader(dataset: ArchDataset, batch_size: int, shuffle: bool = True):
    '''
    Build a dataloader over the given dataset.

    Args:
        dataset (ArchDataset): dataset
        batch_size (int): batch size
        shuffle (bool): shuffle the data
    '''
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


def get_loaders(datasets, batch_size):
    # One shuffled loader per dataset split (train / validation / test).
    return [get_loader(split, batch_size, True) for split in datasets]


def save_train_record(args: Namespace, train_log, model, train_dir, ts_now, test_acc):
    '''
    Persist the hyper-parameters, the per-epoch training log and the model.

    Args:
        args: parsed hyper-parameters; stored as key/value rows in args.log.
        train_log: list of per-epoch [epoch, losses, accuracies] rows.
        model: trained model, saved whole via ``torch.save``.
        train_dir: root directory holding the ``models/records`` tree.
        ts_now: timestamp string naming this run's sub-directory.
        test_acc: final test accuracy — currently unused, kept for interface
            compatibility (it was once appended to the model file name).
    '''
    args_record = [[key, val] for key, val in vars(args).items()]

    run_dir = os.path.join(train_dir, 'models', 'records', ts_now)
    # torch.save does not create missing directories, so make sure the
    # per-run record directory exists before writing anything into it.
    os.makedirs(run_dir, exist_ok=True)

    args_log_file = os.path.join(run_dir, 'args.log')
    train_log_file = os.path.join(run_dir, 'train.log')
    model_save_file = os.path.join(run_dir, 'model.pth')

    write_array_to_file(args_record, args_log_file, truncate=True)
    write_array_to_file(train_log, train_log_file, truncate=True)
    torch.save(model, model_save_file)


def model_train(args: Namespace,
                gat_version='v2',
                device: torch.device = DEVICE,
                dataset_dir=DATASET_DIR,
                record_dir=RECORD_DIR,
                show_plot: bool = False,
                *,
                pretrained_model_file: str = None):
    """Full training loop: build loaders, train ArchiteCADNet, record results.

    Args:
        args: hyper-parameters produced by ``build_args``.
        gat_version: GAT convolution variant forwarded to the model.
        device: torch device to train on.
        dataset_dir: directory the ``ArchDataset`` is loaded from.
        record_dir: directory for masks, logs and model checkpoints.
        show_plot: when True, plot loss/accuracy curves after training.
        pretrained_model_file: optional checkpoint path to warm-start from.
    """
    def train(loader):
        """Run one optimisation epoch over ``loader``."""
        nonlocal model, device, optimizer
        model.train()

        for batch in loader:
            data = batch[0]
            data = data.to(device)
            # Remap label 1 to 0 in place — presumably merging class 1 into
            # class 0; TODO confirm against the dataset's label encoding.
            data.y = data.y.masked_fill_(data.y == 1, 0)
            data.y = data.y.squeeze().to(device)
            label = torch.squeeze(data.y)
            adj = data.ins_adj.float()
            weight = get_weight(label, adj).to(device)

            optimizer.zero_grad()
            out1, out2 = model(data)
            out2 = out2.squeeze().to(device)
            # NOTE(review): thresholding here yields a hard 0/1 tensor with no
            # grad_fn, so loss2 cannot back-propagate into the model, and
            # binary_cross_entropy_with_logits expects raw logits rather than
            # binarised values — confirm whether this is intentional.
            out2 = (out2 > 0.5).float()

            loss1 = F.cross_entropy(out1, label)
            loss2 = F.binary_cross_entropy_with_logits(out2, adj, weight)
            # The adjacency loss is weighted twice the node-label loss.
            loss = loss1 + loss2 * 2
            loss.backward()

            optimizer.step()

    @torch.no_grad()
    def test(loader):
        """Evaluate on ``loader``; return (accuracy, mean loss per batch)."""
        nonlocal model, device
        model.eval()

        correct = 0
        total = 0
        total_loss = 0
        for batch in loader:
            data = batch[0]
            data = data.to(device)
            # Same label remapping as in train().
            data.y = data.y.masked_fill_(data.y == 1, 0)
            label = data.y.squeeze().to(device)
            adj = data.ins_adj.float()
            weight = get_weight(label, adj).to(device)

            out1, out2 = model(data)
            out2 = out2.squeeze().to(device)
            out2 = (out2 > 0.5).float()

            loss1 = F.cross_entropy(out1, label)
            loss2 = F.binary_cross_entropy_with_logits(out2, adj, weight)
            loss = loss1 + loss2 * 2
            total_loss += loss.item()

            # Node-classification accuracy from the class-logit head.
            pred = out1.argmax(dim=-1)
            correct += (pred == label).sum().item()

            total += label.size(0)

        acc = correct / total
        return acc, total_loss / len(loader)

    def get_weight(label, adj):
        """Per-pair BCE weights for the flattened N*N instance-adjacency loss."""
        N = label.shape[0]

        # label_match[i, j] is True when nodes i and j share a class label.
        label_match = (label.unsqueeze(0) == label.unsqueeze(1))
        ins_index = adj.view(N, N)

        weight = torch.zeros([N, N], dtype=torch.float32)

        weight[label_match & (ins_index == 0)] = 20  # same type, not connected
        weight[label_match & (ins_index == 1)] = 2  # same type, connected

        weight[~label_match & (ins_index == 0)] = 1  # different type, not connected
        weight[~label_match & (ins_index == 1)] = 0  # different type, connected

        return weight.view(N * N)

    # Timestamp naming this training run's record sub-directory.
    current_time = datetime.datetime.now()
    ts_now = current_time.strftime("%Y-%m-%d-%H-%M-%S")

    dataset = ArchDataset(dataset_dir)

    # Split into train / validation / test sets.
    masks = generate_mask(len(dataset), args.train_mask, args.val_mask)

    if MASK != Mask.NONE:
        masks_file = os.path.join(record_dir, 'mask.txt')
        if MASK == Mask.RECORD:
            # Persist the freshly generated masks so the split can be reused.
            masks_record = [mask.cpu().detach().numpy() for mask in masks]
            write_array_to_file(masks_record, masks_file)
        if MASK == Mask.PREDEFINED:
            # Reload a previously recorded split instead of the random one.
            mask: list[list[bool]] = read_array_from_file(masks_file)
            masks = [torch.tensor(m, dtype=torch.int32) for m in mask]

    masked_datasets = get_masked_datasets(dataset, masks)
    loaders = get_loaders(masked_datasets, args.batch_size)

    args.output_channels = get_output_channels(dataset)

    # Model creation.
    use_cuda = True if device.type == 'cuda' else False
    model = ArchiteCADNet(heads=args.heads,
                          gat_stages=args.gat_stages,
                          out_channels=args.output_channels,
                          gat_version=gat_version,
                          use_cuda=use_cuda)
    # model = GATCADNet(heads = args.heads, gat_stages=args.gat_stages, out_channels=args.output_channels, use_cuda=use_cuda)
    model = model.to(device)

    if pretrained_model_file is not None:
        # Resuming: load last run's weights, keeping only the entries whose
        # keys still exist in the current architecture.
        pretrained_model_file = os.path.join(pretrained_model_file)
        if os.path.exists(pretrained_model_file):
            pretrained_dict = torch.load(pretrained_model_file)
            model_dict = model.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)

    # Optimizer and learning-rate scheduler.
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=[0.9, 0.99])
    scheduler = RLRoP(
        optimizer, 'min',
        threshold=args.rl_threshold,
        patience=args.rl_patience,
        factor=args.rl_factor,
        cooldown=args.rl_cd,
        # min_lr=args.rl_min_lr,
        verbose=True)

    # Training loop and result bookkeeping.
    train_log = []
    final_test_acc = 0
    best_val_acc = 0
    early_stopping = EarlyStopping(args.es_patience, args.es_min_delta, True)
    for epoch in range(1, args.epochs + 1):

        train(loaders[0])
        # lr = optimizer.param_groups[0]['lr']

        train_acc, train_loss = test(loaders[0])
        val_acc, val_loss = test(loaders[1])
        test_acc, _ = test(loaders[2])

        # Report the test accuracy achieved at the best-validation epoch.
        # [train_acc, val_acc, tmp_test_acc], pred = test(loader)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            final_test_acc = test_acc

        log(Epoch=epoch,
            Train_Loss=train_loss, Val_loss=val_loss,
            Train=train_acc, Val=val_acc,
            Test=final_test_acc)

        train_log.append([epoch, train_loss, val_loss, train_acc, val_acc, test_acc])

        scheduler.step(val_loss)

        # Records are rewritten every epoch, so a crash loses at most one epoch.
        save_train_record(args, train_log, model, record_dir, ts_now, final_test_acc)

        # Feed the validation loss to the early-stopping tracker.
        early_stopping(val_loss, model)

        # Stop once the tracker reports no improvement for es_patience epochs.
        if early_stopping.stop():
            print("Early stopping")
            break

    # Plots.
    if show_plot:
        plot_loss_accuracy_curves(
            [np.array(train_log)[:, 2]],  # validation loss
            [
                np.array(train_log)[:, 3],  # train accuracy curve
                np.array(train_log)[:, 4],  # validation accuracy curve
                np.array(train_log)[:, 5]  # test accuracy curve
            ],
            len(train_log))  # epoch count


if __name__ == '__main__':
    # Train with 8 attention heads, 4 GAT stages and the 'v2' GAT variant.
    args = build_args(8, 4)
    model_train(args, 'v2')
