import sys
from torch.utils.data import DataLoader
from utils.data_loading import BasicDataset
from utils.data_loading_a_b_label import NewBasicDataset
import logging
from utils.path_hyperparameter import ph
import torch
from torchmetrics import MetricCollection, Accuracy, Precision, Recall, F1Score
from models.Models import DPCD
from utils.dataset_process import compute_mean_std
from tqdm import tqdm



def _strip_compile_prefix(state_dict):
    """Remove the '_orig_mod.' prefix that torch.compile adds to state-dict keys.

    Mutates *state_dict* in place and returns it. NOTE: the original code
    matched on 'orig_mod.' (no leading underscore) but replaced '_orig_mod.',
    which could delete a key without re-adding it; the check now matches the
    replaced substring exactly.
    """
    for key in list(state_dict.keys()):
        if '_orig_mod.' in key:
            state_dict[key.replace('_orig_mod.', '')] = state_dict.pop(key)
    return state_dict


def _build_test_dataset(root_dir, dataset_name, split_val, dataset_args):
    """Create the test dataset; the layout is selected by ph.dataset_format."""
    if ph.dataset_format == 'new_version':
        return NewBasicDataset(root_dir=root_dir + f'/{dataset_name}', split=split_val,
                               train=False,
                               **dataset_args)
    return BasicDataset(t1_images_dir=root_dir + f'/{dataset_name}/test/t1/',
                        t2_images_dir=root_dir + f'/{dataset_name}/test/t2/',
                        labels_dir=root_dir + f'/{dataset_name}/test/label/',
                        train=False, **dataset_args)


def train_net(root_dir, dataset_name, load_checkpoint=True, default=True, split=None, split_val=None):
    """Evaluate a DPCD checkpoint on the test split of *dataset_name*.

    Despite the historical name, this function only runs inference: it loads
    the checkpoint at ph.load, computes binary accuracy/precision/recall/F1
    over the test loader, re-saves the bare weights, and prints the metrics.

    Args:
        root_dir: root directory that contains the dataset folder.
        dataset_name: name of the dataset sub-directory under root_dir.
        load_checkpoint: if True, ph.load points at a full training
            checkpoint dict whose weights live under the 'net' key;
            otherwise it is a plain state dict.
        default: forwarded to the DPCD constructor.
        split, split_val: split identifiers, required when
            ph.dataset_format == 'new_version' (split_val selects the
            evaluation split; split is only validated, not used here).
    """
    # 1. Create dataset.
    # Mean/std were precomputed once with compute_mean_std on the train set
    # and hard-coded to avoid rescanning the images on every run:
    # t1_mean, t1_std = compute_mean_std(images_dir=os.path.join(root_dir, f'{dataset_name}/train/t1/'))
    # t2_mean, t2_std = compute_mean_std(images_dir=os.path.join(root_dir, f'{dataset_name}/train/t2/'))
    t1_mean, t1_std = [0.44647954, 0.44253507, 0.37772247], [0.17393848, 0.16415723, 0.15207397]
    t2_mean, t2_std = [0.34149348, 0.33470663, 0.28560712], [0.12841433, 0.12489144, 0.11756364]
    dataset_args = dict(t1_mean=t1_mean, t1_std=t1_std, t2_mean=t2_mean, t2_std=t2_std)

    if ph.dataset_format == 'new_version':
        assert split is not None
        assert split_val is not None
    test_dataset = _build_test_dataset(root_dir, dataset_name, split_val, dataset_args)

    # 2. Create the data loader. Inference can afford a larger batch than
    # training, hence the inference_ratio multiplier.
    test_loader = DataLoader(test_dataset, shuffle=False, drop_last=False,
                             batch_size=ph.batch_size * ph.inference_ratio,
                             num_workers=8,
                             prefetch_factor=5,
                             persistent_workers=True)

    # 3. Initialize logging (configured once; a second basicConfig call
    # would be a no-op anyway).
    logging.basicConfig(level=logging.INFO)

    # 4. Set up device, model, metric calculator.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    net = DPCD(default)
    net.to(device=device)
    # net = torch.compile(net)
    assert ph.load, 'Loading model error, checkpoint ph.load'
    load_model = torch.load(ph.load, map_location=device)
    # Undo torch.compile's key mangling so the weights load into the
    # uncompiled module.
    _strip_compile_prefix(load_model)
    if load_checkpoint:
        # Full training checkpoint: weights are stored under the 'net' key.
        net.load_state_dict(load_model['net'])
    else:
        net.load_state_dict(load_model)
    logging.info(f'Model loaded from {ph.load}')
    # Re-save just the weights under a canonical per-dataset name.
    torch.save(net.state_dict(), f'{dataset_name}_best_model.pth')

    metric_collection = MetricCollection({
        'accuracy': Accuracy(task='binary').to(device=device),
        'precision': Precision(task='binary').to(device=device),
        'recall': Recall(task='binary').to(device=device),
        'f1score': F1Score(task='binary').to(device=device)
    })  # metrics calculator

    net.eval()
    logging.info('SET model mode to test!')

    with torch.no_grad():
        for batch_img1, batch_img2, labels, name in tqdm(test_loader):
            batch_img1 = batch_img1.float().to(device)
            batch_img2 = batch_img2.float().to(device)
            labels = labels.float().to(device)

            # log is always disabled here, so the change map is the first
            # element of the first tuple in the network output.
            cd_preds = net(batch_img1, batch_img2, log=False, img_name=name, gt=None)
            cd_preds = cd_preds[0][0]
            # Binarize at 0.5 -- assumes the model output is already a
            # probability map; TODO(review): confirm sigmoid is applied
            # inside the model (the commented-out sigmoid hints it is).
            cd_preds = (cd_preds >= 0.5).float()

            # Calculate and accumulate batch metrics.
            labels = labels.int().unsqueeze(1)
            metric_collection.update(cd_preds, labels)

            # Clear batch variables from memory.
            del batch_img1, batch_img2, labels

        test_metrics = metric_collection.compute()
        print(f"Metrics on all data: {test_metrics}")
        metric_collection.reset()

    print('over')


if __name__ == '__main__':

    try:
        train_net(root_dir=ph.root_dir,
                  dataset_name=ph.dataset_name,
                  load_checkpoint=False,
                  default=ph.default,
                  split=ph.split,
                  split_val=ph.split_val)
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop evaluation, not an error; the old
        # 'Error' message was misleading.
        logging.info('Interrupted by user')
        sys.exit(0)
