from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import argparse
import cv2
import numpy as np
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
from torch.utils.data import DataLoader
from ECSSD.dataloader import get_train_loader, get_test_loader

from src.model import SODModel
from src.dataloader import InfDataloader, SODLoader


def parse_arguments():
    """Parse command-line options for model evaluation/inference.

    Returns:
        argparse.Namespace with fields: model_path (str), use_gpu (bool),
        img_size (int), bs (int).
    """
    parser = argparse.ArgumentParser(description='Parameters to train your model.')
    parser.add_argument('--model_path', default='./models/alph-0.7_wbce_w0-1.0_w1-1.15/weights/best-model_epoch-246_mae-0.0878_loss-0.7836.pth', help='Path to model', type=str)
    # BUG FIX: type=bool treats ANY non-empty string as True (bool('False')
    # is True), so '--use_gpu False' silently enabled the GPU. Parse the
    # string explicitly instead; the unparsed default (True) is unaffected.
    parser.add_argument('--use_gpu', default=True,
                        help='Whether to use GPU or not',
                        type=lambda v: str(v).strip().lower() in ('true', '1', 'yes', 'y'))
    parser.add_argument('--img_size', default=256, help='Image size to be used', type=int)
    parser.add_argument('--bs', default=24, help='Batch Size for testing', type=int)

    return parser.parse_args()


def run_inference(args):
    """Run the saliency model over the test set and save results to ./output.

    For each test image, writes three PNGs: the input image (BGR), the raw
    saliency map scaled to [0, 255], and a binarized (rounded) saliency mask.

    :param args: parsed CLI arguments (model_path, use_gpu, img_size, bs).
    """
    # Determine device
    if args.use_gpu and torch.cuda.is_available():
        device = torch.device(device='cuda')
    else:
        device = torch.device(device='cpu')

    # Load model weights from checkpoint
    model = SODModel()
    chkpt = torch.load(args.model_path, map_location=device)
    model.load_state_dict(chkpt['model'])
    model.to(device)
    model.eval()

    test_dataloader = get_test_loader("./data/", "./data/test_pair.lst")

    # Output directory for the saved images/masks
    output_dir = './output'
    os.makedirs(output_dir, exist_ok=True)

    print("Press 'q' to quit.")
    with torch.no_grad():
        for batch_idx, (img_np, img_tor, gt_name) in enumerate(tqdm.tqdm(test_dataloader), start=1):
            img_tor = img_tor.to(device)
            # BUG FIX: feed the preprocessed tensor (img_tor) to the model.
            # The original passed img_np, leaving img_tor entirely unused.
            pred_masks, _ = model(img_tor)

            # Assuming batch_size = 1
            img_np = img_np.squeeze(0).numpy().transpose(1, 2, 0)  # (1, 3, x, y) to (x, y, 3)
            img_np = (img_np * 255).astype(np.uint8)  # [0, 1] to [0, 255]
            img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)

            pred_float = pred_masks.squeeze(0).squeeze(0).cpu().numpy()  # (1, 1, x, y) to (x, y)
            pred_masks_raw = (pred_float * 255).astype(np.uint8)  # [0, 1] to [0, 255]
            # BUG FIX: round the float mask in [0, 1] BEFORE scaling.
            # The original rounded the already-uint8 array, a no-op.
            pred_masks_round = (np.round(pred_float) * 255).astype(np.uint8)

            print('Image :', batch_idx)

            # Save images
            input_image_path = os.path.join(output_dir, f'input_image_{batch_idx}.png')
            saliency_mask_path = os.path.join(output_dir, f'generated_saliency_mask_{batch_idx}.png')
            rounded_mask_path = os.path.join(output_dir, f'rounded_saliency_mask_{batch_idx}.png')

            cv2.imwrite(input_image_path, img_np)
            cv2.imwrite(saliency_mask_path, pred_masks_raw)
            cv2.imwrite(rounded_mask_path, pred_masks_round)

            # Optional: exit loop after saving images
            # NOTE(review): no cv2.imshow window is open, so waitKey may return
            # -1 immediately on most platforms — confirm the 'q' exit works.
            key = cv2.waitKey(0)
            if key == ord('q'):
                break


def prec_rec(y_true, y_pred, beta2):
    """Compute eps-stabilized precision and recall for binary masks.

    :param y_true: ground-truth binary tensor.
    :param y_pred: predicted binary tensor (same shape as y_true).
    :param beta2: unused here; kept for interface compatibility — the
        caller applies the beta^2 weighting in the F-measure itself.
    :return: tuple (precision, recall) as 0-dim tensors.
    """
    # Machine epsilon keeps both ratios finite when a mask is all zeros.
    eps = sys.float_info.epsilon

    true_positives = torch.sum(y_true * y_pred)
    predicted_positives = torch.sum(y_pred)
    actual_positives = torch.sum(y_true)

    precision = (true_positives + eps) / (predicted_positives + eps)
    recall = (true_positives + eps) / (actual_positives + eps)

    return precision, recall

def calculate_mae_and_fmeasure(args):
    """Evaluate the model on the test set: MAE and maximal F-measure.

    For each image, the threshold in [0, 1] (256 steps of 1/255) that
    maximizes the F-beta measure (beta^2 = 0.3) is found; precision and
    recall at that threshold are averaged over all batches to produce the
    final F-measure. Prints both metrics.

    :param args: parsed CLI arguments (model_path, use_gpu, ...).
    """
    beta2 = 0.3  # beta^2 weight of precision in the F-measure

    # Determine device
    if args.use_gpu and torch.cuda.is_available():
        device = torch.device(device='cuda')
    else:
        device = torch.device(device='cpu')

    # Load model weights from checkpoint
    model = SODModel()
    chkpt = torch.load(args.model_path, map_location=device)
    model.load_state_dict(chkpt['model'])
    model.to(device)
    model.eval()

    test_dataloader = get_test_loader("./data/", "./data/test_pair.lst")

    # Per-image mean absolute errors
    mae_list = []
    total_prec = 0
    total_rec = 0

    with torch.no_grad():
        for batch_idx, (inp_imgs, gt_masks, gt_name) in enumerate(tqdm.tqdm(test_dataloader), start=1):
            inp_imgs = inp_imgs.to(device)
            gt_masks = gt_masks.to(device)
            pred_masks, _ = model(inp_imgs)

            mae = torch.mean(torch.abs(pred_masks - gt_masks), dim=(1, 2, 3)).cpu().numpy()
            mae_list.extend(mae)

            # NOTE(review): .squeeze() assumes batch size 1 — with bs > 1 the
            # flattened views below would mix pixels from different images;
            # confirm get_test_loader uses a batch size of 1.
            gt_arr = gt_masks.squeeze().cpu()
            pred_arr = pred_masks.squeeze().cpu()

            # The ground-truth binarization does not depend on the threshold;
            # hoisted out of the 256-iteration sweep (was recomputed per pass).
            y_true1 = (gt_arr >= 0.5).float().view(1, -1)

            # Sweep all 256 thresholds to find the max F-measure for this image
            max_fmeasure = 0
            best_threshold = 0
            for t in range(256):
                threshold = t / 255.0
                y_pred1 = (pred_arr >= threshold).float().view(1, -1)

                prec, rec = prec_rec(y_true1, y_pred1, beta2)
                # eps-stabilized prec/rec are strictly positive, so the
                # denominator can never be zero.
                fmeasure = ((1 + beta2) * prec * rec) / (beta2 * prec + rec)
                if fmeasure > max_fmeasure:
                    max_fmeasure = fmeasure
                    best_threshold = threshold

            # Precision/recall at this image's best threshold
            y_pred1 = (pred_arr >= best_threshold).float().view(1, -1)
            prec, rec = prec_rec(y_true1, y_pred1, beta2)

            total_prec += prec
            total_rec += rec

    # NOTE(review): dividing by len(test_dataloader) averages over batches,
    # which equals a per-image average only when the batch size is 1.
    overall_prec = total_prec / len(test_dataloader)
    overall_rec = total_rec / len(test_dataloader)
    overall_fb = (((1 + beta2) * overall_prec * overall_rec) / (beta2 * overall_prec + overall_rec)).item()

    print('MAE for the test set is :', np.mean(mae_list))
    print('F-measure for the test set is :', overall_fb)


if __name__ == '__main__':
    # Parse CLI options once, then run the evaluation pass.
    cli_args = parse_arguments()
    calculate_mae_and_fmeasure(cli_args)
    # run_inference(cli_args)
