import numpy as np
from tqdm import tqdm
import torch
from torch.cuda.amp import autocast as autocast
from sklearn.metrics import confusion_matrix
from utils import save_imgs
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, jaccard_score


def train_one_epoch(train_loader,
                    model,
                    criterion, 
                    optimizer, 
                    scheduler,
                    epoch, 
                    step,
                    logger, 
                    config,
                    writer):
    '''
    Train the model for one epoch and return the updated global step.

    Args:
        train_loader: iterable yielding (images, targets) batches.
        model: network being trained; assumed to already live on CUDA.
        criterion: loss function applied to (model output, targets).
        optimizer: optimizer; the lr of its first param group is logged.
        scheduler: LR scheduler, stepped once at the end of the epoch.
        epoch: current epoch index (used for logging only).
        step: running global-step counter, incremented once per batch.
        logger: logger receiving periodic progress lines.
        config: must provide `print_interval`.
        writer: TensorBoard SummaryWriter for scalar logging.

    Returns:
        int: the global step counter after this epoch.
    '''
    # switch to train mode
    model.train()

    loss_list = []

    for batch_idx, data in enumerate(train_loader):
        # BUGFIX: was `step += iter`, which advanced the counter by the batch
        # index (0, 1, 2, ...) so the global step grew quadratically within an
        # epoch. One batch == one step. (Also stop shadowing builtin `iter`.)
        step += 1
        optimizer.zero_grad()
        images, targets = data
        images = images.cuda(non_blocking=True).float()
        targets = targets.cuda(non_blocking=True).float()

        out = model(images)
        loss = criterion(out, targets)

        loss.backward()
        optimizer.step()

        loss_list.append(loss.item())

        # Direct attribute access; building the full optimizer state dict just
        # to read the lr is needlessly expensive per batch.
        now_lr = optimizer.param_groups[0]['lr']

        writer.add_scalar('info/lr', now_lr, global_step=step)
        # Log a python float, not a live CUDA tensor.
        writer.add_scalar('info/loss', loss.item(), global_step=step)

        if batch_idx % config.print_interval == 0:
            log_info = f'epoch {epoch}, iter:{batch_idx}, loss: {np.mean(loss_list):.4f}, lr: {now_lr}'
            print('train: ' + log_info)
            logger.info(log_info)

    scheduler.step()
    return step


def val_one_epoch(test_loader,
                    model,
                    criterion, 
                    epoch, 
                    logger,
                    config,
                    writer):
    '''
    Evaluate the model for one epoch and return the mean validation loss.

    Every `config.val_interval` epochs, also computes segmentation metrics
    (mIoU, Dice/F1, accuracy, sensitivity, specificity), logs them to
    TensorBoard, and writes one example image/mask/prediction triple.

    Args:
        test_loader: iterable yielding (image, mask) batches.
        model: network under evaluation; assumed to live on CUDA.
        criterion: loss function applied to (model output, mask).
        epoch: current epoch index.
        logger: logger receiving metric lines.
        config: must provide `val_interval`, `threshold`, `num_classes`.
        writer: TensorBoard SummaryWriter.

    Returns:
        float: mean loss over the validation set.
    '''
    # switch to evaluate mode
    model.eval()
    preds = []
    gts = []
    loss_list = []
    with torch.no_grad():
        for data in tqdm(test_loader):
            img, msk = data
            img = img.cuda(non_blocking=True).float()
            msk = msk.cuda(non_blocking=True).float()

            out = model(img)
            loss = criterion(out, msk)

            loss_list.append(loss.item())
            gts.append(msk.squeeze(1).cpu().detach().numpy())
            if isinstance(out, tuple):
                out = out[0]
            preds.append(out.squeeze(1).cpu().detach().numpy())

    if epoch % config.val_interval == 0:
        # Flatten predictions and ground truths
        preds = np.concatenate(preds)
        gts = np.concatenate(gts)

        # Convert predictions to class indices.
        # BUGFIX: the old check `preds.shape[1] > 1` was broken for binary
        # single-channel outputs: after squeeze(1) they are (N, H, W), so
        # shape[1] was the spatial height and an argmax was wrongly taken
        # over a spatial axis. Multi-class logits keep their channel axis
        # and are 4-D, so decide by rank instead.
        if preds.ndim == 4:
            y_pred = np.argmax(preds, axis=1)
        else:
            y_pred = np.where(preds >= config.threshold, 1, 0)
        y_true = gts.astype(np.int32)

        # Calculate confusion matrix (labels pinned so shape is stable even
        # when a class is absent from this split).
        confusion = confusion_matrix(y_true.flatten(), y_pred.flatten(), labels=range(config.num_classes))

        # Use scikit-learn metrics to calculate performance metrics
        average = 'macro' if config.num_classes > 2 else 'binary'
        accuracy = accuracy_score(y_true.flatten(), y_pred.flatten())
        f1_or_dsc = f1_score(y_true.flatten(), y_pred.flatten(), average=average)
        sensitivity = recall_score(y_true.flatten(), y_pred.flatten(), average=average)

        # For specificity we need the confusion matrix
        if config.num_classes == 2:
            TN, FP, FN, TP = confusion[0, 0], confusion[0, 1], confusion[1, 0], confusion[1, 1]
            specificity = float(TN) / float(TN + FP) if float(TN + FP) != 0 else 0
        else:
            # For multiclass, calculate per-class specificity and average.
            specificities = []
            for i in range(config.num_classes):
                true_neg = np.sum(confusion) - np.sum(confusion[i, :]) - np.sum(confusion[:, i]) + confusion[i, i]
                false_pos = np.sum(confusion[:, i]) - confusion[i, i]
                specificity_i = true_neg / (true_neg + false_pos) if (true_neg + false_pos) != 0 else 0
                specificities.append(specificity_i)
            # BUGFIX: the mean was previously computed inside the loop on
            # every iteration; the final value was right but the work was
            # redundant.
            specificity = np.mean(specificities)

        miou = jaccard_score(y_true.flatten(), y_pred.flatten(), average=average)

        log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}, miou: {miou}, f1_or_dsc: {f1_or_dsc}, accuracy: {accuracy}, \
            specificity: {specificity}, sensitivity: {sensitivity}, confusion_matrix: {confusion}'
        print(log_info)
        logger.info(log_info)

        # log metrics
        writer.add_scalar('metrics/val_miou', miou, global_step=epoch)
        writer.add_scalar('metrics/val_dsc', f1_or_dsc, global_step=epoch)
        writer.add_scalar('metrics/val_accuracy', accuracy, global_step=epoch)
        writer.add_scalar('metrics/val_sensitivity', sensitivity, global_step=epoch)
        writer.add_scalar('metrics/val_specificity', specificity, global_step=epoch)
        writer.add_scalar('metrics/val_loss', np.mean(loss_list), global_step=epoch)

        # Find an image with non-zero ground truth for visualization.
        # NOTE(review): this re-iterates the loader and re-runs the model on
        # one sample; acceptable for periodic validation.
        for img_sample, msk_sample in test_loader:
            if torch.sum(msk_sample) > 0:  # mask contains foreground pixels
                img_sample = img_sample.cuda().float()

                # Get model prediction for this sample
                with torch.no_grad():
                    out_sample = model(img_sample)
                    if isinstance(out_sample, tuple):
                        out_sample = out_sample[0]

                # Convert to numpy for visualization
                img_np = img_sample[0].cpu().numpy().transpose(1, 2, 0)  # CHW -> HWC
                msk_np = msk_sample[0].squeeze().cpu().numpy()  # drop channel dim -> HW
                pred_np = out_sample[0].cpu().numpy()  # (C, H, W)
                if pred_np.shape[0] > 1:
                    # BUGFIX: channel axis of a single (C, H, W) sample is 0;
                    # the old code used axis=1 (a spatial axis).
                    pred_np = np.argmax(pred_np, axis=0)
                else:
                    # BUGFIX: argmax over a single channel is identically 0;
                    # binary outputs must be thresholded instead.
                    pred_np = np.where(pred_np[0] >= config.threshold, 1, 0)

                # Add images to tensorboard
                img_np = np.clip(img_np, 0, 1)  # Ensure values are in [0,1]
                msk_binary = msk_np.astype(np.float32)
                pred_binary = pred_np.astype(np.float32)

                writer.add_image(f'val/{epoch}/Image', img_np, global_step=epoch, dataformats='HWC')
                writer.add_image(f'val/{epoch}/GroundTruth', msk_binary, global_step=epoch, dataformats='HW')
                writer.add_image(f'val/{epoch}/Prediction', pred_binary, global_step=epoch, dataformats='HW')

                break  # Only process one example

    else:
        log_info = f'val epoch: {epoch}, loss: {np.mean(loss_list):.4f}'
        print(log_info)
        logger.info(log_info)

    return np.mean(loss_list)


def test_one_epoch(test_loader,
                    model,
                    criterion,
                    logger,
                    config,
                    test_data_name=None):
    '''
    Run the model over the test set, compute binary segmentation metrics,
    periodically save example outputs, and return the mean test loss.

    Args:
        test_loader: iterable yielding (image, mask) batches.
        model: network under test; assumed to live on CUDA.
        criterion: loss function applied to (model output, mask).
        logger: logger receiving the metric summary.
        config: must provide `save_interval`, `work_dir`, `datasets`,
            `threshold`.
        test_data_name: optional dataset label included in the log.

    Returns:
        float: mean loss over the test set.
    '''
    # switch to evaluate mode
    model.eval()
    preds = []
    gts = []
    loss_list = []
    with torch.no_grad():
        for i, data in enumerate(tqdm(test_loader)):
            img, msk = data
            img = img.cuda(non_blocking=True).float()
            msk = msk.cuda(non_blocking=True).float()

            out = model(img)
            loss = criterion(out, msk)

            loss_list.append(loss.item())
            msk = msk.squeeze(1).cpu().detach().numpy()
            gts.append(msk)
            if isinstance(out, tuple):
                out = out[0]
            out = out.squeeze(1).cpu().detach().numpy()
            preds.append(out)
            if i % config.save_interval == 0:
                save_imgs(img, msk, out, i, config.work_dir + 'outputs/', config.datasets, config.threshold, test_data_name=test_data_name)

        # BUGFIX: np.array(...) on a list of per-batch arrays breaks when the
        # final batch is smaller than the rest (ragged list); concatenate
        # handles unequal batch sizes.
        preds = np.concatenate(preds, axis=0).reshape(-1)
        gts = np.concatenate(gts, axis=0).reshape(-1)

        y_pre = np.where(preds >= config.threshold, 1, 0)
        y_true = np.where(gts >= 0.5, 1, 0)

        # Pin labels so the matrix is always 2x2 even if one class is absent;
        # otherwise the 2x2 unpacking below would crash on a 1x1 matrix.
        confusion = confusion_matrix(y_true, y_pre, labels=[0, 1])
        TN, FP, FN, TP = confusion[0, 0], confusion[0, 1], confusion[1, 0], confusion[1, 1]

        accuracy = float(TN + TP) / float(np.sum(confusion)) if float(np.sum(confusion)) != 0 else 0
        sensitivity = float(TP) / float(TP + FN) if float(TP + FN) != 0 else 0
        specificity = float(TN) / float(TN + FP) if float(TN + FP) != 0 else 0
        f1_or_dsc = float(2 * TP) / float(2 * TP + FP + FN) if float(2 * TP + FP + FN) != 0 else 0
        miou = float(TP) / float(TP + FP + FN) if float(TP + FP + FN) != 0 else 0

        if test_data_name is not None:
            log_info = f'test_datasets_name: {test_data_name}'
            print(log_info)
            logger.info(log_info)
        log_info = f'test of best model, loss: {np.mean(loss_list):.4f},miou: {miou}, f1_or_dsc: {f1_or_dsc}, accuracy: {accuracy}, \
                specificity: {specificity}, sensitivity: {sensitivity}, confusion_matrix: {confusion}'
        print(log_info)
        logger.info(log_info)

    return np.mean(loss_list)
