import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm

import wandb
from utils.dice_score import multiclass_dice_coeff, dice_coeff
from utils.utils import specificityCalc
from sklearn.metrics import roc_auc_score,accuracy_score,recall_score
import numpy as np

def evaluate(net, dataloader, device, criterion, acceleration):
    """Run one validation pass over `dataloader` and report the mean loss.

    Args:
        net: iterable of cascaded reconstruction blocks; each block is
            called as ``block(t2_mask, t1)`` and its output is fed to the
            next block as the new ``t2_mask``.
        dataloader: yields dicts with keys 't1', 't2_mask', 't2', 'fid'.
        device: torch device the tensors are moved to.
        criterion: loss callable, invoked as ``criterion(pred, target)``.
        acceleration: undersampling factor; currently unused here (the
            data-consistency step is commented out) but kept for
            interface compatibility.

    Returns:
        0 when the dataloader is empty; otherwise a tuple
        ``(mean_loss, t1[0], t2_mask_show[0], t2_true[0], t2_pred[0])``
        where the tensor entries come from the last batch (for display).
    """
    net.eval()
    num_val_batches = len(dataloader)

    # Guard BEFORE iterating: with no batches, none of the tensors used
    # below would ever be defined (the old post-loop check was too late).
    if num_val_batches == 0:
        net.train()
        return 0

    loss_total = 0.0
    sample_num = 0

    with torch.no_grad():

        # iterate over the validation set
        for batch in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False):
            t1, t2_mask, t2 = batch['t1'], batch['t2_mask'], batch['t2']
            fid = batch['fid']
            t1 = t1.to(device=device, dtype=torch.float32)
            t2_mask = t2_mask.to(device=device, dtype=torch.float32)
            t2_mask_show = t2_mask  # keep the pre-cascade input for display
            t2_true = t2.to(device=device, dtype=torch.float32)

            # Cascade: each block refines the previous block's output.
            for block in net:
                t2_pred = block(t2_mask, t1)
                t2_mask = t2_pred
            # Some blocks return a list of outputs; use the primary one.
            if isinstance(t2_pred, list):
                t2_pred = t2_pred[0]

            loss = criterion(t2_pred, t2_true)

            # BUG FIX: accumulate INSIDE the loop. Previously these two
            # lines were outside it, so only the final batch contributed
            # to the reported validation loss.
            loss_total += loss.item() * t2_pred.shape[0]
            sample_num += t2_pred.shape[0]

    net.train()
    return loss_total / sample_num, t1[0], t2_mask_show[0], t2_true[0], t2_pred[0]



from sklearn.linear_model import LinearRegression as LR

from sklearn.metrics import mean_squared_error,mean_absolute_error

def LinearRegression(x_oringal, y_oringal):
    """Fit ``y = a * x + b`` by ordinary least squares and return (a, b).

    Args:
        x_oringal: 1-D array-like of predictor values.
        y_oringal: 1-D array-like of target values, same length as x.

    Returns:
        Tuple ``(a, b)``: slope and intercept of the least-squares line.
    """
    x = np.asarray(x_oringal, dtype=float).ravel()
    y = np.asarray(y_oringal, dtype=float).ravel()

    # np.polyfit with degree 1 solves the same single-feature OLS problem
    # as sklearn's LinearRegression (same slope/intercept), without the
    # sklearn fit/predict round-trip. The previous version also computed
    # rmse/mse/mae/R^2/r2 and a train/test split alias, none of which
    # were used or returned — all removed.
    a, b = np.polyfit(x, y, 1)
    return a, b




def data_consistency(fid_cs, img_pred,acceleration=4):
    """Merge measured k-space data back into network-predicted images.

    Per slice: the measured low-frequency k-space rows are kept as-is,
    the prediction's k-space magnitude is rescaled onto the measured
    scale via a linear fit over the sampled rows, and the combined
    k-space is inverse-transformed back to image space.

    Args:
        fid_cs: measured (undersampled) k-space slices, indexed as
            ``fid_cs[i, :, :]``; assumed complex-valued (np.angle is
            taken of its inverse transform).
        img_pred: predicted image stack; squeezed so it indexes as
            ``img_pred[i, :, :]``. NOTE(review): assumes 256x256 slices
            — see the hard-coded kspace_shape below; confirm upstream.
        acceleration: undersampling factor; the central 1/acceleration
            fraction of rows is treated as sampled.

    Returns:
        np.ndarray of per-slice magnitude images, each normalized by its
        own maximum. (Per-slice max values and phase images are also
        collected but not returned.)
    """
    img_pred = np.squeeze(img_pred)
    # print (fid_cs.shape, img_pred.shape)
    kspace_shape = [256,256]  # NOTE(review): hard-coded slice size — verify against the data
    mask = np.zeros(kspace_shape)
    # print (mask.shape,kspace_shape)
    # Sampling mask: a centered band of rows (low spatial frequencies
    # along axis 0) of total height rows/acceleration.
    start = mask.shape[0]//2 - mask.shape[0]//(2* acceleration )                   
    end = mask.shape[0]//2 + mask.shape[0]//(2* acceleration )
    mask[start:end, :] = 1
    
    mask_low = mask
    mask_pred = np.abs (1-mask_low)  # complement: rows to take from the prediction
    # row = img_cs.shape[1]
    # idex_fid = range(row // 2 - 3, row // 2 + 3)
    idex_fid = []  # empty, so the np.delete calls below remove nothing



    img = []
    max_value = []
    angle = []
    for iter_dc in range(len(fid_cs)):
        # Low-frequency image and its phase; used to give the prediction
        # an estimated phase before transforming it to k-space.
        cs_img_low = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(np.multiply(mask_low, fid_cs[iter_dc, :, :]))))
        cs_img_low_phase = np.angle(cs_img_low)

        # slice_fid_cs = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(img_cs[iter_dc, :, :])))
        # Measured k-space restricted to the sampled rows.
        slice_fid_cs = np.multiply(fid_cs[iter_dc, :, :], mask)

        # Predicted image, given the estimated phase, forward-transformed to k-space.
        fid_pred = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(img_pred[iter_dc, :, :]  * (np.cos(cs_img_low_phase) + 1j * np.sin(cs_img_low_phase)))))
        phase_pred = np.angle(fid_pred)

        # Row indices of the sampled band (rows where the mask is non-zero).
        idex_regress = np.where(np.sum(np.delete(mask, idex_fid, axis=0), axis=1) != 0)

        regress_X = np.delete(slice_fid_cs, idex_fid, axis=0)
        regress_X = regress_X[idex_regress, :]

        regress_Y = np.delete(np.multiply(fid_pred, mask), idex_fid, axis=0)
        regress_Y = regress_Y[idex_regress, :]

        regress_X = regress_X.flatten()
        regress_Y = regress_Y.flatten()

        # Fit |pred| = a*|measured| + b over the sampled rows, then invert
        # to map the prediction's k-space magnitude onto the measured scale.
        a, b = LinearRegression(abs(regress_X), abs(regress_Y))
        fid_pred_invert_to_cs = (abs(fid_pred) - b) / a
        fid_pred_invert_to_cs = fid_pred_invert_to_cs * (np.cos(phase_pred) + 1j * np.sin(phase_pred))
        # Data consistency: measured rows + rescaled predicted rows.
        fid = np.add(np.multiply(slice_fid_cs, mask), np.multiply(fid_pred_invert_to_cs, mask_pred))
        temp = abs(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(fid))))
        temp_max_value = np.max(temp)
        temp = temp / temp_max_value  # per-slice normalization to max 1
        temp_angle = np.angle(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(fid))))

        img.append(temp)
        max_value.append(temp_max_value)
        # print(iter_dc)
        angle.append(temp_angle)
    img = np.array(img)
    max_value = np.array(max_value)  # collected but not returned
    angle = np.array(angle)  # collected but not returned
        # np.save(load_path + "/dataset/max_all_img_angle_dc", angle)

    return img




def evaluate_classification(net, dataloader, device):
    """Validate a joint segmentation + classification model.

    Args:
        net: model called as ``net(image)`` returning
            ``(masks_pred, class_pred)``; must expose ``n_classes``.
        dataloader: yields dicts with keys 'image', 'mask'
            (0/255-valued masks) and 'class' (integer labels).
        device: torch device for the inputs.

    Returns:
        0 when the dataloader is empty; otherwise the tuple
        ``(prob_all, label_all, mean_dice, acc, auc, sensitivity,
        specificity)``.
    """
    net.eval()
    num_val_batches = len(dataloader)
    dice_score = 0

    # BUG FIX: guard moved BEFORE the loop. With an empty dataloader the
    # old code hit undefined names (masks_pred at the averaging step) and
    # a zero division before its late guard could ever return.
    if num_val_batches == 0:
        net.train()
        return dice_score

    prob_all = []
    label_all = []
    # iterate over the validation set
    for batch in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False):
        image, masks_true, labels = batch['image'], batch['mask'], batch['class']
        # move images and labels to correct device and type
        image = image.to(device=device, dtype=torch.float32)
        # masks come in 0/255 — rescale to class indices for one-hot below
        masks_true = (masks_true / 255).to(device=device, dtype=torch.long)

        with torch.no_grad():
            # predict the masks (one per channel) and the class logits
            masks_pred, class_pred = net(image)

            for channel_index in range(len(masks_pred)):
                mask_pred = masks_pred[channel_index]
                mask_true = masks_true[:, :, :, channel_index]
                # convert to one-hot format
                mask_true = F.one_hot(mask_true, net.n_classes).permute(0, 3, 1, 2).float()
                if net.n_classes == 1:
                    mask_pred = (F.sigmoid(mask_pred) > 0.5).float()
                    # compute the Dice score
                    dice_score += dice_coeff(mask_pred, mask_true, reduce_batch_first=False)
                else:
                    mask_pred = F.one_hot(mask_pred.argmax(dim=1), net.n_classes).permute(0, 3, 1, 2).float()
                    # compute the Dice score, ignoring background
                    dice_score += multiclass_dice_coeff(mask_pred[:, 1:, ...], mask_true[:, 1:, ...], reduce_batch_first=False)

            outputs = nn.functional.softmax(class_pred, dim=1)
            # `outputs[:,]` was a no-op slice — collect the tensor directly
            prob_all.extend(outputs.cpu().numpy())
            label_all.extend(labels.cpu().numpy())

    # average over the mask channels (len(masks_pred) is the channel
    # count from the last batch — constant across batches for this model)
    dice_score = dice_score / len(masks_pred)
    acc = accuracy_score(label_all, np.argmax(prob_all, axis=1))
    auc = roc_auc_score(label_all, prob_all, multi_class="ovr", average="macro")
    sensitivity = recall_score(label_all, np.argmax(prob_all, axis=1), average="macro")
    specificity = specificityCalc(np.argmax(prob_all, axis=1), label_all)
    net.train()

    return prob_all, label_all, dice_score / num_val_batches, acc, auc, sensitivity, specificity

def evaluate_all_folders(prob_all, label_all, dice, experiment, step, num_folds=10):
    """Compute aggregate cross-validation metrics and log them to wandb.

    Args:
        prob_all: per-sample class probabilities, shape (n, 3), for the
            classes ["NOR", "HCM", "HHD"].
        label_all: integer ground-truth labels, length n.
        dice: dice score summed across folds; divided by ``num_folds``
            before logging.
        experiment: wandb run whose summary is updated in place.
        step: wandb logging step.
        num_folds: number of CV folds the dice total covers (generalizes
            the previously hard-coded divisor of 10; default preserves
            the old behavior).
    """
    # Hoist the repeated hard-prediction computation.
    preds = np.argmax(prob_all, axis=1)

    acc = accuracy_score(label_all, preds)
    auc_macro = roc_auc_score(label_all, prob_all, multi_class="ovr", average="macro")
    auc_weighted = roc_auc_score(label_all, prob_all, multi_class="ovr", average="weighted")
    sensitivity = recall_score(label_all, preds, average="macro")
    specificity = specificityCalc(preds, label_all)

    experiment.summary["test_accuracy"] = acc
    experiment.summary['auc_macro'] = auc_macro
    experiment.summary['auc_weighted'] = auc_weighted
    experiment.summary['sensitivity'] = sensitivity
    experiment.summary['specificity'] = specificity
    experiment.summary['dice'] = dice / num_folds

    # One row per sample: the three class probabilities plus the label.
    wandb_table_data = np.concatenate((np.array(prob_all), np.array(label_all)[:, np.newaxis]), axis=1)
    wandb_table = wandb.Table(data=wandb_table_data, columns=["NOR", "HCM", "HHD", "label"])
    wandb.log({
        "results": wandb_table,
        "roc_curve": wandb.plot.roc_curve(label_all, prob_all, labels=["NOR", "HCM", "HHD"], classes_to_plot=None),
        "conf_mat": wandb.plot.confusion_matrix(probs=np.array(prob_all), y_true=np.array(label_all), class_names=["NOR", "HCM", "HHD"])
        }, step=step)



