import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision.transforms import Compose, ToTensor, Grayscale

import lithosim_cuda as litho
from epe_checker import get_epe_checkpoints

# Resist model: map aerial-image intensity to a continuous wafer image.
def sigmoid_ilt_z(intensity_map, theta_z=50, threhold_z=0.225):
    """Soft-threshold the intensity map into (0, 1) with a scaled sigmoid.

    The intensity is shifted by ``threhold_z`` and sharpened by the
    steepness factor ``theta_z`` before being squashed, approximating
    the resist development step.
    """
    scaled = (intensity_map - threhold_z) * theta_z
    return torch.sigmoid(scaled)

# Mask relaxation: turn a raw mask into a soft (0, 1)-valued mask.
def relex_mask(mask, theta_m=4, threhold_m=0.0):
    """Relax a mask into (0, 1) using a sigmoid of steepness ``theta_m``.

    Values above ``threhold_m`` map above 0.5, values below map under 0.5.
    """
    shifted = (mask - threhold_m) * theta_m
    return torch.sigmoid(shifted)

# Mask binarization.
def binary_mask(mask, threshold_m=0.5):
    """Binarize a relaxed mask: 1.0 where ``mask >= threshold_m``, else 0.0.

    Fix: the original cast with ``torch.cuda.FloatTensor``, which is a
    deprecated legacy type constructor and raises on CPU-only builds
    (and silently moved CPU tensors to the GPU). ``.float()`` produces
    the same float32 result while keeping the tensor on its own device,
    so CUDA inputs still yield CUDA outputs.
    """
    return (mask >= threshold_m).float()

def bit_mask_to_two_value_mask(mask):
    r"""
    Convert a {0, 1} bit mask into a {-1, 1} two-valued mask.

    Bridges the network prediction to the input expected by the ILT
    loss layer via the affine map m -> 2*m - 1.
    """
    return mask * 2.0 - 1.0


def compute_common_term(mask, target, kernels, weight, dose=1.0, gamma=4.0, theta_z=50, theta_m=4):
    r"""
    Compute the factor shared by both convolution branches of the gradient.

    Runs the lithography forward model on ``mask``, applies the sigmoid
    resist model, and returns (Z - Z_t)^(gamma - 1) * Z * (1 - Z), where
    Z is the nominal wafer image and Z_t the binary target layout.
    ``theta_m`` is accepted for signature symmetry but unused here.
    """
    aerial, _ = litho.lithosim(mask, None, kernels, weight,
                               wafer_output_path=None, save_bin_wafer_image=False,
                               kernels_number=None, avgpool_size=None,
                               dose=dose, return_binary_wafer=False)

    # Nominal wafer image with continuous values in (0, 1).
    wafer = sigmoid_ilt_z(aerial, theta_z)
    diff = wafer - target
    return diff.pow(gamma - 1) * wafer * (1 - wafer)  # [1 * H * W]


def compute_gradient(mask, target, kernels, kernels_ct, weight,
                     dose=1.0, gamma=4.0, theta_z=50, theta_m=4, avgpool_size=None):
    r"""
    Main function of ILT loss gradient calculation (analytic, no autograd)
    Args:
        mask: input mask image; multiplied by mask * (1 - mask) below, i.e.
              treated as a sigmoid-relaxed mask in (0, 1)
        target: target layout (binary)
        kernels (kernels_ct): SOCS kernels (kernels_ct is the conjugate set)
        weight: weights for SOCS kernels
        dose: +-2% process condition \in {0.98, 1.0, 1.02} -> min/nomial/max
        gamma, theta_z, theta_m: hyper-parameters for gradient calculation, same as MOSAIC (Gao et al., DAC'14)
        avgpool_size: if not None, average-pool the gradient with this
                      kernel size and stride (downsampling the gradient grid)
    Return:
        Gradient tensor of ilt_loss
    """
    # Shared factor (Z - Z_t)^(gamma-1) * Z * (1 - Z) used by both branches.
    common_term = compute_common_term(mask, target, kernels, weight, dose, gamma, theta_z, theta_m)

    mask_convolve_kernel_ct_output = litho.convolve_kernel(mask, kernels_ct, weight, dose)  # [1 * H * W]
    mask_convolve_kernel_ct_output = mask_convolve_kernel_ct_output * common_term  # real_part = real_part * common_term, imagine_part = imagine_part * common_term

    # Here we need to flip the kernels and kernels_ct => rotate H and H* by 180 degrees
    kernels_flip = torch.flip(kernels, [1, 2])
    kernels_ct_flip = torch.flip(kernels_ct, [1, 2])
    gradient_right_term = litho.convolve_kernel(mask_convolve_kernel_ct_output, kernels_flip, weight,
                                                dose).real  # [1 * H * W], take the real part

    mask_convolve_kernel_output = litho.convolve_kernel(mask, kernels, weight, dose)  # [1 * H * W]
    mask_convolve_kernel_output = mask_convolve_kernel_output * common_term  # real_part = real_part * common_term, imagine_part = imagine_part * common_term
    gradient_left_term = litho.convolve_kernel(mask_convolve_kernel_output, kernels_ct_flip, weight,
                                               dose).real  # [1 * H * W], take the real part

    # Chain-rule scale: loss exponent (gamma) and the two sigmoid slopes.
    constant = gamma * theta_z * theta_m
    discrete_penalty_mask = 0.025 * (-8 * mask + 4)  # From the MOSAIC's (GAO et al. DAC'14) source code
    # mask * (1 - mask) is the derivative of the sigmoid mask relaxation.
    gradient = (constant * (gradient_right_term + gradient_left_term) + theta_m * discrete_penalty_mask) * mask * (
                1 - mask)

    # Keep only the low-frequency part of the gradient.
    # NOTE(review): lamda=193, NA=1.35, sigma=2 are hard-coded here
    # (193 nm / NA 1.35 process) — confirm they match the litho setup.
    gradient = gauss_lpfilter(gradient, 193, 1.35,2, pixel_size=1)

    if avgpool_size is not None:
        # Downsample the gradient to match a coarser optimization grid.
        avg_layer = torch.nn.AvgPool2d(
            kernel_size=(avgpool_size, avgpool_size),
            stride=(avgpool_size, avgpool_size))
        gradient = avg_layer(gradient)

    return gradient


def compute_convolve_sigmoid_gradient(mask, kernels, kernels_ct, weight, dose=1.0, theta_z=50, theta_m=4):
    r"""
    Calculate the gradient of the sig(convolve(:,:)) operation
    e.g., Z = Sig(I) = Sig(convolve(mask, kernels))
    Args:
        mask: input mask image; multiplied by mask * (1 - mask) below, i.e.
              treated as a sigmoid-relaxed mask in (0, 1)
        kernels/kernels_ct: SOCS kernels (focus)
        weight/weight_def: weights for SOCS kernels (focus/defocus)
        dose: +-2% process condition \in {0.98, 1.0, 1.02} -> min/nomial/max
        theta_z, theta_m: hyper-parameters for gradient calculation
    Return:
        Tuple of (gradient tensor of sig(convolve(:,:)), wafer image z)
    """
    # Forward model: aerial intensity I = convolve(mask, kernels).
    intensity_map, _ = litho.lithosim(mask, None, kernels, weight,
                                      wafer_output_path=None, save_bin_wafer_image=False,
                                      kernels_number=None, avgpool_size=None,
                                      dose=dose, return_binary_wafer=False)  # Calculate convolve(mask, kernels)
    z = sigmoid_ilt_z(intensity_map, theta_z)  # Nomial/min/max wafer image with continuous value between (0,1)
    # Derivative of the sigmoid resist model (up to the theta_z factor below).
    common_term = (1 - z) * z

    # Flip your kernels here (rotate H and H* by 180 degrees)
    kernels_flip = torch.flip(kernels, [1, 2])
    kernels_ct_flip = torch.flip(kernels_ct, [1, 2])

    mask_convolve_kernel_ct_output = litho.convolve_kernel(mask, kernels_ct, weight, dose)  # convolve(mask, kernels_ct)
    mask_convolve_kernel_ct_output = mask_convolve_kernel_ct_output * common_term  # convolve(mask, kernels_ct) * z * (1 - z)
    gradient_right_term = litho.convolve_kernel(mask_convolve_kernel_ct_output, kernels_flip, weight,
                                                dose).real  # convolve((convolve(mask, kernels_ct) * z * (1 - z)), kernels), [1 * H * W], take the real part
    mask_convolve_kernel_output = litho.convolve_kernel(mask, kernels, weight, dose)  # convolve(mask, kernels)
    mask_convolve_kernel_output = mask_convolve_kernel_output * common_term  # convolve(mask, kernels) * z * (1 - z)
    gradient_left_term = litho.convolve_kernel(mask_convolve_kernel_output, kernels_ct_flip, weight,
                                               dose).real  # convolve((convolve(mask, kernels) * z * (1 - z)), kernels_ct), [1 * H * W], take the real part
    discrete_penalty_mask = 0.025 * (-8 * mask + 4)  # From the MOSAIC's (GAO et al. DAC'14) source code
    # mask * (1 - mask) is the derivative of the sigmoid mask relaxation.
    gradient = (theta_z * theta_m * (
                gradient_right_term + gradient_left_term) + theta_m * discrete_penalty_mask) * mask * (1 - mask)

    return gradient, z


# def compute_cplx_gradient(mask, kernels, kernels_ct, kernel_def, kernel_def_ct, weight, weight_def, theta_z=50,
#                           theta_m=4, gamma=4):
#     r"""
#     Calculate the gradient of cplx objective: loss_cplx = ||Z_outer - Z_inner||_gamma
#     Args:
#         mask: input mask image
#         kernels/kernels_ct: SOCS kernels (focus)
#         kernel_def/kernel_def_ct: SOCS kernels (defocus)
#         weight/weight_def: weights for SOCS kernels (focus/defocus)
#         gamma, theta_z, theta_m: hyper-parameters for gradient calculation
#     Return:
#         Gradient tensor of loss_cplx
#     """
#     MAX_DOSE = 1.02
#     MIN_DOSE = 0.98
#     mask_inner_convolve_sigmoid_gradient, z_inner = compute_convolve_sigmoid_gradient(mask, kernel_def, kernel_def_ct,
#                                                                                       weight_def, dose=MIN_DOSE,
#                                                                                       theta_z=theta_z, theta_m=theta_m)
#     mask_outer_convolve_sigmoid_gradient, z_outer = compute_convolve_sigmoid_gradient(mask, kernels, kernels_ct,
#                                                                                       weight, dose=MAX_DOSE,
#                                                                                       theta_z=theta_z, theta_m=theta_m)
#     gradient = gamma * (z_outer - z_inner).pow(gamma - 1) * (
#                 mask_outer_convolve_sigmoid_gradient - mask_inner_convolve_sigmoid_gradient)
#
#     return gradient


# def compute_gradient_scale(mask, target, kernels, kernels_ct, kernel_def, kernel_def_ct, weight, weight_def, new_cord,
#                            cplx_obj=False, origin_size=2048):
#     r"""
#     A wrapper function for gradient calculations of both loss_ilt and loss_cplx with scaling scheme
#     Flow:
#         Input mask (nn prediction) -> scale back to the cropped bbox size -> fit the cropped bbox into original size
#                                    -> calculate the gradient -> crop and scale the gradient to input size -> gradient backward to nn
#         [input_size, input_size] -> [cropped_bbox_size, cropped_bbox_size] -> [original_size, original_size] -> [cropped_bbox_size, cropped_bbox_size] -> [input_size, input_size]
#     Args:
#         mask: input mask image of size N * 1 * input_size * input_size, input_size = 512 by default
#         target: target layout of size N * 1 * origin_size * origin_size, origin_size = 2048 by default
#         kernels/kernels_ct: SOCS kernels (focus)
#         kernel_def/kernel_def_ct: SOCS kernels (defocus)
#         weight/weight_def: weights for SOCS kernels (focus/defocus)
#     Return:
#         Gradient tensor
#     """
#     assert len(mask.shape) == 4
#     assert len(target.shape) == 4
#
#     # NOTE: this function is only suitable with batch-size input
#     # target is N * 1 * 2048 * 2048
#     # input mask size is N * 1 * H * W (N * 1 * 512 * 512)
#
#     # new_cord is a N * 4 tensor, cropped_bbox_size[i] = [rx(i) - lx(i), ry(i) - ly(i)], locate at new_cord[i]
#     lx, ly, rx, ry = new_cord  # lx, ly, rx, ry is N dim tensor
#
#     batch_size = mask.shape[0]
#     channel_size = mask.shape[1]
#     cur_h = mask.shape[2]
#     cur_w = mask.shape[3]
#
#     assert batch_size == len(lx)
#     assert cur_h == cur_w
#     assert abs(rx - lx)[0].item() == abs(ry - ly)[0].item()
#
#     # Gradient size is N * 1 * H * W
#     gradient = torch.zeros((batch_size, channel_size, cur_h, cur_w),
#                            dtype=mask.dtype, layout=mask.layout, device=mask.device)
#
#     for i in range(batch_size):
#         cur_mask = mask[i].unsqueeze(0)
#         cur_target = target[i].unsqueeze(0)
#         mask_crop = torch.nn.functional.interpolate(cur_mask,
#                                                     size=(abs(rx - lx)[i].item(), abs(rx - lx)[i].item()),
#                                                     mode='nearest')
#         mask_origin = torch.zeros((1, channel_size, origin_size, origin_size),
#                                   dtype=mask.dtype, layout=mask.layout, device=mask.device)
#
#         mask_origin[..., ly[i].item():ry[i].item(), lx[i].item():rx[i].item()] = mask_crop
#         if cplx_obj:  # Objective = cplx
#             gradient_origin = compute_cplx_gradient(mask_origin, kernels, kernels_ct, kernel_def, kernel_def_ct, weight,
#                                                     weight_def)
#         else:  # Objective = l2
#             gradient_origin = compute_gradient(mask_origin, cur_target, kernels, kernels_ct, weight)
#         gradient_crop = gradient_origin[..., ly[i].item():ry[i].item(), lx[i].item():rx[i].item()]
#         gradient_tmp = torch.nn.functional.interpolate(gradient_crop, size=(cur_h, cur_w),
#                                                        mode='bilinear', align_corners=False)
#         gradient[i] = gradient_tmp.squeeze(0)
#
#     return gradient  # Return gradient size is N * 1 * H * W

def ideal_lpfilter(image, lamda, NA, pixel_size=1):
    """Ideal (brick-wall) low-pass filter in the frequency domain.

    Keeps all spatial frequencies whose radius is at most the optical
    cut-off 2 * NA / lamda and zeroes everything else. No normalization
    is applied. The last dimension's size is used to build the frequency
    grid, i.e. the image is assumed square in its trailing dims.
    """
    device = image.device
    cutoff = 2 * NA / lamda  # optical cut-off frequency

    n = image.size()[-1]
    freqs_x = torch.fft.fftshift(torch.fft.fftfreq(n, d=pixel_size)).to(device)
    freqs_y = torch.fft.fftshift(torch.fft.fftfreq(n, d=pixel_size)).to(device)
    fx, fy = torch.meshgrid(freqs_x, freqs_y, indexing='ij')
    radius = torch.sqrt(fx ** 2 + fy ** 2)

    # Centered spectrum (zero frequency in the middle).
    passband = (radius <= cutoff)
    spectrum = torch.fft.fftshift(torch.fft.fft2(image))

    # Mask out high frequencies, shift back, and return the real part.
    filtered = torch.fft.ifftshift(spectrum * passband)
    return torch.fft.ifft2(filtered).real

def gauss_lpfilter(image, lamda, NA, sigma, pixel_size=1):
    """Gaussian low-pass filter in the frequency domain.

    Attenuates frequencies with a Gaussian of standard deviation
    (2 * NA / lamda) / sigma; the DC gain is exactly 1, so the image
    mean is preserved. The last dimension's size is used for the
    frequency grid (square image assumed in its trailing dims).
    """
    device = image.device
    cutoff = 2 * NA / lamda  # optical cut-off frequency

    n = image.size()[-1]
    freqs_x = torch.fft.fftshift(torch.fft.fftfreq(n, d=pixel_size)).to(device)
    freqs_y = torch.fft.fftshift(torch.fft.fftfreq(n, d=pixel_size)).to(device)
    fx, fy = torch.meshgrid(freqs_x, freqs_y, indexing='ij')
    radius = torch.sqrt(fx ** 2 + fy ** 2)

    # Gaussian transfer function; narrower for larger ``sigma``.
    std = cutoff / sigma
    transfer = torch.exp(-radius ** 2 / (2 * std ** 2))

    # Filter the centered spectrum and invert the transform.
    spectrum = torch.fft.fftshift(torch.fft.fft2(image))
    filtered = torch.fft.ifftshift(spectrum * transfer)
    return torch.fft.ifft2(filtered).real

# 25-2-4 version (older implementation, kept commented out for reference)
# def compute_common_term(theta, target, kernels, weight, dose=1.0, gamma=4.0, theta_z=50, theta_m=4):
#     r"""
#     Compute the common term for graident calculation
#     """
#     # 确保参与优化的都是低频部分
#     mask = (torch.cos(theta) + 1) / 2
#     mask = ideal_lpfilter(mask, 193, 1.35)
#     intensity_map, _ = litho.lithosim(mask, 0.225, kernels, weight,
#                                       wafer_output_path=None, save_bin_wafer_image=False,
#                                       kernels_number=None, avgpool_size=None,
#                                       dose=dose, return_binary_wafer=False)
#     z_nom = sigmoid_ilt_z(intensity_map, theta_z=theta_z, threhold_z=0.225)  # Nominal image
#     z_t = target  # Binary target image
#     common_term = (z_nom - z_t).pow(gamma - 1) * (1.0 - z_nom) * z_nom  # [1 * H * W]
#     return common_term
#
# # 修改后梯度，按照Mosiac的梯度计算方法
# def compute_gradient(theta, target, kernels, kernels_ct, weight,
#                      dose=1.0, gamma=4.0, theta_z=50, theta_m=4, epe_offset=15, avgpool_size=None):
#     r"""
#     Main function of ILT loss gradient calculation
#     Args:
#         mask: input mask image
#         target: target layout
#         kernels (kernels_ct): SOCS kernels
#         weight: weights for SOCS kernels
#         dose: +-2% process condition \in {0.98, 1.0, 1.02} -> min/nomial/max
#         gamma, theta_z, theta_m: hyper-parameters for gradient calculation, same as MOSAIC (Gao et al., DAC'14)
#     Return:
#         Gradient tensor of ilt_loss
#     """
#     mask = (torch.cos(theta) + 1) / 2
#     mask = ideal_lpfilter(mask, 193, 1.35)
#     constant = gamma * theta_z
#     # constant = gamma * theta_z * theta_m 使用sigmod松弛
#     common_term = compute_common_term(theta, target, kernels, weight, dose, gamma, theta_z, theta_m)
#     convol_left_term = litho.convolve_kernel(mask, kernels_ct, weight, dose=dose)
#     convol_right_term = litho.convolve_kernel(mask, kernels, weight, dose=dose)
#     convol_left_term1 = common_term * convol_left_term
#     convol_right_term1 = common_term * convol_right_term
#     gradient_left_term = litho.convolve_kernel(convol_left_term1, kernels, weight, dose=dose).real
#     gradient_right_term = litho.convolve_kernel(convol_right_term1, kernels_ct, weight, dose=dose).real
#
#     discrete_penalty_mask = 0.025 * (-8 * mask + 4)  # 0->0.1, 1->-0.1; 0.5->0? 梯度对应原式子就是-4m^2+4m,0->0,0.5->1,1->0
#     # gradient = ((constant * (gradient_right_term + gradient_left_term) + theta_m * discrete_penalty_mask)
#     #             * mask * (1 - mask)) # sigmod
#     gradient = ((constant * (gradient_right_term + gradient_left_term) + theta_m * discrete_penalty_mask)
#                 *  (- torch.sin(theta)/2))
#     gradient = gradient.unsqueeze(0)  # [1, 1, 512, 512]
#
#     return gradient
#
# def ideal_lpfilter(image, lamda, NA, pixel_size=1):
#     """修正后的低通滤波器，不进行归一化"""
#     device = image.device
#     # 物理参数
#     cut_off = 2 * NA / lamda
#     # 生成频域网格
#     size = image.size()
#     mask_sample_points = size[-1]
#     dx = pixel_size
#     dy = pixel_size
#
#     u = torch.fft.fftshift(torch.fft.fftfreq(mask_sample_points, d=dx)).to(device)
#     v = torch.fft.fftshift(torch.fft.fftfreq(mask_sample_points, d=dy)).to(device)
#     U, V = torch.meshgrid(u, v, indexing='ij')
#     D = torch.sqrt(U ** 2 + V ** 2)
#
#     # --- 低通滤波 ---
#     ideal_lowpass = (D <= cut_off)
#     image_fft = torch.fft.fft2(image)  # 原始FFT结果（零频率在左上角）
#     image_shift = torch.fft.fftshift(image_fft)  # 零频率移到中心
#
#     # 应用低通滤波器
#     image_low_ishift = torch.fft.ifftshift(image_shift * ideal_lowpass)  # 移回左上角
#     image_low = torch.fft.ifft2(image_low_ishift).real
#
#     # is_all_common_zero = torch.all(image_low == 0)
#     # print("是否所有低通梯度都是0:", is_all_common_zero.item())  # True 表示全部是 0 + 0j
#
#     return image_low
#
# def load_image(image_path):
#     r"""
#     Load image and convert to PyTorch Tensor
#     """
#     image = Image.open(image_path)
#     transforms = Compose([
#         Grayscale(num_output_channels=1),
#         ToTensor(),
#     ])
#     image = transforms(image)
#     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
#     image = image.to(device)
#     return image  # N * 1 * H * W
#
# def mask_init(mask):
#     r"""
#     Initialize mask to [-1, 1]
#     """
#     init_mask = (mask.mul(2.0)).add(-1.0)
#     return init_mask
#
# ## resist成像
# def sigmoid_ilt_z(intensity_map, theta_z=50, threhold_z=0.225):
#     sigmoid_layer = torch.nn.Sigmoid()
#     intensity_map = (intensity_map - threhold_z) * theta_z
#     return sigmoid_layer(intensity_map)
#
# # 初始化掩模,tr:0 0-1->0.5,1; tr:0.5 0-1->0.1,0.9
# def sigmoid_ilt_mask(mask, theta_m=4, threhold_m=0.5):
#     sigmoid_layer = torch.nn.Sigmoid()
#     mask = (mask - threhold_m) * theta_m
#     return sigmoid_layer(mask)
#
# def parseConfig(filename):
#     with open(filename, "r") as fin:
#         lines = fin.readlines()
#     results = {}
#     for line in lines:
#         splited = line.strip().split()
#         if len(splited) >= 2:
#             key = splited[0]
#             value = splited[1]
#             results[key] = value
#     return results