
import os
import sys
import cv2
import torch
import numpy as np
from .model import DenseNet2D
import matplotlib.pyplot as plt

def get_geom_center_from_segmap(seg_map: torch.Tensor, target_class: int):
    """Compute the geometric centre of ``target_class`` for each batch item.

    Args:
        seg_map: [B, H, W] tensor of integer class labels per pixel.
        target_class: class id whose centre is wanted (e.g. 2 for the pupil).

    Returns:
        [B, 2] tensor of (x, y) centres normalised to [-1, 1]; (0, 0) when
        the class is absent from a batch item.
    """
    batch, height, width = seg_map.shape
    centers = torch.zeros(batch, 2, device=seg_map.device)

    for idx in range(batch):
        # nonzero with as_tuple gives (row, col) index vectors for the mask
        rows, cols = torch.nonzero(seg_map[idx] == target_class, as_tuple=True)
        if rows.numel() == 0:
            continue  # class missing: keep the (0, 0) default

        # Pixel-space mean, rescaled from [0, size-1] into [-1, 1].
        centers[idx, 0] = 2.0 * cols.float().mean() / (width - 1) - 1.0
        centers[idx, 1] = 2.0 * rows.float().mean() / (height - 1) - 1.0

    return centers


def get_predictions(output):
    """Argmax over the channel axis of a one-hot score tensor.

    Parameters
    ----------
    output : torch.Tensor
        [B, C, H, W] tensor of per-class scores.

    Returns
    -------
    torch.Tensor
        [B, H, W] tensor of winning channel indices (moved to CPU).
    """
    batch, _, height, width = output.size()
    indices = output.cpu().argmax(dim=1)
    return indices.view(batch, height, width)

def rescale_to_original(seg_map, pupil_center, scale_shift, orig_shape):
    """Map a segmentation map and pupil centre from network resolution back
    to original-frame resolution.

    Inverts the width-scaling and vertical pad/crop applied by
    ``preprocess_frame``.

    Args:
        seg_map: [H, W] integer class map at network resolution.
        pupil_center: (x, y) pixel coordinates at network resolution.
        scale_shift: (scale, pad) tuple from ``preprocess_frame``; pad > 0
            means rows were padded on, pad < 0 means rows were cropped off.
        orig_shape: (height, width) of the original frame.

    Returns:
        (seg_map, pupil_center) in original-frame coordinates. The caller's
        ``pupil_center`` array is not modified.
    """
    sc, pad = scale_shift

    # Work on a copy instead of mutating the caller's array in place.
    pupil_center = np.array(pupil_center, dtype=float)
    # Top padding/crop was floor(pad/2) rows; `pad // 2` is floor division
    # for negative ints too, so the same expression undoes both cases.
    # (np.floor around an integer // was redundant.)
    pupil_center[1] -= pad // 2
    pupil_center[:2] /= sc

    if pad < 0:
        # Rows were cropped during preprocessing: pad them back, mirroring
        # preprocess_frame's ceil-on-top / floor-on-bottom split for odd pads.
        removed = -pad
        top = removed - removed // 2
        seg_map = np.pad(seg_map, ((top, removed - top), (0, 0)))
    elif pad > 0:
        # Rows were padded on: strip floor(pad/2) from the top and the
        # remainder from the bottom (handles odd pad widths exactly).
        top = pad // 2
        seg_map = seg_map[top:seg_map.shape[0] - (pad - top), ...]

    seg_map = cv2.resize(seg_map, (orig_shape[1], orig_shape[0]),
                         interpolation=cv2.INTER_NEAREST)

    return seg_map, pupil_center

def preprocess_frame(img, op_shape, align_width=True):
    """Resize a grayscale frame to the network input shape ``op_shape``.

    The frame is scaled so its width matches ``op_shape[1]`` and is then
    vertically padded (or cropped) to ``op_shape[0]``.

    Args:
        img: [H, W] grayscale image (numpy array).
        op_shape: (height, width) expected by the network.
        align_width: only width alignment is implemented; ``False`` aborts.

    Returns:
        (tensor, scale_shift): a [1, H, W] float32 tensor in [0, 1] and a
        (scale, pad) tuple that ``rescale_to_original`` needs to invert the
        transform (pad > 0: rows added, pad < 0: rows removed).
    """
    if not align_width:
        sys.exit('Height alignment not implemented! Exiting ...')

    if op_shape[1] != img.shape[1]:
        sc = op_shape[1] / img.shape[1]
        width = int(img.shape[1] * sc)
        height = int(img.shape[0] * sc)
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LANCZOS4)

        pad_width = op_shape[0] - img.shape[0]
        if pad_width > 0:
            # Vertically pad: floor(pad/2) rows on top, the rest below.
            # Integer split — np.pad rejects the float widths that the old
            # np.floor/np.ceil expressions produced for odd pads (TypeError).
            top = pad_width // 2
            img = np.pad(img, ((top, pad_width - top), (0, 0)))
        elif pad_width < 0:
            # Vertically crop: ceil(excess/2) rows off the top, floor below
            # (matches the original even/odd slicing behaviour).
            excess = -pad_width
            top = excess - excess // 2
            img = img[top:img.shape[0] - (excess - top), ...]
        scale_shift = (sc, pad_width)
    else:
        # NOTE(review): when the width already matches, the height is left
        # untouched even if it differs from op_shape[0] — confirm this is
        # the intended behaviour for all callers.
        scale_shift = (1, 0)

    img = img / 255  # scale to [0, 1] (mean/std normalisation was disabled)
    img = torch.from_numpy(img).unsqueeze(0).to(torch.float32)  # add channel dim
    return img, scale_shift

def evaluate_ellseg_on_image(model, frame):
    """Segment a single preprocessed frame and locate the pupil centre.

    Args:
        model: segmentation network mapping [1, 1, H, W] -> [1, C, H, W]
            class scores (class 2 is treated as the pupil).
        frame: [1, 1, H, W] float tensor produced by ``preprocess_frame``.

    Returns:
        seg_map: [H, W] numpy array of per-pixel class labels.
        pupil_center: numpy (x, y) pupil centre in pixel coordinates.
    """
    assert len(frame.shape) == 4, 'Frame must be [1,1,H,W]'
    with torch.no_grad():
        seg_out = model(frame).cpu()

    seg_map = get_predictions(seg_out)  # [1, H, W] class labels

    # Pupil (class 2) centre in normalised [-1, 1] coordinates.
    norm_pupil_center = get_geom_center_from_segmap(seg_map, 2)[0]

    # Denormalise [-1, 1] -> pixels: p = (n + 1) * size / 2. This is the
    # homography [[W/2, 0, W/2], [0, H/2, H/2], [0, 0, 1]] applied directly,
    # without rebinding (shadowing) the frame-height variable as the matrix.
    _, _, frame_h, frame_w = frame.shape
    x_px = (float(norm_pupil_center[0]) + 1.0) * frame_w / 2.0
    y_px = (float(norm_pupil_center[1]) + 1.0) * frame_h / 2.0
    pupil_center = np.array([x_px, y_px])

    seg_map = seg_map.squeeze(0).numpy()  # drop the batch dimension
    return seg_map, pupil_center
def resize_image(image, target_size=200):
    """Crop a grayscale image to a ``target_size`` x ``target_size`` patch
    from the top-left corner.

    Despite the name this never rescales: if either dimension is smaller
    than ``target_size`` the image is returned unchanged.
    """
    rows, cols = image.shape
    if rows < target_size or cols < target_size:
        return image
    return image[:target_size, :target_size]

def return_center_map(model, path_img, device):
    """Run the segmentation model on an image file and return the pupil
    centre plus segmentation map in original-image coordinates.

    Args:
        model: segmentation network (already on ``device``, in eval mode).
        path_img: path to a grayscale-readable image file.
        device: torch device to run inference on.

    Returns:
        (pupil_center, seg_map) on success, or None when the image cannot
        be loaded.
    """
    frame = cv2.imread(str(path_img), cv2.IMREAD_GRAYSCALE)
    # Bail out BEFORE touching the frame: cv2.imread returns None on
    # failure, and the previous order (resize first, check after) raised
    # AttributeError instead of reporting the missing image.
    if frame is None:
        print(f"Error: Unable to load image from {path_img}")
        return

    frame = resize_image(frame, target_size=200)

    frame_scaled_shifted, scale_shift = preprocess_frame(frame, (240, 240), True)
    input_tensor = frame_scaled_shifted.unsqueeze(0).to(device)

    seg_map, pupil_center = evaluate_ellseg_on_image(model, input_tensor)
    seg_map, pupil_ellipse = rescale_to_original(seg_map,
                                                 pupil_center,
                                                 scale_shift,
                                                 frame.shape)

    pupil_center = np.array([pupil_ellipse[0], pupil_ellipse[1]])
    return pupil_center, seg_map

def load_model(model_path, device):
    """Build a DenseNet2D model and load weights from ``model_path``.

    Ellipse-regression ('elReg') entries are dropped from the checkpoint
    before loading; the model is returned in eval mode on ``device``.

    Raises:
        FileNotFoundError: if ``model_path`` does not exist.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file {model_path} does not exist.")

    checkpoint = torch.load(model_path, map_location=device)
    # Strip the ellipse-regression head weights before loading.
    weights = {key: val for key, val in checkpoint.items()
               if not key.startswith('elReg')}

    net = DenseNet2D().to(device)
    net.load_state_dict(weights)
    net.eval()  # inference mode
    return net

def _draw_seg_map(image_path, segmap):
    """Blend the class map over the (cropped) source image and save the
    result to 'segmentation_overlay.png'.

    Each class id is rendered by writing 125 into the colour channel whose
    index equals the class id — assumes class labels are in {0, 1, 2}
    (class 3+ would index past the RGB channels); TODO confirm.
    """
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    gray = resize_image(gray, target_size=200)
    rows, cols = gray.shape

    # Promote the grayscale frame to RGB so it can be blended with colour.
    base = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)

    # Per-pixel: channel segmap[i, j] of pixel (i, j) gets value 125.
    colour = np.zeros((rows, cols, 3), dtype=np.uint8)
    row_idx = np.arange(rows).reshape(-1, 1)
    col_idx = np.arange(cols)
    colour[row_idx, col_idx, segmap] = 125

    # 50/50 alpha blend of image and class colours.
    overlay = cv2.addWeighted(base, 0.5, colour, 0.5, 0)
    # Hard-coded green marker — presumably a debug reference point.
    overlay = cv2.circle(overlay, (93, 85), 2, (0, 255, 0), -1)

    plt.imshow(overlay)
    plt.savefig('segmentation_overlay.png')

if __name__ == "__main__":
    # Manual smoke test of the full pipeline on one image.
    model_path = '/home/wtpan/wcmx/model_dict/newglass_test_7.pth'  # Replace with your model path
    image_path = "/home/wtpan/wcmx/PlayForDreamCpp/testdata/img.png"

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = load_model(model_path, device)

    pupil_center, seg_map = return_center_map(model, image_path, device)
    print(f"Pupil Center: {pupil_center}")
    print(f"Segmentation Map Shape: {seg_map.shape}")
    print(np.unique(seg_map))

    _draw_seg_map(image_path, seg_map)