# 60年代风格图形渲染器（向量化版本）
# 使用AI辅助

import multiprocessing
import multiprocessing.shared_memory
import time

import numpy as np
import scipy
import PIL.Image

# Module feature flags. The compiled PBR shader is optional: when the
# import fails the renderer falls back to the Blinn-Phong shader below.
is_pbr_ready = False
allow_normal_map = False

try:
    import drawsphere_pbr
except Exception as e:
    print(f"Failed to Import PBR Module: {e}. Use Blinn-Phong instead. ")
else:
    is_pbr_ready = True

class ADVMAT:
    """PBR material parameters: Fresnel base reflectance F0, metallic, roughness."""

    def __init__(self, F0=None, metallic=0.0, roughness=0.5):
        # 0.04 is the conventional dielectric base reflectance.
        self.F0 = np.full(3, 0.04)
        if F0 is not None:
            self.F0[:] = F0
        self.metallic = float(metallic)
        self.roughness = float(roughness)


class GROUNDS:
    """Infinite horizontal plane at y = ground_y with a rotated checkerboard."""

    def __init__(self, ground_y, advmat=None):
        self.ground_y = ground_y
        self.advmat = advmat if advmat else ADVMAT()

    def is_ray_collided_with_me(self, points, directions):
        """Intersect N rays with the plane.

        points, directions: (N, 3). Returns (is_collided, hit coords,
        normals, colors, ray parameter t) — all length N, zero-filled
        where there is no hit.
        """
        n_rays = points.shape[0]
        directions = directions / np.linalg.norm(directions, axis=1, keepdims=True)

        hit = np.zeros(n_rays, dtype=bool)
        hit_coord = np.zeros((n_rays, 3))
        hit_normal = np.zeros((n_rays, 3))
        hit_t = np.zeros(n_rays)
        hit_color = np.zeros((n_rays, 3))

        # Rays (nearly) parallel to the plane never intersect it.
        t = np.zeros(n_rays)
        nonparallel = ~np.isclose(directions[:, 1], 0.0)
        idx = np.flatnonzero(nonparallel)
        t[idx] = (self.ground_y - points[idx, 1]) / directions[idx, 1]

        # Keep only intersections in front of the ray origin.
        idx = np.flatnonzero(nonparallel & (t > 0))
        if idx.size:
            hit[idx] = True
            hit_coord[idx] = points[idx] + t[idx, None] * directions[idx]
            hit_t[idx] = t[idx]
            hit_normal[idx] = self.get_norm(idx.size)
            hit_color[idx] = self.get_color(hit_coord[idx])

        return hit, hit_coord, hit_normal, hit_color, hit_t

    def get_norm(self, n=1):
        """Return n upward unit normals, jittered when the normal map is enabled."""
        normals = np.tile([0.0, 1.0, 0.0], (n, 1))
        if allow_normal_map:
            normals = normals + 0.05 * (2 * np.random.rand(n, 3) - 1)
        return normals / np.linalg.norm(normals, axis=1, keepdims=True)

    def get_color(self, coords):
        """Checkerboard colors for world coordinates, rotated 30 deg about y.

        Cells whose x/z parities match are gray, the others white.
        """
        angle = np.pi / 6
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, 0.0, s],
                             [0.0, 1.0, 0.0],
                             [-s, 0.0, c]])
        rotated = coords @ rotation.T

        cell_x = np.floor(rotated[:, 0] % 2)
        cell_z = np.floor(rotated[:, 2] % 2)
        same_parity = cell_x == cell_z
        return np.where(same_parity[:, None], [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])

class SPHERES:
    """Sphere primitive defined by a center coordinate and a radius."""

    def __init__(self, coord, r, color=None, advmat=None):
        self.coord = np.array(coord)
        self.color = np.ones(3) if color is None else np.array(color)
        self.r = float(r)
        self.advmat = advmat if advmat else ADVMAT()

    def is_ray_collided_with_me(self, points, directions):
        """Intersect N rays with the sphere via the quadratic formula.

        Returns (is_collided, hit coords, normals, colors, t); colors are
        the sphere color for every ray, other outputs are zero where no hit.
        """
        n_rays = points.shape[0]
        directions = directions / np.linalg.norm(directions, axis=1, keepdims=True)

        hit = np.zeros(n_rays, dtype=bool)
        hit_coord = np.zeros((n_rays, 3))
        hit_normal = np.zeros((n_rays, 3))
        hit_color = np.tile(self.color, (n_rays, 1))
        hit_t = np.zeros(n_rays)

        # Solve |p + t d - c|^2 = r^2  ->  A t^2 + B t + C = 0 per ray.
        offset = points - self.coord
        A = np.sum(directions * directions, axis=1)
        B = 2.0 * np.sum(directions * offset, axis=1)
        C = np.sum(offset * offset, axis=1) - self.r ** 2

        disc = B * B - 4.0 * A * C
        candidates = np.flatnonzero(disc > 0)

        if candidates.size:
            root = np.sqrt(disc[candidates])
            denom = 2.0 * A[candidates]
            ts = np.stack([(-B[candidates] + root) / denom,
                           (-B[candidates] - root) / denom], axis=1)
            ts[ts <= 0] = np.inf  # discard intersections behind the ray origin
            nearest = ts.min(axis=1)

            in_front = nearest < np.inf
            chosen = candidates[in_front]
            nearest = nearest[in_front]

            if chosen.size:
                hit[chosen] = True
                hit_coord[chosen] = points[chosen] + nearest[:, None] * directions[chosen]
                hit_t[chosen] = nearest
                hit_normal[chosen] = self.get_norm(hit_coord[chosen])

        return hit, hit_coord, hit_normal, hit_color, hit_t

    def get_norm(self, points):
        """Outward unit normals at surface points, jittered if normal map is on."""
        normals = points - self.coord
        if allow_normal_map:
            normals = normals + 0.05 * (2 * np.random.rand(points.shape[0], 3) - 1)
        return normals / np.linalg.norm(normals, axis=1, keepdims=True)

class CUBES:
    """Axis-aligned box defined by center coordinate and edge lengths (lwh)."""

    def __init__(self, coord, lwh, color=None, advmat=None):
        self.coord = np.array(coord)
        self.lwh = np.array(lwh)
        self.color = np.ones(3) if color is None else np.array(color)
        self.advmat = advmat if advmat else ADVMAT()

    def is_ray_collided_with_me(self, points, directions):
        """Slab-method ray/AABB intersection for N rays at once.

        Returns (is_collided, hit coords, normals, colors, t); colors are
        the box color for every ray, other outputs zero where no hit.
        """
        n_rays = points.shape[0]
        directions = directions / np.linalg.norm(directions, axis=1, keepdims=True)

        hit = np.zeros(n_rays, dtype=bool)
        hit_coord = np.zeros((n_rays, 3))
        hit_normal = np.zeros((n_rays, 3))
        hit_color = np.tile(self.color, (n_rays, 1))
        hit_t = np.zeros(n_rays)

        rel = points - self.coord
        half = self.lwh / 2.0

        t_near = np.full(n_rays, -np.inf)
        t_far = np.full(n_rays, np.inf)

        # Clip the ray parameter interval against each pair of slabs;
        # division by a zero component yields +/-inf, which is handled
        # naturally by the min/max updates.
        for axis in range(3):
            d = directions[:, axis]
            p = rel[:, axis]
            with np.errstate(divide='ignore', invalid='ignore'):
                lo = (-half[axis] - p) / d
                hi = (half[axis] - p) / d
            flipped = lo > hi
            lo[flipped], hi[flipped] = hi[flipped], lo[flipped]
            t_near = np.maximum(t_near, lo)
            t_far = np.minimum(t_far, hi)

        idx = np.flatnonzero((t_near <= t_far) & (t_far > 0))
        if idx.size:
            # Origin outside the box: enter at t_near; inside: exit at t_far.
            hit_t[idx] = np.where(t_near[idx] > 0, t_near[idx], t_far[idx])
            hit_coord[idx] = points[idx] + hit_t[idx, None] * directions[idx]
            hit[idx] = True
            hit_normal[idx] = self.get_norm(hit_coord[idx] - self.coord)

        return hit, hit_coord, hit_normal, hit_color, hit_t

    def get_norm(self, local_coords):
        """Face normals: the axis with the largest |normalized coordinate| wins."""
        scaled = local_coords / (self.lwh / 2.0)
        count = len(scaled)
        face_axis = np.argmax(np.abs(scaled), axis=1)
        face_sign = np.sign(scaled[np.arange(count), face_axis])

        normals = np.zeros((count, 3))
        normals[np.arange(count), face_axis] = face_sign

        if allow_normal_map:
            normals = normals + 0.05 * (2 * np.random.rand(count, 3) - 1)
        return normals / np.linalg.norm(normals, axis=1, keepdims=True)

class SPRITERS:
    """Flat billboard sprite: an RGB texture pasted on the plane z = coord[2].

    Near-black texels (|rgb| <= 0.2) are treated as transparent.
    """

    def __init__(self, coord, height, texture, advmat = None):
        """coord: sprite center; height: world-space height of the texture;
        texture: (rows, cols, 3) RGB array with values in [0, 1].

        Raises:
            ValueError: if texture is not a 3-D RGB array.
        """
        self.coord = np.zeros((3))
        self.coord[:] = np.array(coord)

        texture = np.array(texture)
        # Validate BEFORE transposing: transpose([1, 0, 2]) on a non-3-D
        # array would raise a confusing "axes don't match array" error
        # instead of the intended message below.
        if texture.ndim != 3 or texture.shape[2] != 3:
            raise ValueError('Texture must be RGB image')
        # Reorient to (width, height, 3) with the vertical axis pointing up.
        self.texture = np.flip(texture.transpose([1, 0, 2]), axis=1)

        self.height = float(height)
        self.scale = float(height) / self.texture.shape[1]  # world units per texel

        self.advmat = advmat if advmat else ADVMAT()
        return

    def is_ray_collided_with_me(self, points, directions):
        """Intersect N rays with the sprite plane z = coord[2].

        Rays nearly parallel to the plane (|dz| <= 1e-3) are ignored.
        collided_coord / t_final are filled for every plane crossing, but
        is_collided is only set where the texel is opaque.
        """
        N = points.shape[0]
        norms = np.linalg.norm(directions, axis=1, keepdims=True)
        directions = directions / norms

        is_collided = np.zeros(N, dtype=bool)
        collided_coord = np.zeros((N, 3))
        normal = np.zeros((N, 3))
        color = np.zeros((N, 3))
        t_final = np.zeros(N)

        with np.errstate(divide='ignore', invalid='ignore'):
            t = (self.coord[2] - points[:, 2]) / directions[:, 2]

        valid_mask = (np.abs(directions[:, 2]) > 1e-3) & (t > 0)
        valid_indices = np.where(valid_mask)[0]

        if valid_indices.size > 0:
            collided_coord[valid_indices] = points[valid_indices] + t[valid_indices, None] * directions[valid_indices]
            t_final[valid_indices] = t[valid_indices]

            # Texture lookup in the sprite's local x/y frame.
            local_xy = collided_coord[valid_indices, :2] - self.coord[:2]
            valid_color, colors = self.get_color(local_xy)

            final_valid = np.zeros(len(valid_indices), dtype=bool)
            final_valid[valid_color] = True

            is_collided[valid_indices[final_valid]] = True
            color[valid_indices[final_valid]] = colors[final_valid]
            normal[valid_indices[final_valid]] = [0.0, 0.0, 1.0]  # sprite always faces +z

        return is_collided, collided_coord, normal, color, t_final

    def get_color(self, local_xy):
        """Map local plane coordinates to texel colors.

        Returns (valid, colors): valid marks points that land inside the
        texture AND on a sufficiently bright (non-transparent) texel.
        """
        i = (local_xy[:, 0] / self.scale + self.texture.shape[0] // 2).astype(int)
        j = (local_xy[:, 1] / self.scale + self.texture.shape[1] // 2).astype(int)

        valid = (i >= 0) & (i < self.texture.shape[0]) & (j >= 0) & (j < self.texture.shape[1])
        valid_indices = np.where(valid)[0]

        colors = np.zeros((local_xy.shape[0], 3))

        if valid_indices.size > 0:
            tex_colors = self.texture[i[valid_indices], j[valid_indices]]
            # Transparency test: near-black texels do not count as hits.
            norm = np.linalg.norm(tex_colors, axis=1)
            color_valid = norm > 0.2

            colors[valid_indices[color_valid]] = tex_colors[color_valid]
            valid[valid_indices] = color_valid

        return valid, colors

class LIGHTS:
    """Point light with a position, an RGB color and a scalar intensity."""

    def __init__(self, coord, color=None, intensity=1):
        self.coord = np.zeros(3)
        self.coord[:] = coord
        self.color = np.ones(3)  # defaults to white
        if color is not None:
            self.color[:] = color
        self.intensity = intensity

def get_refelctive_direction(direction, norm):
    """Return the reflection of each row of `direction` about the
    corresponding normal in `norm`.

    Both arguments are (N, 3) arrays; rows need not be pre-normalized.
    Each result row is the unit vector r = 2 (d . n) n - d, i.e. the
    incoming direction mirrored about the normal axis.

    Raises:
        ValueError: if either argument is not a 2-D array.
    """
    if direction.ndim != 2 or norm.ndim != 2:
        raise ValueError('direction and norm must be 2-D (N, 3) arrays')

    direction = direction / np.linalg.norm(direction, axis=1, keepdims=True)
    norm = norm / np.linalg.norm(norm, axis=1, keepdims=True)

    # Decompose d into components parallel and perpendicular to n, then
    # flip the perpendicular part: r = parallel - perpendicular
    #                                = 2 (d . n) n - d.
    dot = np.sum(direction * norm, axis=1, keepdims=True)
    parallel = dot * norm
    perpendicular = direction - parallel

    res = parallel - perpendicular
    return res / np.linalg.norm(res, axis=1, keepdims=True)

def load_texture(filename, target_res_height=800):
    """Load an image file as a float RGB array with values in [0, 1].

    The image is resized so its height equals target_res_height,
    scaling the width proportionally (Lanczos resampling).
    """
    img = PIL.Image.open(filename).convert('RGB')
    width, height = img.size
    factor = target_res_height / height
    resized = img.resize((int(width * factor), int(height * factor)), PIL.Image.LANCZOS)
    return np.clip(np.array(resized, dtype=np.float64) / 255, 0, 1)

def get_brightness(points, normals, view_directions, basecolor, metallic = 0.0, roughness = 0.5, depth = 0):
    """Blinn-Phong shading fallback (used when the compiled PBR module is absent).

    points, normals, view_directions, basecolor: (N, 3) arrays;
    view_directions point from the camera toward the surface and are
    flipped internally. Reads module globals ao, ambient_light_color and
    light_list, and calls is_ray_collided_with_any_obj (shadows) and
    start_raytracing (recursive reflections). Returns an (N, 3) array of
    radiance values clipped to [0, 1].

    NOTE(review): when depth < 2 the reflection branch indexes
    roughness[:, None, None] and metallic as per-point arrays, so the
    scalar defaults would fail there — confirm callers always pass (N,)
    arrays (start_raytracing does).
    """
    if points.ndim != 2 or normals.ndim != 2 or view_directions.ndim != 2  or basecolor.ndim != 2:
        raise ValueError('123')

    N = points.shape[0]
    normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
    # Flip so view_directions point from the surface toward the camera.
    view_directions = - view_directions / np.linalg.norm(view_directions, axis=1, keepdims=True)

    L_tot = np.zeros((N,3))
    # Ambient term scaled by the ambient-occlusion factor.
    L_tot += ao * ambient_light_color[None,:] * basecolor #(1,3)

    for i in range(len(light_list)):
        light_ray = light_list[i].coord - points
        light_dist2 = np.sum(light_ray**2,axis = 1)
        light_ray = light_ray / np.linalg.norm(light_ray, axis=1, keepdims=True)
        light_color = light_list[i].color
        light_intensity = light_list[i].intensity * (1/(1+0.05*light_dist2)); # Semi-empirical attenuation: intensity should decay with the inverse square of distance, but pure inverse-square falls off too fast and diverges near the light, making it hard to control
        
        # Shadow test: offset the origin slightly along the light ray to
        # avoid self-intersection with the shaded surface.
        is_collided = is_ray_collided_with_any_obj(points + 0.01 * light_ray, light_ray, use_Zbuffer=False)[0]
        
        # Compute half vector and NdotH
        H = (view_directions + light_ray) / 2.0
        H = H / np.linalg.norm(H, axis=1, keepdims=True)
        NdotL = np.clip(np.sum(normals * light_ray, axis=1), 1e-3, 1)
        NdotH = np.clip(np.sum(normals * H, axis=1), 1e-3, 1)
        
        # NdotL: diffuse term based on Lambert's cosine law, proportional to
        # the cosine between the surface normal and the light direction.
        L_i = np.clip(1 - metallic*(1-roughness),0,1)[:,None] * light_intensity[:,None] * light_color[None, :] * basecolor * NdotL[:,None] #(N,3)
        
        # NdotH: specular term (Blinn-Phong); a highlight appears when the
        # half vector H approaches the normal.
        L_i += light_intensity[:,None] * light_color[None, :] * basecolor * NdotL[:,None] * NdotH[:,None]**32 #(N,3)
        
        # Apply lighting only to non-collided points
        L_i[is_collided] = 0
        L_tot += L_i
    
    # Mirror-reflection sampling (recursive; depth < 2 limits bounces).
    if depth < 2:
        N_sample = 3
        reflective_directions = get_refelctive_direction(view_directions, normals) 
        _reflective_directions = reflective_directions[:,None,:] + 0.025*roughness[:,None,None]*(2*np.random.rand(N,N_sample,3) - 1) # random jitter around the mirror direction
        B = np.array([start_raytracing(points + 0.01*_reflective_directions[:,i,:], _reflective_directions[:,i,:], depth = depth + 1)[0] for i in range(N_sample)]) # recursive sampling, [N_sample,N,3]
        reflective_intensity = np.clip(metallic*(1-roughness),0.025,1) # reflection strength
        LI = np.array([reflective_intensity[:,None] * B[i,:,:] * basecolor  for i in range(N_sample)]) #[N_sample,N,3]
        L_i = np.mean(LI, axis=0)
        L_tot += L_i
        

    # Sanitize non-finite values produced by degenerate rays, then clamp.
    L_tot[np.isinf(L_tot) | np.isnan(L_tot)] = 0
    L_tot = np.clip(L_tot,0,1)
    return L_tot

def is_ray_collided_with_any_obj(points, directions, use_Zbuffer=True):
    """Test N rays against every object in the global obj_list.

    With use_Zbuffer=True the nearest hit per ray wins; with False a ray
    keeps its first recorded hit (cheaper occlusion test for shadow rays).

    Returns (is_collided, hit coords, normals, colors, depth t, object index);
    object index is -1 for rays that hit nothing.
    """
    n_rays = points.shape[0]
    is_collided = np.zeros(n_rays, dtype=bool)
    collided_coord = np.zeros((n_rays, 3))
    normal = np.zeros((n_rays, 3))
    color = np.zeros((n_rays, 3))
    z_buffer = np.full(n_rays, np.inf)
    collided_obj_id = np.full(n_rays, -1, dtype=int)

    for obj_index, obj in enumerate(obj_list):
        obj_hit, obj_coord, obj_normal, obj_color, obj_t = obj.is_ray_collided_with_me(points, directions)
        closer = obj_hit & (obj_t < z_buffer)
        if not use_Zbuffer:
            closer = closer & ~is_collided
        if np.any(closer):
            is_collided[closer] = True
            collided_coord[closer] = obj_coord[closer]
            normal[closer] = obj_normal[closer]
            color[closer] = obj_color[closer]
            z_buffer[closer] = obj_t[closer]
            collided_obj_id[closer] = obj_index

    return is_collided, collided_coord, normal, color, z_buffer, collided_obj_id

def start_raytracing(start_points, directions, depth=0):
    """Trace N rays through the scene and shade the hits.

    Dispatches to the compiled PBR shader when available, otherwise to the
    Blinn-Phong get_brightness. depth is the recursion level for
    reflections. Returns (brightness, is_collided, depth t, object id).
    """
    n_rays = start_points.shape[0]
    brightness = np.zeros((n_rays, 3))

    # Find the nearest intersection per ray.
    is_collided, collided_coord, normal, color, t_obj, obj_id = is_ray_collided_with_any_obj(start_points, directions)

    if np.any(is_collided):
        mask = is_collided
        hit_ids = obj_id[mask]
        metallic = np.array([obj_list[k].advmat.metallic for k in hit_ids])
        roughness = np.array([obj_list[k].advmat.roughness for k in hit_ids])

        if is_pbr_ready:
            F0 = np.array([obj_list[k].advmat.F0 for k in hit_ids])
            shaded = drawsphere_pbr.get_brightness_pbr_simd(
                collided_coord[mask], normal[mask], directions[mask],
                basecolors=color[mask], F0=F0,
                metallic=metallic, roughness=roughness, depth=depth)
        else:
            shaded = get_brightness(
                collided_coord[mask], normal[mask], directions[mask],
                basecolor=color[mask], metallic=metallic,
                roughness=roughness, depth=depth)
        brightness[mask] = shaded

    return brightness, is_collided, t_obj, obj_id

# Camera distance from the origin along +z; primary rays start at (0, 0, ncam).
ncam = 1.7

# Scene definition: objects are tested for intersection in list order.
obj_list = []
obj_list.append(SPHERES(np.array([0, 0, -5]), 1))
# The bare string below is a citation for the gold material values used next.
'数据来源：Palmqvist, A. (2025) Physically Based - The PBR Values Database Available at: https://physicallybased.info/ (Accessed: 14 November 2025).'
obj_list.append(SPHERES(np.array([1, 1, -4]), 0.5, color = np.array([0.910,0.778,0.423]), advmat = ADVMAT(F0 = np.array([0.995,0.974,0.747]), metallic = 1.0, roughness = 0.6)))
obj_list.append(CUBES(np.array([0, -1.5, -9]), np.array([10, 1, 1]), color=np.array([0.0, 1.0, 0.0])))
obj_list.append(SPRITERS(np.array([3, -2 + 1.5, -6]), 3, load_texture('image.jpg')))  # billboard sprite loaded from image.jpg
obj_list.append(GROUNDS(-2))  # checkerboard floor at y = -2

# Single white point light.
light_list = []
light_list.append(LIGHTS(np.array([5,5,0]),color = np.array([1.0,1.0,1.0]),intensity = 4))

# Ambient lighting color and ambient-occlusion factor used by the shaders.
ambient_light_color = np.array([1.0,1.0,1.0])
ao = 0.2

# Hand shared scene state to the compiled PBR module, which calls back into
# the Python collision / raytracing functions defined above.
if is_pbr_ready:
    drawsphere_pbr.ao = ao
    drawsphere_pbr.ambient_light_color = ambient_light_color
    drawsphere_pbr.light_list = light_list
    drawsphere_pbr.is_ray_collided_with_any_obj_simd = is_ray_collided_with_any_obj
    drawsphere_pbr.start_raytracing_simd = start_raytracing
    drawsphere_pbr.get_refelctive_direction_simd = get_refelctive_direction

def worker(i, shm1_name, shm2_name, ns, i_start, i_end, batch_size=8000):
    """Render pixels [i_start, i_end) of the shared buffers, in batches.

    shm1 holds the (ns, 2) screen-space coordinates, shm2 the (ns, 5)
    output rows [r, g, b, depth, object id]. Results are written in place.
    """
    shm1 = multiprocessing.shared_memory.SharedMemory(name=shm1_name)
    shm2 = multiprocessing.shared_memory.SharedMemory(name=shm2_name)

    sxy = np.ndarray((ns, 2), dtype=np.float64, buffer=shm1.buf)
    us = np.ndarray((ns, 5), dtype=np.float64, buffer=shm2.buf)

    # Process the assigned range in fixed-size batches to bound memory use.
    for lo in range(i_start, i_end, batch_size):
        hi = min(lo + batch_size, i_end)
        count = hi - lo

        # Rays start at the camera and pass through the image plane.
        origins = np.tile([0, 0, ncam], (count, 1))
        ray_dirs = np.column_stack((sxy[lo:hi], -ncam * np.ones(count)))

        brightness, is_collided, t_obj, obj_id = start_raytracing(origins, ray_dirs)

        us[lo:hi, 0:3] = brightness
        us[lo:hi, 3] = t_obj
        us[lo:hi, 4] = obj_id

    shm1.close()
    shm2.close()

def apply_simple_fxaa(image, edge_threshold=0.125, edge_threshold_min=0.03125):
    """Cheap FXAA-like pass: blend a Gaussian blur into edge pixels only.

    image: (H, W, >=3) float array; returns the same shape clipped to [0, 1].
    Edges are detected from the luma gradient; non-edge pixels pass through.
    """
    from scipy.ndimage import gaussian_filter

    luma = np.dot(image[..., :3], [0.299, 0.587, 0.114])

    # Central-difference gradients (borders left at zero).
    gx = np.zeros_like(luma)
    gy = np.zeros_like(luma)
    gx[:, 1:-1] = luma[:, 2:] - luma[:, :-2]
    gy[1:-1, :] = luma[2:, :] - luma[:-2, :]

    threshold = max(edge_threshold_min, edge_threshold * np.max(luma))
    is_edge = (np.abs(gx) + np.abs(gy)) > threshold

    # Edge pixels receive a 50/50 mix with the blurred image; the rest
    # keep their original value.
    weights = np.where(is_edge, 0.5, 1.0)[..., None]

    blurred = np.stack(
        [gaussian_filter(image[..., c], sigma=0.75) for c in range(3)], axis=-1)

    result = np.zeros_like(image)
    result[..., :3] = image[..., :3] * weights + blurred * (1.0 - weights)
    return np.clip(result, 0, 1)

if __name__ == '__main__':
    # Pixel pitch and half-extent of the image plane: the render samples
    # [-L, L) x [-L, L) every dx, giving an n x n image (here 2000 x 2000).
    dx = 0.001
    L = 1

    xs, ys = np.meshgrid(np.arange(-L, L, dx), np.arange(-L, L, dx))
    n = xs.shape[0]
    sxy = np.column_stack((xs.flatten(), ys.flatten()))
    del xs, ys

    ns = n**2

    # Output buffer: one row per pixel -> [r, g, b, depth t, object id].
    us = np.zeros((ns, 5))

    # Shared memory lets worker processes read coordinates and write results
    # without pickling large arrays across process boundaries.
    shm1 = multiprocessing.shared_memory.SharedMemory(create=True, size=sxy.nbytes)
    shm2 = multiprocessing.shared_memory.SharedMemory(create=True, size=us.nbytes)
    shared_sxy = np.ndarray((ns, 2), dtype=np.float64, buffer=shm1.buf)
    shared_us = np.ndarray((ns, 5), dtype=np.float64, buffer=shm2.buf)
    shared_sxy[:, :] = sxy
    shared_us[:, :] = 0

    print(f"Total points: {ns}")

    # Split the pixel range evenly across processes; the last process takes
    # any remainder.
    num_proc = 8
    processes = []
    i_start = []
    i_end = []

    start_time = time.time()

    for i in range(num_proc):
        work_range = ns // num_proc
        start_idx = work_range * i
        end_idx = work_range * (i + 1) if i < num_proc - 1 else ns

        i_start.append(start_idx)
        i_end.append(end_idx)

        p = multiprocessing.Process(target=worker, args=(i, shm1.name, shm2.name, ns, start_idx, end_idx))
        processes.append(p)
        p.start()

    print(f"Start indices: {i_start}")
    print(f"End indices: {i_end}")

    for p in processes:
        p.join()

    # Assemble the color image, anti-alias it, and flip to image orientation
    # (screen y grows upward, image rows grow downward).
    image = shared_us[:,0:3].reshape((n, n, 3)).copy()
    image = apply_simple_fxaa(image)
    image = (image * 255).astype(np.uint8)
    image = PIL.Image.fromarray(image).transpose(PIL.Image.FLIP_TOP_BOTTOM)

    # Depth map: clamp to 20 world units and normalize to [0, 1].
    image_deep = shared_us[:,3].reshape((n, n)).copy()
    image_deep[np.isnan(image_deep) | np.isinf(image_deep)] = 0
    image_deep = np.clip(image_deep,0,20) / 20
    image_deep = (image_deep * 255).astype(np.uint8)
    image_deep = PIL.Image.fromarray(image_deep).transpose(PIL.Image.FLIP_TOP_BOTTOM)


    # Object-id map: shift ids (-1 = miss) into [0, 1] for visualization.
    image_objid = shared_us[:,4].reshape((n, n)).copy()
    image_objid = (image_objid+1)/(len(obj_list)+1)
    image_objid = (image_objid * 255).astype(np.uint8)
    image_objid = PIL.Image.fromarray(image_objid).transpose(PIL.Image.FLIP_TOP_BOTTOM)

    # Release and destroy the shared-memory segments.
    shm1.close()
    shm1.unlink()
    shm2.close()
    shm2.unlink()

    end_time = time.time()
    print(f"Execution time: {end_time - start_time:.2f} seconds")

    image.save('output_image.png')
    image_deep.save('output_image_deep.png')
    image_objid.save('output_image_objid.png')
    image.show()
