import torch
import concurrent.futures
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from tqdm import tqdm, trange
import cv2
import numpy as np
from PIL import Image
import ctypes
from ctypes import c_int, c_void_p, c_float, c_bool
import math
import gc

# Tile size used by the native rasterizer: the image is split into
# BLOCK_X x BLOCK_Y pixel tiles (256 pixels per tile).
BLOCK_X = 16
BLOCK_Y = 16
BLOCK_SIZE = BLOCK_X * BLOCK_Y

# Native rasterization kernels, loaded from a prebuilt Windows DLL.
# NOTE(review): the per-function semantics below are inferred from the call
# sites in this file — the C sources are not visible here; confirm against
# the library's headers.
lib = ctypes.CDLL("build/libtest.dll")
# Per-Gaussian preprocessing: projection to 2D, radius/conic computation,
# color evaluation and touched-tile counting (see MyRaster.preprocess for
# the argument order).
preprocess_compressed = lib.preprocess_compressed
preprocess_compressed.restype = None
preprocess_compressed.argtypes = [c_int,c_int,c_int,c_void_p,c_void_p,c_void_p,c_float,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_float,c_float,c_float,c_float,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]

# Expands each visible Gaussian into one key/value pair per touched tile
# (presumably a tile|depth sort key and the Gaussian index — confirm in C).
duplicateWithKeys = lib.duplicateWithKeys
duplicateWithKeys.restype = None
duplicateWithKeys.argtypes = [c_int, c_void_p, c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]

# Computes the [start, end) range of sorted keys belonging to each tile.
GetTileRanges = lib.GetTileRanges
GetTileRanges.restype = None
GetTileRanges.argtypes = [c_int, c_void_p, c_void_p]

# Tile-based alpha-blending render pass (v1; unused — see MyRaster.render).
render = lib.render
render.restype = None
render.argtypes = [c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]

# Single-tile render variant (not called in this file).
render_tile = lib.render_tile
render_tile.restype = None
render_tile.argtypes = [c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_int, c_void_p, c_int, c_int]

# Per-tile range computation variants (not called in this file).
GetRanges_tile = lib.GetRanges_tile
GetRanges_tile.restype = None
GetRanges_tile.argtypes = [c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_int, c_void_p, c_void_p, c_void_p, c_void_p]

GetRanges_tile2 = lib.GetRanges_tile2
GetRanges_tile2.restype = None
GetRanges_tile2.argtypes = [c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_int, c_void_p, c_void_p, c_void_p, c_void_p]

# Render pass actually used by MyRaster.render.
renderV2 = lib.renderV2
renderV2.restype = None
renderV2.argtypes = [c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]

# C-side key sort; also fills per-tile counts and tile ranges
# (replaces the torch-based sort in the commented-out GetRanges code).
c_sort = lib.c_sort
c_sort.restype = None
c_sort.argtypes = [c_void_p, c_int,c_void_p,c_void_p,c_int, c_void_p]

# C-side inclusive prefix sum over the touched-tile counts.
prefixsum = lib.prefixsum
prefixsum.restype = None
prefixsum.argtypes = [c_void_p, c_void_p, c_int]

def inverse_sigmoid(x):
    """Return the logit of *x*: log(x / (1 - x)), the inverse of torch.sigmoid."""
    odds = x / (1 - x)
    return torch.log(odds)

class MyRaster:
    """CPU-side 3D Gaussian Splatting rasterizer.

    Loads a quantized Gaussian model from an ``.npz`` archive, dequantizes
    every attribute (positions, SH features, opacity, scaling, rotation) and
    drives the native kernels in ``build/libtest.dll`` through a
    preprocess -> GetRanges -> render pipeline, producing an 800x800 image.
    """

    def __init__(self, model_path, viewmatrix, projmatrix, campos):
        """Load and dequantize the model, then allocate per-frame buffers.

        Args:
            model_path: path to the quantized ``.npz`` model file.
            viewmatrix: 4x4 world-to-camera matrix (torch tensor).
            projmatrix: 4x4 combined view-projection matrix (torch tensor).
            campos: camera position tensor, shape (3,).
        """
        Input = np.load(model_path)
        # self.quantization = Input['quantization']
        # Positions (means3D); round-tripped through fp16 to mimic
        # half-precision storage.
        self.xyz = torch.from_numpy(Input['xyz'])
        self.xyz = self.xyz.half().float()
        # SH coefficients, DC component: affine dequantize
        # (q - zero_point) * scale, then a qint8 FakeQuantize pass.
        features_dc_q = torch.from_numpy(Input['features_dc']).int()
        features_dc_scale = torch.from_numpy(Input['features_dc_scale'])
        features_dc_zero_point = torch.from_numpy(Input['features_dc_zero_point'])
        features_dc = (features_dc_q - features_dc_zero_point) * features_dc_scale
        features_dc_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        self.features_dc = features_dc_qa(features_dc)
        # SH coefficients, remaining (higher-order) bands.
        # NOTE(review): the name features_dc_qa is reused below for the
        # "rest" quantizer; harmless but confusing.
        features_rest_q = torch.from_numpy(Input['features_rest'])
        features_rest_scale = torch.from_numpy(Input['features_rest_scale'])
        features_rest_zero_point = torch.from_numpy(Input['features_rest_zero_point'])
        features_dc_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        features_rest = (features_rest_q - features_rest_zero_point) * features_rest_scale
        self.features_rest = features_dc_qa(features_rest)
        # Concatenate DC + rest into the full SH feature tensor.
        self.feature = torch.cat((self.features_dc, self.features_rest), dim=1)
        # Opacity.
        opacity_q = torch.from_numpy(Input['opacity'])
        opacity_scale = torch.from_numpy(Input['opacity_scale'])
        opacity_zero_point = torch.from_numpy(Input['opacity_zero_point'])
        opacity = (opacity_q - opacity_zero_point) * opacity_scale
        opacity_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        # NOTE(review): inverse_sigmoid followed by sigmoid is an identity up
        # to floating-point error — confirm whether a transform was intended.
        opacity = inverse_sigmoid(opacity)
        opacity = torch.sigmoid(opacity)
        self.opacity = opacity_qa(opacity)
        # Codebook indices: per-Gaussian SH-feature index and shape index.
        self.feature_indices = torch.from_numpy(Input['feature_indices']).long()
        self.gaussian_indices = torch.from_numpy(Input['gaussian_indices']).long()
        # Gaussian scale vectors (per-axis).
        scaling_q = torch.from_numpy(Input['scaling'])
        scaling_scale = torch.from_numpy(Input['scaling_scale'])
        scaling_zero_point = torch.from_numpy(Input["scaling_zero_point"])
        scaling = (scaling_q - scaling_zero_point) * scaling_scale
        scaling_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        # Activation: ReLU then L2-normalize (keeps only the direction).
        self.scaling_activation = lambda x: torch.nn.functional.normalize(
            torch.nn.functional.relu(x)
        )
        self.scaling = self.scaling_activation(scaling)
        # NOTE(review): the line below overwrites the activated value with the
        # fake-quantized RAW scaling — the activation result is discarded.
        # Confirm whether scaling_qa(self.scaling) was intended.
        self.scaling = scaling_qa(scaling)
        # Scalar per-Gaussian scale factor (stored in log space; exp applied).
        scaling_factor_q = torch.from_numpy(Input['scaling_factor'])
        scaling_factor_scale = torch.from_numpy(Input['scaling_factor_scale'])
        scaling_factor_zero_point = torch.from_numpy(Input['scaling_factor_zero_point'])
        scaling_factor = (scaling_factor_q - scaling_factor_zero_point) * scaling_factor_scale
        scaling_factor_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        self.scaling_factor = scaling_factor_qa(scaling_factor)
        self.scaling_factor = torch.exp(self.scaling_factor)
        # Gaussian rotations (quaternions, re-normalized after dequantization).
        rotation_q = torch.from_numpy(Input['rotation'])
        rotation_scale = torch.from_numpy(Input['rotation_scale'])
        rotation_zero_point = torch.from_numpy(Input['rotation_zero_point'])
        rotation = (rotation_q - rotation_zero_point) * rotation_scale
        rotation_qa = torch.ao.quantization.FakeQuantize(dtype=torch.qint8)
        self.rotation = rotation_qa(rotation)
        self.rotation = torch.nn.functional.normalize(self.rotation)
        # print(
        #     (self.xyz.element_size() * self.xyz.nelement()
        # +   self.feature.element_size() * self.feature.nelement()
        # +   self.opacity.element_size() * self.opacity.nelement()
        # +   self.feature_indices.element_size() * self.feature_indices.nelement()
        # +   self.gaussian_indices.element_size() * self.gaussian_indices.nelement()
        # +   self.scaling.element_size() * self.scaling.nelement()
        # +   self.scaling_factor.element_size() * self.scaling_factor.nelement()
        # +   self.rotation.element_size() * self.rotation.nelement())/1024/1024
        # )

        # print(
        #     (self.xyz.element_size() * 187159 * 3
        # +   self.feature.element_size() * 187159 * 48
        # +   self.opacity.element_size() * 187159
        # +   self.scaling.element_size() * 187159
        # +   self.rotation.element_size() * 187159)/1024/1024
        # )

        # Scene / camera constants: P Gaussians, SH degree D, M coefficients,
        # fixed 800x800 output resolution.
        self.P = self.xyz.shape[0]
        self.D = 3
        self.M = 16
        self.H = 800
        self.W = 800
        # Per-Gaussian screen-space tile rectangle and radius, filled by the
        # native preprocess kernel.
        self.rect_min = torch.zeros((self.P,2)).int()
        self.rect_max = torch.zeros((self.P,2)).int()
        self.radii = torch.zeros((self.P)).int()
        self.viewmatrix = viewmatrix
        self.projmatrix = projmatrix
        self.campos = campos
        self.tanfovx = 0.3600000
        self.tanfovy = 0.3600000
        # NOTE(review): focal_x is computed from tanfovy; harmless here since
        # tanfovx == tanfovy, but confirm which was intended.
        self.focal_x = self.W / (2.0 * self.tanfovy)
        self.focal_y = self.H / (2.0 * self.tanfovy)
        # Output buffers written by the native kernels.
        self.means2D = torch.zeros((self.P,2))
        self.depths = torch.zeros(self.P)
        self.cov3Ds = torch.zeros((self.P,6))
        self.rgb = torch.zeros((self.P,3))
        self.conic_opacity = torch.zeros((self.P,4))
        # Tile grid dimensions; W/BLOCK_X is a float truncated by .int().
        self.grid = torch.tensor([self.W/BLOCK_X, self.H/BLOCK_Y, 3]).int()
        self.tiles_touched = torch.zeros(self.P).int()
        # NOTE(review): allocated as (W, H, 3); PIL later interprets the first
        # axis as rows (height). Square image here, so shapes coincide.
        self.image = torch.zeros((self.W, self.H, 3))
        # Timing (seconds) for the most recent frame.
        self.preprocess_time = 0
        self.GetRanges_time = 0
        self.render_time = 0
        self.total_time = 0
        # Accumulated splat (key) count across frames, for averaging.
        self.avg_splat = 0

    def preprocess(self):
        """Run the native per-Gaussian preprocessing kernel.

        Fills radii, means2D, depths, cov3Ds, rgb, conic_opacity,
        tiles_touched and the tile rectangles; elapsed wall time is stored in
        self.preprocess_time.
        """
        # preprocess_time temporarily holds the start timestamp.
        self.preprocess_time = time.perf_counter()
        args = (
            self.P, self.D ,self.M,
            self.xyz.data_ptr(),
            self.scaling.data_ptr(),
            self.scaling_factor.data_ptr(),
            1.0,
            self.rotation.data_ptr(),
            self.opacity.data_ptr(),
            self.feature.data_ptr(),
            self.viewmatrix.data_ptr(),
            self.projmatrix.data_ptr(),
            self.campos.data_ptr(),
            self.W,self.H,
            self.tanfovx,self.tanfovy,
            self.focal_x,self.focal_y,
            self.radii.data_ptr(),
            self.means2D.data_ptr(),
            self.depths.data_ptr(),
            self.cov3Ds.data_ptr(),
            self.rgb.data_ptr(),
            self.conic_opacity.data_ptr(),
            self.grid.data_ptr(),
            self.tiles_touched.data_ptr(),
            False,
            self.rect_min.data_ptr(),
            self.rect_max.data_ptr(),
            self.feature_indices.data_ptr(),
            self.gaussian_indices.data_ptr(),
            self.image.data_ptr()
        )
        preprocess_compressed(*args)
        self.preprocess_time = time.perf_counter() - self.preprocess_time
        # print(f'preprocess: {self.preprocess_time * 1000}ms')

    def GetRanges(self):
        """Build the sorted (tile, depth) key list and per-tile ranges.

        Pipeline: prefix-sum of tiles_touched -> duplicateWithKeys -> C-side
        sort (c_sort also fills self.ranges). Results land in
        self.gaussian_values and self.ranges; elapsed time in
        self.GetRanges_time.
        """
        # Earlier torch-based implementation kept for reference:
        # self.GetRanges_time = time.perf_counter()
        # points_offsets = torch.cumsum(self.tiles_touched,dim=0).int()
        # num_rendered = points_offsets[-1]
        # gaussian_keys = torch.zeros(num_rendered).long()
        # gaussian_values = torch.zeros(num_rendered).int()
        # duplicateWithKeys(self.P,
        #                   self.rect_min.data_ptr(),
        #                   self.rect_max.data_ptr(),
        #                   self.depths.data_ptr(),
        #                   points_offsets.data_ptr(),
        #                   gaussian_keys.data_ptr(),
        #                   gaussian_values.data_ptr(),
        #                   self.grid.data_ptr(),
        #                   self.radii.data_ptr())
        # indices = gaussian_keys.sort().indices.long() # todo: port to C
        # gaussian_keys = gaussian_keys[indices]
        # self.gaussian_values = gaussian_values[indices]

        # self.ranges = torch.zeros((self.grid[0] * self.grid[1], 2)).int()
        # GetTileRanges(num_rendered, gaussian_keys.data_ptr(), self.ranges.data_ptr())
        # self.GetRanges_time = time.perf_counter() - self.GetRanges_time
        # del gaussian_keys
        # del gaussian_values
        # print(f'GetRanges: {self.GetRanges_time * 1000}ms')
        self.GetRanges_time = time.perf_counter()
        points_offsets = torch.zeros(self.tiles_touched.shape[0]).int()
        prefixsum(self.tiles_touched.data_ptr(), points_offsets.data_ptr(), self.tiles_touched.shape[0])
        # Last prefix-sum entry = total number of key/value pairs to render.
        num_rendered = points_offsets[-1]
        self.avg_splat += num_rendered
        gaussian_keys = torch.zeros(num_rendered).long()
        gaussian_values = torch.zeros(num_rendered).int()
        tile_gaussian_num = torch.zeros(self.grid[0] * self.grid[1]).int()
        duplicateWithKeys(self.P,
                          self.rect_min.data_ptr(),
                          self.rect_max.data_ptr(),
                          self.depths.data_ptr(),
                          points_offsets.data_ptr(),
                          gaussian_keys.data_ptr(),
                          gaussian_values.data_ptr(),
                          self.grid.data_ptr(),
                          self.radii.data_ptr())
        indices = torch.zeros(num_rendered).long()
        self.ranges = torch.zeros((self.grid[0] * self.grid[1], 2)).int()
        # T1 = time.perf_counter()
        c_sort(gaussian_keys.data_ptr(),
               num_rendered.item(),
               indices.data_ptr(),
               tile_gaussian_num.data_ptr(), 
               int(self.grid[0] * self.grid[1]), 
               self.ranges.data_ptr())
        # Reorder the Gaussian ids by the sorted key permutation.
        self.gaussian_values = gaussian_values[indices]

        self.GetRanges_time = time.perf_counter() - self.GetRanges_time
        del gaussian_keys
        del gaussian_values
        # print(f'GetRanges: {self.GetRanges_time * 1000}ms')

    def render(self):
        """Alpha-blend the sorted splats into self.image via renderV2.

        Elapsed wall time is stored in self.render_time.
        """
        self.render_time = time.perf_counter()
        # v1 render kernel kept for reference:
        # render(self.grid.data_ptr(),
        #        self.gaussian_values.data_ptr(),
        #        self.means2D.data_ptr(),
        #        self.conic_opacity.data_ptr(),
        #        self.rgb.data_ptr(),
        #        self.ranges.data_ptr(),
        #        self.image.data_ptr(),
        #        self.H,self.W)
        renderV2(self.grid.data_ptr(),
               self.gaussian_values.data_ptr(),
               self.means2D.data_ptr(),
               self.conic_opacity.data_ptr(),
               self.rgb.data_ptr(),
               self.ranges.data_ptr(),
               self.image.data_ptr(),
               self.H,self.W)

        self.render_time = time.perf_counter() - self.render_time
        # print(f'Render: {self.render_time * 1000}ms')
        pass

    def run(self, viewmatrix, projmatrix, campos):
        """Render one frame for the given camera.

        Args:
            viewmatrix: 4x4 world-to-camera matrix.
            projmatrix: 4x4 combined view-projection matrix.
            campos: camera position, shape (3,).

        Returns:
            (image, radii): the rendered image tensor and per-Gaussian radii.
        """
        self.viewmatrix = viewmatrix
        self.projmatrix = projmatrix
        self.campos = campos
        self.preprocess()
        self.GetRanges()
        self.render()
        gc.collect()
        self.total_time = self.preprocess_time + self.GetRanges_time + self.render_time
        # print(f"总: {self.total_time}s")
        return self.image, self.radii

def main(model_name):
    """Render a full 360-degree orbit around the model (one frame every two
    degrees, 180 frames total) and save each frame as a PNG under output/."""
    viewmatrix = torch.zeros(4, 4)
    projmatrix = torch.zeros(4, 4)
    campos = torch.Tensor([-3.9643,  0.1849,  0.7074])
    total_time = 0
    Raster = MyRaster(model_name, viewmatrix, projmatrix, campos)
    for deg in range(0, 360, 2):
        angle = torch.tensor(deg * math.pi / 180)
        # Euler angles (radians) defining the camera orientation.
        theta = torch.Tensor([(270 - deg) * math.pi / 180])  # about Z: pans left-to-right as it grows
        alpha = torch.Tensor([262 * math.pi / 180])          # about X: tilts bottom-to-top as it grows
        gama = torch.Tensor([0])                             # about Y: fixed at zero for this orbit
        # Elementary rotation about Z.
        rot_z = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],
                              [torch.sin(theta),  torch.cos(theta), 0],
                              [0, 0, 1]])
        # Elementary rotation about X.
        rot_x = torch.tensor([[1, 0, 0],
                              [0, torch.cos(alpha), -torch.sin(alpha)],
                              [0, torch.sin(alpha),  torch.cos(alpha)]])
        # Elementary rotation about Y.
        rot_y = torch.tensor([[torch.cos(gama), 0, torch.sin(gama)],
                              [0, 1, 0],
                              [-torch.sin(gama), 0, torch.cos(gama)]])
        R = rot_z @ rot_x @ rot_y
        # Camera center orbits the scene in the XY plane at height 0.7.
        T = torch.tensor([-4 * torch.cos(angle),   4 * torch.sin(angle),  0.7])
        # Assemble camera-to-world, then invert for world-to-camera.
        c2w = torch.zeros((4, 4))
        c2w[:3, :3] = R
        c2w[:3, 3] = T
        c2w[3, 3] = 1.0
        w2c = c2w.inverse()
        viewmatrix = w2c.T
        campos = T
        projection_matrix = torch.tensor([[2.7778, 0.0000, 0.0000, 0.0000],
                                          [0.0000, 2.7778, 0.0000, 0.0000],
                                          [0.0000, 0.0000, 1.0001, 1.0000],
                                          [0.0000, 0.0000, -0.0100, 0.0000]]).to(torch.float)
        projmatrix = viewmatrix @ projection_matrix
        Raster.run(viewmatrix, projmatrix, campos)
        frame = (Raster.image.numpy() * 255).astype(np.uint8)
        Image.fromarray(frame).save('output/' + str(deg // 2).zfill(5) + '.png', 'png')
        total_time += Raster.total_time


import json
def mse(img1, img2):
    """Per-image mean squared error; returns a tensor of shape (N, 1)."""
    squared_diff = (img1 - img2) ** 2
    flattened = squared_diff.view(img1.shape[0], -1)
    return flattened.mean(1, keepdim=True)

def psnr(img1, img2):
    """Per-image peak signal-to-noise ratio in dB, shape (N, 1).

    Assumes pixel values lie in [0, 1] (peak value 1.0). Returns +inf for
    identical images (MSE of zero).
    """
    # Local renamed from `mse` so it no longer shadows the module-level
    # mse() helper defined above.
    mean_sq_err = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(mean_sq_err))

def eval(model_name):
    """Render every test view listed in model/<scene>_test.json and save the
    frames as PNGs under output/.

    NOTE: this function shadows the builtin eval(); the name is kept for
    backward compatibility with existing callers.
    """
    viewmatrix = torch.zeros(4, 4)
    projection_matrix = torch.tensor([[2.7778, 0.0000, 0.0000, 0.0000],
                                      [0.0000, 2.7778, 0.0000, 0.0000],
                                      [0.0000, 0.0000, 1.0001, 1.0000],
                                      [0.0000, 0.0000, -0.0100, 0.0000]]).to(torch.float)
    projmatrix = torch.zeros(4, 4)
    campos = torch.Tensor([-3.9643,  0.1849,  0.7074])
    scene = model_name.split('/')[0]
    model_path = 'model/' + scene + '.npz'
    json_path = 'model/' + scene + '_test.json'
    Raster = MyRaster(model_path, viewmatrix, projmatrix, campos)
    with open(json_path) as f:
        content = json.load(f)
    frames = content['frames']
    for idx in trange(len(frames)):
        c2w = torch.tensor(frames[idx]['transform_matrix'])
        # Flip the Y/Z axis convention of the camera-to-world matrix.
        c2w[:, 1:3][0:3] *= -1
        campos = c2w[:, 3][:3]
        w2c = c2w.inverse()
        viewmatrix = w2c.T
        projmatrix = viewmatrix @ projection_matrix
        campos = campos.clone()
        Raster.run(viewmatrix, projmatrix, campos)
        frame = (Raster.image.numpy() * 255).astype(np.uint8)
        Image.fromarray(frame).save('output/' + str(idx).zfill(5) + '.png', 'png')

# Script entry point: render the held-out test views of the "Mic" scene.
if __name__ == '__main__':
    # main("model/drums.npz")
    eval("Mic")
