'''
utils
author: Liuhao Ge
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import pdb

import numpy as np
import matplotlib.pyplot as plt
import cv2
import os


class PointCloudVisualizer:
    """Renders a sequence of 3D point clouds to 2D PNG frames and stitches
    them into an MP4 video.

    Each cloud is rotated about the Y axis, projected onto the XY plane with
    matplotlib, and saved as ``frame_NNNN.png``; ``create_video`` then packs
    the saved frames into a video with OpenCV.
    """

    def __init__(self, points, frame_rate=2, frame_size=(1920, 1080),
                 frame_count=0, frames_dir='frames'):
        """
        Args:
            points: iterable of point clouds; each cloud is indexable as
                (N, 3) xyz coordinates (landmark indices up to 32 are used
                by ``connections``, so N is assumed >= 33 — confirm with caller).
            frame_rate: output video frames per second.
            frame_size: (width, height) of the output video in pixels.
            frame_count: starting index for frame filenames.
            frames_dir: directory where PNG frames are written/read.
        """
        self.points = points
        self.frame_rate = frame_rate
        self.frame_width, self.frame_height = frame_size
        self.frame_count = frame_count
        self.frames_dir = frames_dir
        # Landmark index pairs to draw as line segments (skeleton edges).
        # NOTE(review): original list contained (16, 22) twice; deduplicated.
        self.connections = [
            (7, 2), (2, 0), (0, 5), (5, 8), (9, 10), (17, 19), (17, 15), (19, 15), (15, 21),
            (15, 13), (13, 11), (11, 23), (23, 25), (25, 27),
            (11, 12), (12, 24), (23, 24), (24, 26), (26, 28),
            (27, 31), (27, 29), (29, 31), (28, 30), (28, 32), (30, 32),
            (12, 14), (14, 16), (16, 22), (16, 18), (16, 20), (18, 20)
        ]

    def process_and_plot_points(self, theta_degrees=85):
        """Rotate every cloud by ``theta_degrees`` about the Y axis, plot the
        XY projection with skeleton edges, and save one PNG per cloud into
        ``self.frames_dir`` (created if missing). Increments ``frame_count``
        per saved frame so repeated calls continue the numbering."""
        os.makedirs(self.frames_dir, exist_ok=True)
        theta = np.radians(theta_degrees)
        cos_theta, sin_theta = np.cos(theta), np.sin(theta)
        # Standard Y-axis rotation matrix. NOTE(review): points are
        # right-multiplied below (p @ R_y), which applies the transpose,
        # i.e. a rotation by -theta — presumably intentional; confirm.
        R_y = np.array([[cos_theta, 0, sin_theta], [0, 1, 0], [-sin_theta, 0, cos_theta]])
        for single_points in self.points:
            rotated_points = np.dot(single_points, R_y)
            plt.figure()
            # Draw each skeleton edge as a 2D segment (XY projection).
            for point1, point2 in self.connections:
                x = [rotated_points[point1][0], rotated_points[point2][0]]
                y = [rotated_points[point1][1], rotated_points[point2][1]]
                plt.plot(x, y)
            plt.scatter(rotated_points[:, 0], rotated_points[:, 1], s=10)
            plt.xlabel('X Coordinate')
            plt.ylabel('Y Coordinate')
            plt.title('2D Projection of 3D Point Cloud')
            plt.axis('equal')
            frame_filename = os.path.join(self.frames_dir, f'frame_{self.frame_count:04d}.png')
            plt.savefig(frame_filename)
            plt.close()
            self.frame_count += 1  # keep numbering monotonic across calls

    def create_video(self, output_path='output_video.mp4'):
        """Assemble all PNG frames in ``self.frames_dir`` (sorted by name)
        into ``output_path``, resizing each to the configured frame size.

        Args:
            output_path: target video filename (default keeps the original
                hard-coded name for backward compatibility).
        """
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_path, fourcc, self.frame_rate,
                                       (self.frame_width, self.frame_height))
        try:
            frame_files = sorted(f for f in os.listdir(self.frames_dir) if f.endswith('.png'))
            for frame_file in frame_files:
                img = cv2.imread(os.path.join(self.frames_dir, frame_file))
                if img is None:
                    # Unreadable/corrupt frame: skip instead of crashing in resize.
                    continue
                img_resized = cv2.resize(img, (self.frame_width, self.frame_height))
                video_writer.write(img_resized)
        finally:
            video_writer.release()




def group_points(points, opt):
    """First-level point grouping via kNN + ball query (PointNet++-style).

    For each of the first ``opt.sample_num_level1`` points (used as group
    centers), finds its ``opt.knn_K`` nearest neighbours by xyz distance
    among all ``opt.SAMPLE_NUM`` points, replaces neighbours outside the
    ball radius with the center itself, gathers their features, and centers
    each group's xyz on its center point.

    Args:
        points: B x SAMPLE_NUM x INPUT_FEATURE_NUM tensor; channels 0:3 are
            xyz. The first sample_num_level1 rows act as group centers
            (assumes the input is already ordered/sampled accordingly).
        opt: config object providing SAMPLE_NUM, sample_num_level1, knn_K,
            ball_radius, INPUT_FEATURE_NUM.

    Returns:
        inputs_level1: B x INPUT_FEATURE_NUM x sample_num_level1 x knn_K
            grouped features, xyz channels centered per group.
        inputs_level1_center: B x 3 x sample_num_level1 x 1 center xyz.
    """
    # group points using knn and ball query
    # points: B * 1024 * 6
    cur_train_size = len(points)
    # Squared xyz differences between every point and every center,
    # materialized via broadcasting expands.
    inputs1_diff = points[:,:,0:3].transpose(1,2).unsqueeze(1).expand(cur_train_size,opt.sample_num_level1,3,opt.SAMPLE_NUM) \
                 - points[:,0:opt.sample_num_level1,0:3].unsqueeze(-1).expand(cur_train_size,opt.sample_num_level1,3,opt.SAMPLE_NUM)# B * 512 * 3 * 1024
    inputs1_diff = torch.mul(inputs1_diff, inputs1_diff)    # B * 512 * 3 * 1024
    inputs1_diff = inputs1_diff.sum(2)                      # B * 512 * 1024
    dists, inputs1_idx = torch.topk(inputs1_diff, opt.knn_K, 2, largest=False, sorted=False)  # dists: B * 512 * 64; inputs1_idx: B * 512 * 64

    # ball query
    # NOTE: dists are *squared* distances, so opt.ball_radius is effectively
    # a squared-radius threshold.
    invalid_map = dists.gt(opt.ball_radius) # B * 512 * 64
    # Neighbours outside the ball fall back to the center's own index jj,
    # duplicating the center within its group.
    for jj in range(opt.sample_num_level1):
        inputs1_idx[:,jj,:][invalid_map[:,jj,:]] = jj

    # Flatten group indices and gather full feature rows for every neighbour.
    idx_group_l1_long = inputs1_idx.view(cur_train_size,opt.sample_num_level1*opt.knn_K,1).expand(cur_train_size,opt.sample_num_level1*opt.knn_K,opt.INPUT_FEATURE_NUM)
    inputs_level1 = points.gather(1,idx_group_l1_long).view(cur_train_size,opt.sample_num_level1,opt.knn_K,opt.INPUT_FEATURE_NUM) # B*512*64*6

    # Center each group's xyz on its center point (in-place on the gathered copy).
    inputs_level1_center = points[:,0:opt.sample_num_level1,0:3].unsqueeze(2)       # B*512*1*3
    inputs_level1[:,:,:,0:3] = inputs_level1[:,:,:,0:3] - inputs_level1_center.expand(cur_train_size,opt.sample_num_level1,opt.knn_K,3)
    # Move features to channels-first layout expected downstream.
    inputs_level1 = inputs_level1.unsqueeze(1).transpose(1,4).squeeze(4)  # B*6*512*64
    inputs_level1_center = inputs_level1_center.contiguous().view(-1,1,opt.sample_num_level1,3).transpose(1,3)  # B*3*512*1
    return inputs_level1, inputs_level1_center
    #inputs_level1: B*INPUT_FEATURE_NUM*sample_num_level1*knn_K, inputs_level1_center: B*3*sample_num_level1*1
    
def group_points_2(points, sample_num_level1, sample_num_level2, knn_K, ball_radius):
    """Second-level point grouping via kNN + ball query (channels-first).

    For each of the first ``sample_num_level2`` columns (group centers),
    finds its ``knn_K`` nearest neighbours by xyz distance among all
    ``sample_num_level1`` points, replaces neighbours outside the ball
    radius with the center itself, gathers their features, and centers each
    group's xyz channels on its center point.

    Args:
        points: B x (3+F) x sample_num_level1 tensor; channels 0:3 are xyz.
            The first sample_num_level2 columns act as group centers
            (assumes the input is already ordered/sampled accordingly).
        sample_num_level1: number of input points (== points.size(2)).
        sample_num_level2: number of group centers.
        knn_K: neighbours gathered per center.
        ball_radius: ball-query threshold; compared against *squared*
            distances below, so this is effectively a squared radius
            (consistent with group_points).

    Returns:
        inputs_level2: B x (3+F) x sample_num_level2 x knn_K grouped
            features, xyz channels centered per group.
        inputs_level2_center: B x 3 x sample_num_level2 x 1 center xyz.
    """
    cur_train_size = points.size(0)
    # Squared xyz differences between every point and every center,
    # materialized via broadcasting expands.
    inputs1_diff = points[:,0:3,:].unsqueeze(1).expand(cur_train_size,sample_num_level2,3,sample_num_level1) \
                 - points[:,0:3,0:sample_num_level2].transpose(1,2).unsqueeze(-1).expand(cur_train_size,sample_num_level2,3,sample_num_level1)# B * 128 * 3 * 512
    inputs1_diff = torch.mul(inputs1_diff, inputs1_diff)    # B * 128 * 3 * 512
    inputs1_diff = inputs1_diff.sum(2)                      # B * 128 * 512
    dists, inputs1_idx = torch.topk(inputs1_diff, knn_K, 2, largest=False, sorted=False)  # dists: B * 128 * 64; inputs1_idx: B * 128 * 64

    # ball query: neighbours outside the (squared) radius fall back to the
    # center's own index jj, duplicating the center within its group.
    invalid_map = dists.gt(ball_radius) # B * 128 * 64
    for jj in range(sample_num_level2):
        # Index the tensor directly; the legacy `.data` access used here
        # previously is a deprecated Variable-era idiom that bypasses
        # autograd bookkeeping (and is inconsistent with group_points).
        inputs1_idx[:,jj,:][invalid_map[:,jj,:]] = jj

    # Flatten group indices and gather full feature columns for every neighbour.
    idx_group_l1_long = inputs1_idx.view(cur_train_size,1,sample_num_level2*knn_K).expand(cur_train_size,points.size(1),sample_num_level2*knn_K)
    inputs_level2 = points.gather(2,idx_group_l1_long).view(cur_train_size,points.size(1),sample_num_level2,knn_K) # B*131*128*64

    # Center each group's xyz channels on its center point (in-place on the
    # gathered copy).
    inputs_level2_center = points[:,0:3,0:sample_num_level2].unsqueeze(3)       # B*3*128*1
    inputs_level2[:,0:3,:,:] = inputs_level2[:,0:3,:,:] - inputs_level2_center.expand(cur_train_size,3,sample_num_level2,knn_K) # B*3*128*64
    return inputs_level2, inputs_level2_center
    # inputs_level2: B*131*sample_num_level2*knn_K, inputs_level2_center: B*3*sample_num_level2*1