# depth2cloud.py  - convert Kinect depth image into 3D point cloud, in PLY format
# Jessica Chin: Engineers for Exploration, National Geographic. UCSD


import sys
from numpy import *
import binascii

# Kinect depth-to-world calibration constants, used in the pixel-to-3D
# conversion loop below:  offset = (pixel - center) * (z + minDistance) * scaleFactor
# NOTE(review): these look like the commonly cited Kinect registration
# constants -- confirm against the actual sensor calibration.
minDistance = -10
scaleFactor = 0.0021


def usage():
    """Print the command-line synopsis and terminate the program."""
    # FIX: the original message advertised <mesh_only> <mesh_color>, but
    # sys.argv[3] and sys.argv[4] are actually consumed as the output
    # cloud file and colored-cloud file.
    print('\ndepth2cloud  <depth_file> <rgb_file> <cloud_file> <color_file>\n')
    # sys.exit instead of the site-injected exit() builtin, which is not
    # guaranteed to exist (e.g. under python -S or when frozen).
    sys.exit(0)

  
# Exactly four positional arguments are required; anything else gets the
# synopsis and an exit.
if len(sys.argv) != 5:
    usage()

# Unpack the four paths in one step: depth dump, rgb dump, and the two
# output PLY files (geometry-only, and colored).
depth_file, rgb_file, cloud_file, color_file = sys.argv[1:5]

print 'Reading depth image...\n'
fid = open(depth_file)
depth_lines = fid.readlines()
depth_raw = []
print 'Processing depth image...\n'
# Compute depth (unit in cm) from raw 11-bit disparity value
# According to ROS site
for i in range(1, len(depth_lines)):
    for v in depth_lines[i]:
        num = binascii.hexlify(v)
        #print float(100/(-0.00307*int(num, 16) + 3.33))
        depth_raw.append(float(100/(-0.00307*int(num, 16) + 3.33)))

print 'Reading rgb image...\n'
fid = open(rgb_file)
rgb_lines = fid.readlines()
rgb_raw = []
print 'Processing rgb image...\n'
for i in range(1, len(rgb_lines)):
    for v in rgb_lines[i]:
        num = binascii.hexlify(v)
        rgb_raw.append(int(num, 16))


# 480x640 grids: per-pixel [r, g, b] triples and per-pixel depth values.
rgb = [[0 for _ in range(640)] for _ in range(480)]
depth = [[0 for _ in range(640)] for _ in range(480)]

# BUG FIX: depth_raw was computed but never copied into the depth grid,
# so every z written to the cloud was 0. Fill it row-major here
# (bounds-checked so a short or oversized dump cannot raise IndexError).
for p in range(min(len(depth_raw), 480 * 640)):
    row, col = divmod(p, 640)
    depth[row][col] = depth_raw[p]

# Group the flat rgb byte stream into [r, g, b] triples, row-major.
# A trailing incomplete triple is dropped, matching the original logic.
for p in range(min(len(rgb_raw) // 3, 480 * 640)):
    row, col = divmod(p, 640)
    rgb[row][col] = rgb_raw[3 * p:3 * p + 3]

print 'Generating cloud files...\n'
fc = open(cloud_file, 'wt')
fc.write('ply\n')
fc.write('format ascii 1.0\n')
fc.write('comment : created from Kinect depth image and color image\n')
#fc.write('element vertex %d\n' % len(raw))
fc.write('element vertex %d\n' % 307200)
fc.write('property float x\n')
fc.write('property float y\n')
fc.write('property float z\n')
fc.write('end_header\n')

fc2 = open(color_file, 'wt')
fc2.write('ply\n')
fc2.write('format ascii 1.0\n')
fc2.write('comment : created from Kinect depth image\n')
#fc.write('element vertex %d\n' % len(raw))
fc2.write('element vertex %d\n' % 307200)
fc2.write('property float x\n')
fc2.write('property float y\n')
fc2.write('property float z\n')
fc2.write('property uchar red\n')
fc2.write('property uchar green\n')
fc2.write('property uchar blue\n')
fc2.write('end_header\n')


# Convert each pixel (row i, col j, depth z) from image coordinates to
# 3D space (x, y, z), and stream one vertex per pixel into both PLY files.
for i in range(480):
    for j in range(640):
        z = depth[i][j]
        # Offset from the image center, scaled by depth -- uses the
        # minDistance / scaleFactor calibration constants from the top
        # of the file.
        x = (i - 480 / 2) * (z + minDistance) * scaleFactor
        y = (640 / 2 - j) * (z + minDistance) * scaleFactor
        r = rgb[i][j][0]
        g = rgb[i][j][1]
        b = rgb[i][j][2]
        fc.write("%f  %f  %f\n" % (x, y, z))
        fc2.write("%f  %f  %f  %d  %d  %d\n" % (x, y, z, r, g, b))

# BUG FIX: the original 'fc.close' (no parentheses) was a no-op attribute
# access, and fc2 was never closed at all -- both could lose buffered output.
fc.close()
fc2.close()
print('Done!')

