'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Copyright (c) 2009 bryan p. conrad
bconrad2@gmail.com

Created on: 7/26/2009

Version: 0.1

@author: bryan

Utilities to support basic JointTrack functions.
'''
import JT
import vtk
import csv
import os
import time
import numpy as np
import win32gui
import msvcrt
from marker_data_read import marker_data_read

def mag(vector):
    '''
    Return the Euclidean magnitude (L2 norm) of the given vector.

    Scalars (int/float) are returned unchanged so callers may pass
    either a single component or a full vector.  An empty vector has
    magnitude 0.0.
    '''
    # isinstance is the idiomatic type check; the original compared
    # type(...).__name__ against a name tuple, which misses int/float
    # subclasses.
    if isinstance(vector, (int, float)):
        return vector
    return sum(i ** 2 for i in vector) ** 0.5

def save_rms(filename, data, tag=''):
    """
    Save a 1D array of per-dimension RMS errors to a tab-delimited text
    file with a two-line header.

    filename: output path.
    data: sequence of RMS error values, one per dimension (as produced
        by rms()).  NOTE(review): the original docstring said "2D data",
        but the format string only handles one scalar per row.
    tag: optional label included in the header line.
    """
    data = np.asarray(data)
    n_frames = len(data)
    lines = ['RMS Error Data - ' + tag + '\t# Frames:' + str(n_frames),
             'Dim\tRMS_Error']
    for i in range(data.shape[0]):
        lines.append('Dim %i\t%f' % (i, data[i]))
    # context manager guarantees the handle is closed even on error
    with open(filename, 'w') as fid:
        fid.write('\n'.join(lines) + '\n')


def rms(data, max_frame=None):
    """
    Calculate the RMS error of each dimension (column) over the frame
    axis of the given array.

    data: numpy array [N x n_dims] where axis 0 is the frame and axis 1
        the dimension; a 1D array is treated as a single dimension.
    max_frame: optional last frame (exclusive) to consider when
        calculating the RMS; defaults to all frames.

    Returns a float for 1D input, otherwise a numpy array [n_dims] of
    per-dimension RMS errors.
    """
    data = np.asarray(data)
    # 'is None' rather than '== None' -- avoids elementwise comparison
    # surprises and is the idiomatic identity test
    if max_frame is None:
        max_frame = len(data)
    shape = data.shape

    if len(shape) == 1:
        col = data[:max_frame]
        # .mean() always yields a float, so integer input arrays do not
        # suffer floor division under Python 2
        return np.sqrt((col * col).mean())

    rms_result = np.zeros(shape[-1])
    for i in range(shape[-1]):
        # one column (dimension) restricted to the requested frames
        col = data[:max_frame, i]
        rms_result[i] = np.sqrt((col * col).mean())
    return rms_result
    
def test_rms(n=None):
    """
    Quick sanity check for rms(): the RMS of one period of a sine wave
    should approach 1/sqrt(2) as the number of samples grows.

    n: number of samples; when omitted, prompts interactively (original
       behavior).  Returns the computed RMS so callers can inspect it.
    """
    if n is None:
        # interactive prompt kept from the original (Python 2 raw_input)
        n = int(raw_input("how many samples (more than 1000): "))
    samples = np.sin(np.linspace(0, 2 * np.pi, n))
    # BUG fix: the original called the undefined name 'array_rms' and
    # passed a 5D array that rms() does not handle; call rms() on the
    # 1D sample vector instead.
    b = rms(samples)
    print("for %i values, b = %f" % (n, float(b)))
    print("b should approach 1/sqrt(2)->[%f]" % float(1 / np.sqrt(2)))
    return b


def genericImageReader(filename):
    """
    A generic VTK image reader.

    filename: a path; the reader class is selected from the filename
        extension (mha/mhd, png, bmp, jpg, tif/tiff).  Unknown
        extensions fall back to the meta-image reader with a warning
        (original behavior).

    output: VTK image data (reader.GetOutput()), or None when reading
        fails.
    """
    # BUG fix: the original used filename[-3:], so the 'tiff' branch
    # could never match a 4-character extension.
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    if ext in ('mha', 'mhd'):
        # mha image can handle float pixel values
        reader = JT.vtk.vtkMetaImageReader()
    elif ext == 'png':
        # png image can be UC or US
        reader = JT.vtk.vtkPNGReader()
    elif ext == 'bmp':
        # bmp image will be read as UC regardless of input type
        reader = JT.vtk.vtkBMPReader()
    elif ext == 'jpg':
        # jpg image must be UC
        reader = JT.vtk.vtkJPEGReader()
    elif ext in ('tif', 'tiff'):
        reader = JT.vtk.vtkTIFFReader()
    else:
        print("ERROR: No reader implemented for %s files, nothing done" % ext)
        reader = JT.vtk.vtkMetaImageReader()

    reader.SetFileName(filename)
    try:
        reader.Update()
        return reader.GetOutput()
    except Exception:
        # narrowed from a bare except; still best-effort per the
        # original's intent
        print("Error reading file: %s" % filename)

def genericImageWriter(image, filename):
    """
    A generic VTK image writer.

    image: VTK image data.
    filename: a path; the writer class is selected from the filename
        extension (mha/mhd, png, bmp, jpg, tif/tiff).  Unsupported
        extensions print a message and write nothing.
    """
    # BUG fix: the original used filename[-3:], so the 'tiff' branch
    # could never match a 4-character extension.
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    if ext in ('mha', 'mhd'):
        # mha image can handle float pixel values
        writer = JT.vtk.vtkMetaImageWriter()
        writer.SetCompression(0)
    elif ext == 'png':
        # png image can be UC or US
        writer = JT.vtk.vtkPNGWriter()
    elif ext == 'bmp':
        # bmp image will be written as UC regardless of input type
        writer = JT.vtk.vtkBMPWriter()
    elif ext == 'jpg':
        # jpg image must be UC
        writer = JT.vtk.vtkJPEGWriter()
    elif ext in ('tif', 'tiff'):
        writer = JT.vtk.vtkTIFFWriter()
    else:
        # BUG fix: the original fell through with 'writer' undefined and
        # raised NameError at SetFileName; bail out instead.
        print("ERROR: No writer implemented for %s files, nothing done" % ext)
        return

    writer.SetFileName(filename)
    writer.SetInput(image)
    try:
        writer.Write()
    except Exception:
        # narrowed from a bare except; best-effort per original intent
        print("Error writing file: %s" % filename)
        
def vtk2numpy(vtk_image):
    """
    Convert a VTK image to a numpy array via JT's numpy_support helper.
    """
    return JT.numpy_support.vtk_image_to_numpy(vtk_image)


def GetVTKMatrixFromNumpyArray(npArray):
    """
    Return a vtkMatrix4x4 built from a 4x4 numpy array.

    Prints an error and returns None when the array is not 4x4.
    """
    # normalized the original 8-space indentation to the file's 4-space
    # convention
    if npArray.shape != (4, 4):
        print("Error: numpy array is not 4x4, cannot convert to VTK matrix")
        return

    vtkMatrix = JT.vtk.vtkMatrix4x4()
    for row in range(npArray.shape[0]):
        for col in range(npArray.shape[1]):
            vtkMatrix.SetElement(row, col, npArray[row][col])
    return vtkMatrix
    
def GetNumpyArrayFromVTKMatrix(vtkMatrix):
    """
    Return a 4x4 numpy array copied from a vtkMatrix4x4.

    Duck-typed: any object providing GetElement(row, col) is accepted.
    Prints a message and returns None when the input does not provide
    GetElement.
    """
    # BUG fix: the original evaluated GetClassName() == 'vtkMatrix4x4'
    # inside a try block and DISCARDED the result, so the "check" never
    # rejected anything except objects lacking GetClassName entirely.
    # Check for the method that is actually used below instead.
    if not hasattr(vtkMatrix, 'GetElement'):
        print("Input is not recognized as a 'vtkMatrix4x4'")
        return

    npArray = np.eye(4)
    for row in range(npArray.shape[0]):
        for col in range(npArray.shape[1]):
            npArray[row][col] = vtkMatrix.GetElement(row, col)
    return npArray

def FindMTre(pose, proj_norm=None, ground_truth=(0, 0, 0, 0, 0, 0)):
    """
    Given a pose and the ground truth pose, both in 1D vector format
    (Tx,Ty,Tz,Rx,Ry,Rz) with rotations in radians, return the mTRE
    (mean Target Registration Error) as a float.

    The mTRE is one way to measure the accuracy of a registration
    algorithm.  For the Gold Standard data, the ground truth pose is
    all zeros.

    mTRE is calculated by sampling a grid of points from within a
    clinically relevant region, specified as 95x45x95 pixels
    (0.87mm^3/voxel).  The grid is sampled every 10 points to speed up
    the algorithm (~500 points vs 400,000).

    proj_norm: optional unit vector describing the direction from the
        detector to the source.  When supplied, the mTRE in the
        projection direction is also computed and the function returns
        (mTRE, mTRE_proj) instead of a single float.
    """
    # mutable-default fix: the list default is now an immutable tuple
    Tgold_wcs = GetTransform(ground_truth)
    Tgold_wcs_inv = np.linalg.inv(Tgold_wcs)

    Treg_vol = GetTransform(pose)
    Treg_wcs = np.dot(Treg_vol, Tgold_wcs)
    Treg_wcs_inv = np.linalg.inv(Treg_wcs)

    # BUG fix: 'proj_norm != None' is an elementwise comparison when a
    # numpy array is passed and raises on truth testing; test identity
    # once up front.
    have_proj = proj_norm is not None
    if have_proj:
        # n is the unit vector projecting out of the imaging plane
        # toward the source
        n = proj_norm

    err = 0.0
    p_err = 0.0
    count = 0
    for x in range(-47, 48, 10):
        for y in range(-22, 23, 10):
            for z in range(-47, 48, 10):
                p = np.array([x, y, z, 1]) * 0.87
                p[3] = 1  # restore the homogeneous coordinate
                Preg = np.dot(Treg_wcs_inv, p)
                Pgold = np.dot(Tgold_wcs_inv, p)
                err += np.sqrt(((Preg - Pgold) ** 2).sum())
                if have_proj:
                    p_err += np.sqrt((np.dot(Preg[0:3] - Pgold[0:3], n) ** 2).sum())
                count += 1

    mTRE = err / count
    print("mTRE = %f" % mTRE)

    if have_proj:
        # if a projection normal is given, also return the mTRE in the
        # projection direction
        return mTRE, p_err / count
    return mTRE



class postProcess(object):
    """
    A class to do some post processing on result data from
    registrations.

    Reads a tab-delimited file of poses (Tx,Ty,Tz,Rx,Ry,Rz), calculates
    the mTRE (mean Target Registration Error) of each pose (the ground
    truth pose is assumed to be [0,0,0,0,0,0]), and writes one result
    per row to <basename>_out.csv next to the input file.
    """
    def __init__(self, filepath):
        """Open the input reader and the <basename>_out.csv writer."""
        path_dir, filename = os.path.split(filepath)
        basename, ext = os.path.splitext(filename)
        open_file = open(filepath, 'r')
        self.reader = csv.reader(open_file, delimiter='\t')
        save_path = os.path.join(path_dir, basename + '_out.csv')
        self.save_file = open(save_path, 'w')
        self.writer = csv.writer(self.save_file)

    def read_data(self):
        """
        Read every cell of the input file into self.data, an (N, 6)
        float32 array; unparsable cells become the 9999 sentinel.
        """
        data = []
        for line in self.reader:
            for element in line:
                try:
                    data.append(float(element))
                except ValueError:
                    # narrowed from a bare except; keep the sentinel
                    data.append(9999)
        self.data = np.asarray(data, 'f').reshape(-1, 6)
        print(self.data.shape)

    def mTRE(self):
        """
        Compute the mTRE for every pose, write one value per row to the
        output file, close it, and keep results in self.mTRE_values.
        """
        # BUG fix: the original called FindMTre with the invalid keyword
        # 'proj_normal' (the parameter is proj_norm -> TypeError),
        # rebound the accumulator array each iteration (discarding all
        # results), and wrote empty rows.
        results = np.zeros(len(self.data))
        for i, pose in enumerate(self.data):
            results[i] = FindMTre(pose)
            self.writer.writerow([results[i]])
        self.mTRE_values = results
        self.save_file.close()

    def write_data(self):
        """
        Nothing to do -- the rows are written in mTRE().
        """
        pass

    def run(self):
        """Read the data, then compute and save the mTRE values."""
        self.read_data()
        self.mTRE()
        

def ConvertMcal(filename):
    """
    Convert a camera-calibration-toolbox .m result file (mcal) to a
    JointTrack internal calibration (.cal) file.

    The output file is written next to the input, named after the input
    with dots replaced by underscores plus a '.cal' suffix.

    Raises NameError when any of the expected tokens (Tc_1, fc, cc, nx,
    ny) is missing from the input file.
    """
    # 'with' guarantees the handle closes; also avoids shadowing the
    # builtin 'file'
    with open(filename) as fid:
        raw_data = fid.read()
    data = raw_data.replace(';', '').split()
    for i, el in enumerate(data):
        # tokens look like:  name = [ v1 v2 v3 ]   (vectors)
        #                    name = v              (scalars)
        if el == 'Tc_1':
            Tc_1 = [float(data[i + 3]), float(data[i + 4]), float(data[i + 5])]
        elif el == 'fc':
            fc = [float(data[i + 3]), float(data[i + 4])]
        elif el == 'cc':
            cc = [float(data[i + 3]), float(data[i + 4])]
        elif el == 'nx':
            nx = float(data[i + 2])
        elif el == 'ny':
            ny = float(data[i + 2])

    # focal length from the z-translation of the extrinsic; pixel size
    # from the mean focal length in pixels
    focal_len = abs(Tc_1[2])
    pix_size = focal_len / np.mean(fc)

    # principal point offset in image coordinates, i.e. (0,0) is the
    # top-left pixel
    centerX = (nx - 1) / 2.0
    centerY = (ny - 1) / 2.0
    xoffim = cc[0] - centerX
    yoffim = cc[1] - centerY

    # xoff and yoff in JointTrack/KneeTrack coordinates (y upward)
    xoff = xoffim * pix_size
    yoff = -yoffim * pix_size

    # BUG fix: the original replaced dots in the ENTIRE path, which
    # breaks when a directory name contains a '.'; only mangle the
    # file name itself.
    head, tail = os.path.split(filename)
    new_name = os.path.join(head, tail.replace('.', '_') + '.cal')
    with open(new_name, 'w') as outfile:  # mode: write, text
        outfile.write('JT_INTCALIB\n')
        outfile.write('%g\n%g\n%g\n%g\n%g\n%g\n' %
                      (focal_len, xoff, yoff, pix_size, nx, ny))

def convert_JTv1_to_numpy(filename_list):
    """
    Convert old JTv1 (JD's version) pose files to the numpy format
    compatible with pytrack project files.

    filename_list: a list of file paths (or a single path) to individual
        model poses during the same trial (e.g. prox and dist implant).

    Returns an array of shape (frame, implant, 6) with translations in
    mm and rotations in radians.
    """
    # NOTE(review): this implant->bone coordinate transformation is
    # defined but never applied below -- confirm whether it should be.
    T_implant_bone = np.asarray([[0, 1, 0, 0], [0, 0, 1, 0],
                                 [1, 0, 0, 0], [0, 0, 0, 1]])

    # isinstance is the idiomatic check; also accept a single path
    if not isinstance(filename_list, list):
        filename_list = [filename_list]
    trial_data = []
    for model_path in filename_list:
        model_data = []
        # 'with' closes the handle the original leaked
        with open(model_path) as fid:
            reader = csv.reader(fid)
            # skip the header row; next(reader) works on both Py2 and
            # Py3 (the original reader.next() is Py2-only)
            next(reader)
            for row in reader:
                # first column is the frame number, last is isOptimized
                implant_pose = np.asarray(row[1:7], 'f')
                # axis remapping from the JTv1 to the pytrack convention
                trans_implant_pose = np.zeros(implant_pose.shape)
                trans_implant_pose[0] = -implant_pose[2]
                trans_implant_pose[1] = -implant_pose[0]
                trans_implant_pose[2] = -implant_pose[1]
                trans_implant_pose[3] = implant_pose[5]
                trans_implant_pose[4] = -implant_pose[3]
                trans_implant_pose[5] = -implant_pose[4]
                model_data.append(trans_implant_pose)
        model_data = np.asarray(model_data, 'f')
        # convert from inches to mm
        model_data[:, 0:3] *= 25.4
        # convert from degrees to radians
        model_data[:, 3:6] *= np.pi / 180
        trial_data.append(model_data)
    data = np.asarray(trial_data, 'f')
    # reorder to match pyTrack: (implant, frame, dim) -> (frame, implant, dim);
    # astype(float) keeps the original float64 output dtype
    return np.transpose(data, (1, 0, 2)).astype(float)

def trc_viewer(filename):
    """
    Create a vtk render window with a sphere actor at the location of
    each marker in a trc (tracked ascii) file.

    Press 'p' in the render window to play the first 50 frames; press
    'x' in the console while playing to stop early.  Windows-only:
    relies on msvcrt for console keyboard polling and win32gui to pump
    the message queue.  TODO: allow the data to be played/animated.

    input: filename - a path to a trc (tracked ascii) file
    """

    def char_event(obj,event):
        # vtk observer callback: start playback when 'p' is pressed
        key = obj.GetKeySym()
        if key == 'p':
            print "Playing"
            play()
    def play():
        # step through the first 50 frames, repositioning every marker
        # actor (closure over marker_list, data, renWin below)
        for frame in range(50):
            print "Frame #%i" % frame
            #key = iren.GetKeySym()
            # poll the console (not the vtk window) for a stop keypress
            if msvcrt.kbhit():
                key = msvcrt.getch()
            else:
                key = ''
            print key
            if key == 'x':
                print "quiting"
                return
            else:
                for i,marker in enumerate(marker_list):     
                    marker.SetPosition(data[frame,i,:])
                renWin.Render()
                # pump the Windows message queue so the window repaints
                win32gui.PeekMessage(0,0,0,0)
                time.sleep(.2)
                
    renWin = vtk.vtkRenderWindow()
    ren = vtk.vtkRenderer()
    iren = vtk.vtkRenderWindowInteractor()
    # NOTE(review): SetCurrentStyleToTrackballCamera exists only when
    # GetInteractorStyle() returns a vtkInteractorStyleSwitch -- confirm
    # with the installed vtk version.
    style = iren.GetInteractorStyle()
    style.SetCurrentStyleToTrackballCamera()
    iren.AddObserver("CharEvent",char_event)
    
    iren.SetRenderWindow(renWin)
    renWin.AddRenderer(ren)
    renWin.SetSize(512,512)

    print "Processing: %s" % filename
    # presumably data is (frame, marker, xyz) -- indexing below relies
    # on that layout
    [n_frames, data, marker_names, data_rate, headers] = marker_data_read(filename)
    marker_list = []
    for m,marker in enumerate(marker_names):
        # one sphere actor per marker, placed at its frame-0 position
        sphere = create_sphere(size=5)
        sphere.SetPosition(data[0,m,:])
        ren.AddActor(sphere)
        marker_list.append(sphere)

    renWin.Render()
    iren.Start()

def make_marker_stl(trc_path, model_output_path, marker_list=None,
                    marker_size=5, color=(0, 0, 1), frame_range=(0, 1), show=False):
    """
    Create an stl file from a trc, placing a sphere at the position
    defined by each marker in the file.

        trc_path is a trc filepath, which will be used to create the model.
        marker_list is a list of 1-based marker numbers that should appear
            in the model; defaults to all markers.
        frame_range is a (first, last) frame pair; marker positions are
            averaged over those frames.
        marker_size is the radius of the markers.
        color is an RGB tuple applied to the preview actor.
        If show is True, a render window will be displayed showing the model.
    """
    print("Making model from: %s, Frames: %i-%i" % (os.path.basename(trc_path),
                                                    frame_range[0],
                                                    frame_range[1]))

    [n_frames, data, marker_names, data_rate, headers] = marker_data_read(trc_path)
    # mutable-default fix: build the default list here instead of in the
    # signature; an explicit empty list still selects all markers
    if marker_list is None or len(marker_list) == 0:
        marker_list = range(1, len(marker_names))
    append_filter = vtk.vtkAppendPolyData()
    # Make all markers relative to the first marker in the list.
    # BUG fix: marker numbers are 1-based (the loop below indexes data
    # with m-1), but the original indexed the reference marker with
    # marker_list[0] directly, centering the cluster on the wrong marker.
    first_marker = data[frame_range[0]:frame_range[1], marker_list[0] - 1, :].mean(0)
    for m in marker_list:
        position = data[frame_range[0]:frame_range[1], m - 1, :].mean(0) - first_marker
        transform = vtk.vtkTransform()
        tpdf = vtk.vtkTransformPolyDataFilter()
        tpdf.SetTransform(transform)
        transform.Translate(position)
        sphere = JT.vtk.vtkSphereSource()
        sphere.SetRadius(marker_size)
        sphere.SetPhiResolution(8)
        sphere.SetThetaResolution(8)
        tpdf.SetInput(sphere.GetOutput())
        append_filter.AddInput(tpdf.GetOutput())

    mapper = JT.vtk.vtkPolyDataMapper()
    cluster_actor = JT.vtk.vtkActor()
    mapper.SetInput(append_filter.GetOutput())
    cluster_actor.SetMapper(mapper)
    props = cluster_actor.GetProperty()
    props.SetColor(color)

    stl_writer = vtk.vtkSTLWriter()
    stl_writer.SetInput(append_filter.GetOutput())
    stl_writer.SetFileTypeToASCII()
    stl_writer.SetFileName(model_output_path)
    stl_writer.Update()
    stl_writer.Write()

    print("Model Saved to file: %s." % model_output_path)
    print("Model Position, Origin, Center: %s, %s, %s" % (str(cluster_actor.GetPosition()),
                                                          str(cluster_actor.GetOrigin()),
                                                          str(cluster_actor.GetCenter())))

    if show:
        # minimal preview window for the generated cluster
        renWin = vtk.vtkRenderWindow()
        ren = vtk.vtkRenderer()
        iren = vtk.vtkRenderWindowInteractor()
        iren.SetRenderWindow(renWin)
        renWin.AddRenderer(ren)
        renWin.SetSize(512, 512)
        ren.AddActor(cluster_actor)
        renWin.Render()
        iren.Start()

def create_sphere(size=5,color=(1,0,0)):
    """
    Build and return a sphere actor of the given radius.

    color may be an RGB tuple or one of the named presets
    'orange' / 'blue'.
    """
    named_colors = {
        'orange': (255.0/255.0, 74.0/255.0, 0),
        'blue': (0, 33.0/255.0, 165.0/255.0),
    }
    source = JT.vtk.vtkSphereSource()
    source.SetRadius(size)
    source.SetPhiResolution(10)
    poly_mapper = JT.vtk.vtkPolyDataMapper()
    poly_mapper.SetInput(source.GetOutput())
    actor = JT.vtk.vtkActor()
    actor.SetMapper(poly_mapper)
    # dispatch named colors through a table; unknown values pass through
    actor.GetProperty().SetColor(named_colors.get(color, color))
    return actor

def create_cone(size=30,color=(1,0,0)):
    """
    Build and return a cone actor with the given resolution.

    color may be an RGB tuple or one of the named presets
    'orange' / 'blue'.
    """
    named_colors = {
        'orange': (255.0/255.0, 74.0/255.0, 0),
        'blue': (0, 33.0/255.0, 165.0/255.0),
    }
    source = vtk.vtkConeSource()
    source.SetResolution(size)
    poly_mapper = vtk.vtkPolyDataMapper()
    poly_mapper.SetInput(source.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(poly_mapper)
    # dispatch named colors through a table; unknown values pass through
    actor.GetProperty().SetColor(named_colors.get(color, color))
    return actor

def setup_renwin():
    """
    Create a render window wired to a renderer and a trackball-camera
    interactor; the renderer is attached as renwin.ren for convenience.
    """
    window = vtk.vtkRenderWindow()
    renderer = vtk.vtkRenderer()
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
    window.ren = renderer
    return window

if __name__ == "__main__":
#    test()
    # NOTE(review): hard-coded local paths from the original author's
    # machine; edit before running.
    trc_path = r'C:\Users\bryan\phd\data\Spine Motion\Spine_072106\Processed\Spine2_5.trc'
    model_output_path = r'C:\Users\bryan\phd\data\Spine2\CT\Spine2_C7_mrks.stl'
#    trc_viewer(trc_path)
    # Spine2-C6: [13,14,15],Spine2-C7: [16,17,18] 
    make_marker_stl(trc_path, model_output_path, marker_size=5,
                    marker_list=[16,17,18], frame_range=[71,81], show=True)
