#! /usr/bin/env python
import os
import sys
import shutil
import time
import itk
import subprocess
from sage.all import *

##########################
# constants for particular analysis, change to fit.
# TODO: have these data read from the (header of the) image 
##########################
#DIM_x=512
#DIM_y=512
#DIM_z=90
#PIX_DIM_x=0.075
#PIX_DIM_y=0.075
#PIX_DIM_z=0.25

# Directory holding Evan's external image tools (transformL3, chg_data, ...).
BIN="/home/xqwang/projects/FXTAS/tools/bin"

# Voxel intensity used for each landmark type in multi-label landmark images.
# NOTE(review): "LABLE" is a historical typo; the name is used throughout, so
# it is kept for compatibility.
LANDMARK_LABLE=dict(geometric=1,skull=2)
##########################   utilities ################################
def image_dimension(image_file):
    """Return (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z) of an image.

    Shells out to the external "printVoxDims" tool, which prints the three
    voxel counts followed by the three voxel sizes (mm) on a single line.
    Unlike the previous Popen/communicate version, check_output raises
    subprocess.CalledProcessError when the tool exits non-zero instead of
    silently returning garbage.
    """
    dim_str = subprocess.check_output(["printVoxDims", image_file]).strip().split()
    return (int(dim_str[0]), int(dim_str[1]), int(dim_str[2]),
            RR(dim_str[3]), RR(dim_str[4]), RR(dim_str[5]))

def mask_PCA(file_image):
    """PCA of the physical positions of all foreground voxels of a mask image.

    Returns (center, eigen) as produced by principal_component_decomposition.
    """
    (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z) = image_dimension(file_image)

    # Load the mask as an unsigned-short 3D image and view it as an array
    # indexed [z, y, x].
    pixel_type = itk.Image[itk.US, 3]
    mask_reader = itk.ImageFileReader[pixel_type].New()
    mask_reader.SetFileName(file_image)
    mask_reader.Update()
    voxels = itk.PyBuffer[pixel_type].GetArrayFromImage(mask_reader.GetOutput())

    n_z, n_y, n_x = voxels.shape

    # Collect the physical (mm) coordinate of every non-zero voxel.
    points = []
    for k in range(n_z):
        for j in range(n_y):
            for i in range(n_x):
                if voxels[k, j, i] > 0:
                    points.append(vector([i * VOX_MM_x, j * VOX_MM_y, k * VOX_MM_z]))

    return principal_component_decomposition(points)

def _pca_affine_factors(pca_file):
    """Load PCA data from a .sobj file and return its 4x4 affine factors.

    Returns (dilation_affine, rotation_affine, translation_affine).  The
    eigen data is taken in the order (1, 2, 0) so the columns correspond to
    the x, y, z axes: principal_component_decomposition orients component 0
    roughly along z, 1 along x, 2 along y.
    """
    center, eigen = load(pca_file)
    lam = diagonal_matrix([eigen[1][0], eigen[2][0], eigen[0][0]])
    rotation = matrix([eigen[1][1][0], eigen[2][1][0], eigen[0][1][0]]).transpose()
    translation = matrix(center).transpose()

    dilation_affine = block_diagonal_matrix([lam, identity_matrix(1)])
    rotation_affine = block_diagonal_matrix([rotation, identity_matrix(1)])
    translation_affine = block_matrix([Integer(1), translation, Integer(0), Integer(1)])
    return dilation_affine, rotation_affine, translation_affine

def PCA_align(from_brain, to_brain): # from_brain and to_brain are assumed to be .sobj file that record the PCA data
    """Return the 4x4 affine mapping the PCA frame of from_brain onto the
    PCA frame of to_brain.

    The map first undoes the source frame (translation, rotation, dilation
    inverses) and then applies the target frame, so points expressed in
    from_brain's anatomy land at the corresponding to_brain location.
    """
    f_dilation, f_rotation, f_translation = _pca_affine_factors(from_brain)
    t_dilation, t_rotation, t_translation = _pca_affine_factors(to_brain)

    # Same composition as before: t_T * t_R * t_D * f_D^-1 * f_R^-1 * f_T^-1.
    transformation = (t_translation * t_rotation * t_dilation *
                      f_dilation.inverse() * f_rotation.inverse() * f_translation.inverse())

    return transformation

def _consolidate_clusters(points, same_cluster):
    """Collapse clusters of nearby landmark points to their average.

    `same_cluster(u, v)` decides whether point v belongs to the cluster whose
    running average is u.  Returns the list of cluster averages, preserving
    first-seen order (same greedy online-averaging as the original loops).
    """
    clusters = []  # list of (running average, point count)
    for v in points:
        for i, (u, c) in enumerate(clusters):
            if same_cluster(u, v):
                clusters[i] = (mean(c * [u] + [v]), c + 1)
                break
        else:
            clusters.append((v, 1))
    return [u for (u, c) in clusters]

def landmark_PCA(mllm,labels): # mllm -- multi-lable landmark
    """PCA of landmark locations from a multi-label landmark (mllm) source.

    mllm is either a saved sage object (dict label -> list of vectors) or a
    landmark image whose voxel intensities are the labels.  Only the given
    `labels` contribute to the PCA.  Clusters of points marking the same
    landmark are averaged into a single location (first very tight clusters
    on the same z plane, then looser clusters across planes).  The grouped
    landmarks are cached back to <mllm-without-extension>.sobj.

    Returns (center, eigen) as produced by principal_component_decomposition.
    """
    import logging
    # Look the logger up instead of relying on the module-global `logger`,
    # which only exists when this file is run as a script.
    log = logging.getLogger("actions")

    if not hasattr(labels,'__iter__'):
       labels=[labels]  # assume that labels is a single numerical label.

    # If mllm is not a sage object then it is assumed to be an image file.
    # (Narrowed from a bare `except:` so Ctrl-C etc. still propagate.)
    try:
        landmarks=load(mllm)
    except Exception:
        (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z)=image_dimension(mllm)
        image_type=itk.Image[itk.US,3]
        reader=itk.ImageFileReader[image_type].New()
        reader.SetFileName(mllm)
        reader.Update()

        itk_py_converter=itk.PyBuffer[image_type]
        image_array=itk_py_converter.GetArrayFromImage(reader.GetOutput())

        (z_range,y_range,x_range)=image_array.shape

        landmarks=dict()  # landmark vectors grouped by label (= voxel intensity)
        for z in range(z_range):
            for y in range(y_range):
                for x in range(x_range):
                    label=image_array[z,y,x]
                    if label != 0:
                        landmarks.setdefault(label, []).append(
                            vector([x*VOX_MM_x,y*VOX_MM_y,z*VOX_MM_z]))

    # Average clusters of points to a single landmark location per cluster.
    VL=[]
    for l in labels:
        # First "consolidate" very tight clusters on the same z plane: the
        # landmarker may mark one landmark with a handful of adjacent voxels.
        landmarks[l]=_consolidate_clusters(
            landmarks[l],
            lambda u, v: u[2]==v[2] and (u-v).norm() < 0.25)

        # Then consolidate looser clusters, possibly across z planes.
        landmarks[l]=_consolidate_clusters(
            landmarks[l],
            lambda u, v: (u-v).norm() < 2)
        VL+=landmarks[l]

    log.debug("%d landmarks.",len(VL))

    # Cache the consolidated landmarks next to the source file.
    # (os.path.splitext instead of split(".")[0], which broke on paths with
    # dots in directory names.)
    mllm_sage_obj=os.path.splitext(mllm)[0]
    save(landmarks, mllm_sage_obj)

    return principal_component_decomposition(VL)

def principal_component_decomposition(VL):
    """Return (center, eigen) for the list of 3D points VL.

    eigen is the list of (eigenvalue, [eigenvectors], multiplicity) tuples of
    the 3x3 covariance matrix, sorted by decreasing eigenvalue.  The sign of
    each leading eigenvector is normalised so that the first, second and
    third principal components roughly follow the +z, +x and +y axes
    respectively (the deduplicated form of the three original stanzas).
    """
    center, covM = covariance3D(VL)
    eigen = sorted(covM.eigenvectors_right(), key=lambda e: e[0], reverse=True)

    # Flip each component so that it points along its expected axis.
    expected_axes = [vector([0, 0, 1]),   # 1st component ~ +z
                     vector([1, 0, 0]),   # 2nd component ~ +x
                     vector([0, 1, 0])]   # 3rd component ~ +y
    for i, axis in enumerate(expected_axes):
        component = eigen[i][1][0]
        if component * axis < 0:
            eigen[i][1][0] = -component

    return center, eigen

def covariance3D(VL):  # VL is a list of 3D points or a matrix with 3D points as rows
    """Return (center, covM): the mean point and the 3x3 covariance matrix.

    Uses the biased (1/N) covariance estimate, as before.  The deviation
    vectors are built once each; the original rebuilt every one of them
    several times via repeated list comprehensions.
    """
    center = (Mx, My, Mz) = mean(VL)
    N = len(VL)

    dx = vector([p[0] - Mx for p in VL])
    dy = vector([p[1] - My for p in VL])
    dz = vector([p[2] - Mz for p in VL])

    covM = matrix(RDF, 3, 3)
    covM[0, 0] = dx * dx / N
    covM[1, 1] = dy * dy / N
    covM[2, 2] = dz * dz / N
    covM[0, 1] = covM[1, 0] = dx * dy / N
    covM[0, 2] = covM[2, 0] = dx * dz / N
    covM[1, 2] = covM[2, 1] = dy * dz / N

    return (center, covM)
    
###################################  individual interface  #######################################
def test(brain,domain,codomain):
    """Dummy pipeline action: wait a bit, then copy the brain image from
    domain to codomain.  Used to exercise the action-dispatch machinery."""
    time.sleep(10)
    source_image = "{0}/{1}.hdr".format(domain, brain)
    shutil.copy(source_image, codomain)

def histogram_match(brain,domain,codomain):
    """Histogram-match a brain image against the fixed standard image.

    The standard image itself is copied through unchanged (matching it
    against itself would be a no-op anyway).
    """
    source = domain + "/" + brain + ".hdr"
    target = codomain + "/" + brain + ".hdr"
    standard_image = "/home/xqwang/projects/FXTAS/mouse/overlap_ROI/1311_021110.hdr"

    if source == standard_image:
        shutil.copy(source, target)
    else:
        subprocess.check_call(["chg_data", source, "-histMatch1", standard_image,
                               "-outname", target])

def bias_correction(brain,domain,codomain):
    """Apply c3d's bias-field correction to the brain image."""
    source = "{0}/{1}.hdr".format(domain, brain)
    target = "{0}/{1}.hdr".format(codomain, brain)
    subprocess.check_call(["c3d", source, "-biascorr", "-o", target])

def clean_up_mask(brain,domain,codomain):
    ''' The input image is a brain mask, normally a result of manual clean-up from the automatic
        segmentation of itkSNAP. There could be multiple connected components in this image --
        we assume the largest one is the brain; and there may be holes in the brain.

        We want to pick the largest connected component, and fill in the holes.
    '''
    source_mask = domain + '/' + brain + '.hdr'
    target_mask = codomain + '/' + brain + '.hdr'

    # Always rerun for in-place cleaning (domain == codomain); otherwise skip
    # work when the output already exists.
    if domain == codomain or not os.path.exists(target_mask):
        # "-comp" labels connected components by decreasing size, so keeping
        # label 1 keeps the largest component only.
        subprocess.check_call(["c3d", source_mask,
                               "-comp", "-thresh", "1", "1", "1", "0",
                               "-type", "uchar",
                               "-o", target_mask])

        # Then fill interior holes slice by slice.
        subprocess.check_call(["chg_data", target_mask, "-holefillSlices", "1",
                               "-outname", target_mask])

def skull_stripping_by_mask(img_id,domain,codomain):
    '''
    Obtain brain tissue from the head image by using the brain mask.
    Input: Image ID, from which the MRI image and the brain mask are located.
    Output: The brain tissue as the skull stripping result
    '''
    head_image_file = domain + "/" + img_id + '.nii'  # hard coded image extension, BAD!

    # img_id has the form <mouse_id:4><yy:2><mm:2><dd:2>; downstream files use
    # the <mouse_id>_<mm><dd><yy> convention instead.
    mouse_id = img_id[0:4]
    year, month, day = img_id[4:6], img_id[6:8], img_id[8:10]
    new_id = "{0}_{1}{2}{3}".format(mouse_id, month, day, year)

    brain_mask_file = ("/home/xqwang/projects/FXTAS/CORRECTED/masks_corrected/"
                       + new_id + ".nii")  # hard coded image extension, BAD!
    brain_image_file = codomain + '/' + new_id + '.nii'

    # Skull stripping here is just a voxel-wise multiply of head x binary mask.
    if not os.path.exists(brain_image_file):  #TODO: add time stamp consideration
        subprocess.check_call(["c3d", head_image_file, brain_mask_file,
                               "-multiply",
                               "-o", brain_image_file])


def align_bubbles_image(brain,domain,codomain):
    """Resample the standard brain's bubble *image* into this brain's space.

    The affine is derived from the PCA frames of the fixed standard brain
    (2027_060310) and of `brain`; it is saved both as a sage object and as a
    text "transfile" consumed by Evan's external transformL3 tool.
    """
    # PCA data (center, eigen) of the fixed standard brain and of this brain.
    standard_pca_file=domain+'/landmarks/2027_060310_PCA.sobj'
    #standard_pca_file=domain+'/landmarks/2028_062209_PCA.sobj'
    pca_file=domain+'/landmarks/'+brain+"_PCA.sobj"

    # 4x4 affine taking the standard PCA frame onto this brain's PCA frame.
    transformation=PCA_align(standard_pca_file,pca_file)

    save(transformation,codomain+"/bubbles/"+brain+"_transformation")

    # bubbles can be described in two ways -- image (itkSNAP) and location description (itkBet).
    #### The following code aligns bubbles in image format:

    # generating "transfile" for Evan's transformL3 routine: the 4x4 affine
    # row by row, then image dimensions, voxel sizes (mm), and two reference
    # points (both the origin here).
    transfile_name=codomain+"/bubbles/"+brain+"_transformation.txt"
    transfile=open(transfile_name,'w')
    transfile.write("{0} {1} {2} {3} \n".format(transformation[0,0],transformation[0,1],transformation[0,2],transformation[0,3]))
    transfile.write("{0} {1} {2} {3} \n".format(transformation[1,0],transformation[1,1],transformation[1,2],transformation[1,3]))
    transfile.write("{0} {1} {2} {3} \n".format(transformation[2,0],transformation[2,1],transformation[2,2],transformation[2,3]))
    transfile.write("{0} {1} {2} {3} \n".format(0,              0,              0,              1))
    # NOTE(review): 200x160x80 voxels at 0.075x0.075x0.25 mm are hard-coded --
    # presumably the fixed ROI geometry produced upstream; confirm.
    transfile.write("200 160 80 \n")
    transfile.write("0.075 0.075 0.25 \n")
    transfile.write("{0} {1} {2} \n".format(0,0,0))
    transfile.write("{0} {1} {2} \n".format(0,0,0))
    transfile.close()

    # invoke Evan's "transformL3" to resample the standard bubbles image
    standard_bubbles_image=domain+'/bubbles/2027_060310_bubbles.hdr'
    #standard_bubbles_image=domain+'/bubbles/2028_062209_bubbles.hdr'
    new_bubbles_image=codomain+'/bubbles/'+brain+"_bubbles"
    #logger.debug("transformL3 "+image_file+" -transfile "+transfile_name+" -invert "+"-outname "+new_image)
    subprocess.check_call([BIN+"/transformL3", standard_bubbles_image, "-transfile", transfile_name, "-outname", new_bubbles_image])

    # use c3d "-thresh" to remove interpolation aliasing: intensities in
    # [128, 256] become 255, everything else 0.
    subprocess.check_call(["c3d", new_bubbles_image+".img", "-thresh", "128", "256", "255", "0", "-o", new_bubbles_image+".img"])
    
def align_bubbles_scf(brain,domain,codomain):
    """Map the standard brain's bubble *locations* (.scf text file) into
    this brain's space via the PCA-frame affine, writing a new .scf file.

    An .scf file is one header line followed by "x,y,z,radius" lines.
    """
    standard_pca_file = domain + '/2027_060310_PCA.sobj'
    pca_file = domain + '/' + brain + "_PCA.sobj"

    transformation = PCA_align(standard_pca_file, pca_file)

    save(transformation, codomain + "/" + brain + "_transformation")

    # bubbles can be described in two ways -- image (itkSNAP) and location
    # description (itkBet); this function handles the text (location) form.
    standard_bubbles_file = open(domain + '/2027_060310_bubbles.scf', 'r')
    #standard_bubbles_file=open(domain+'/2028_062209_bubbles.scf','r')
    bubbles = standard_bubbles_file.readlines()
    header = bubbles[0]
    fields = [line.strip().split(',') for line in bubbles[1:]]  # drop header, split on ,
    bubble_list = [(float(b[0]), float(b[1]), float(b[2]), float(b[3])) for b in fields]

    new_bubbles_file = open(codomain + '/' + brain + "_bubbles.scf", 'w')
    new_bubbles_file.write(header)
    for (bx, by, bz, radius) in bubble_list:
        # Transform the bubble center as a homogeneous point; the radius is
        # carried over unchanged.
        new_center = transformation * vector([bx, by, bz, 1])
        new_bubbles_file.write("{0},{1},{2},{3} \n".format(new_center[0], new_center[1], new_center[2], radius))

    new_bubbles_file.close()

def align_bubbles(brain,domain,codomain):
    """Transfer the standard brain's bubbles into this brain's space in BOTH
    representations: as an image (resampled with transformL3) and as an .scf
    location list (each bubble center mapped through the affine directly).

    Effectively align_bubbles_image + align_bubbles_scf in one pass, sharing
    a single PCA_align transformation.
    """
    # PCA data (center, eigen) of the fixed standard brain and of this brain.
    standard_pca_file=domain+'/landmarks/2027_060310_PCA.sobj'
    pca_file=domain+'/landmarks/'+brain+"_PCA.sobj"

    # 4x4 affine taking the standard PCA frame onto this brain's PCA frame.
    transformation=PCA_align(standard_pca_file,pca_file)

    save(transformation,codomain+"/bubbles/"+brain+"_transformation")

    # bubbles can be described in two ways -- image (itkSNAP) and location description (itkBet).
    #### The following code aligns bubbles in image format:
    # generating "transfile" for Evan's transformL3 routine: the 4x4 affine
    # row by row, then image dimensions, voxel sizes (mm), and two reference
    # points (both the origin here).
    transfile_name=codomain+"/bubbles/"+brain+"_transformation.txt"
    transfile=open(transfile_name,'w')
    transfile.write("{0} {1} {2} {3} \n".format(transformation[0,0],transformation[0,1],transformation[0,2],transformation[0,3]))
    transfile.write("{0} {1} {2} {3} \n".format(transformation[1,0],transformation[1,1],transformation[1,2],transformation[1,3]))
    transfile.write("{0} {1} {2} {3} \n".format(transformation[2,0],transformation[2,1],transformation[2,2],transformation[2,3]))
    transfile.write("{0} {1} {2} {3} \n".format(0,              0,              0,              1))
    # NOTE(review): 200x160x80 voxels at 0.075x0.075x0.25 mm are hard-coded --
    # presumably the fixed ROI geometry produced upstream; confirm.
    transfile.write("200 160 80 \n")
    transfile.write("0.075 0.075 0.25 \n")
    transfile.write("{0} {1} {2} \n".format(0,0,0))
    transfile.write("{0} {1} {2} \n".format(0,0,0))
    transfile.close()

    # invoke Evan's "transformL3" to resample the standard bubbles image
    standard_bubbles_image=domain+'/bubbles/2027_060310_bubbles.hdr'
    new_bubbles_image=codomain+'/bubbles/'+brain+"_bubbles"
    #logger.debug("transformL3 "+image_file+" -transfile "+transfile_name+" -invert "+"-outname "+new_image)
    subprocess.check_call([BIN+"/transformL3", standard_bubbles_image, "-transfile", transfile_name, "-outname", new_bubbles_image])

    #### The following code aligns bubbles in text format (location description):
    # an .scf file is one header line followed by "x,y,z,radius" lines.
    standard_bubbles_file=open(domain+'/bubbles/2027_060310_bubbles.scf','r')
    bubbles=standard_bubbles_file.readlines()
    bubble_list=bubbles[1:]  # remove header
    bubble_list=map(lambda b: b.strip().split(','), bubble_list) # remove RET and ,
    bubble_list=map(lambda b: (float(b[0]),float(b[1]),float(b[2]),float(b[3])), bubble_list)

    new_bubbles_file=open(codomain+'/bubbles/'+brain+"_bubbles.scf",'w')
    new_bubbles_file.write(bubbles[0]) # write the header
    for bubble in bubble_list:
        # Transform each bubble center as a homogeneous point; the radius is
        # carried over unchanged.
        center_affine=vector([bubble[0],bubble[1],bubble[2],1])
        radius=bubble[3]
        new_center_affine=transformation*center_affine
        new_bubbles_file.write("{0},{1},{2},{3} \n".format(new_center_affine[0],new_center_affine[1],new_center_affine[2],radius))

    new_bubbles_file.close()

def thin_slice(brain,domain,codomain):
    '''
    interpolate anisotropic images to isotropic images
    '''
    source = domain + '/' + brain + ".hdr"
    target = codomain + '/' + brain + ".hdr"

    # Doubling the slice count halves the z voxel pitch; cubic interpolation
    # fills in the new intermediate slices.
    (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z) = image_dimension(source)
    resample_spec = "%03dx%03dx%03dvox" % (DIM_x, DIM_y, DIM_z * 2)
    subprocess.check_call(["c3d", source, "-interpolation", "Cubic",
                           "-resample", resample_spec, "-o", target])
    
 
def geometric_landmark_cut_region(brain,domain,codomain):
    """ 
    Uses geometric landmarks to virtually align head (brain+skull) images.  
    Virtual alignment means to record only the transformation, instead of 
    really transforming the images.

    A ROI is also cut out from the given brain using the geometric landmarks. 
    The ROI is roughly 1/6 of the volume of the original image. As a possible 
    next step, an isotropic (interpolation to a thin-slice) image may be 
    generated on the ROI, which is much more efficient than interpolation 
    on the whole image.

    Reads  <domain>/<brain>.hdr and <domain>/landmarks/<brain>.{hdr,sobj}.
    Writes the cut image, cut landmark image, shifted landmarks and shifted
    PCA data under <codomain>.
    """

    image_file=domain+'/'+brain+".hdr"
    landmark_sage_object_file=domain+'/landmarks/'+brain+".sobj"
    landmark_image_file=domain+'/landmarks/'+brain+".hdr"
    #PCA_file=domain+"/landmarks/"+brain+"_PCA.sobj"
    # Recompute from the landmark image when the cached .sobj is missing or
    # older than the image; landmark_PCA also rewrites the .sobj as a side
    # effect, so the load() below can rely on it existing.
    if not os.path.exists(landmark_sage_object_file) or os.stat(landmark_sage_object_file).st_ctime < os.stat(landmark_image_file).st_ctime: 
        center,eigen=landmark_PCA(landmark_image_file,LANDMARK_LABLE['geometric']) 
    else:
        center,eigen=landmark_PCA(landmark_sage_object_file,LANDMARK_LABLE['geometric']) 
    
    save((center,eigen),domain+"/landmarks/"+brain+"_PCA")


    # compute the ROI corner (mm): fixed margins of 100 and 80 voxels at the
    # nominal 0.075 mm x/y pitch; the z term clamps the corner so the 80-slice
    # ROI stays inside the volume.
    # NOTE(review): the hard-coded pitches (0.075 mm, 0.25 mm) assume the
    # nominal acquisition geometry -- confirm before reusing on other data.
    (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z)=image_dimension(image_file) 
    corner=center-vector([100*0.075,80*0.075,max(min(28*0.25,center[2]),80*0.25+center[2]-DIM_z*VOX_MM_z)])
    #logger.debug(corner)
    corner_vox_x=(corner[0]/VOX_MM_x).round()
    corner_vox_y=(corner[1]/VOX_MM_y).round()
    corner_vox_z=(corner[2]/VOX_MM_z).round()

    # update landmarks after adjusting corner: shift every landmark into the
    # coordinate frame of the cut-out ROI.
    landmarks=load(landmark_sage_object_file)
    new_landmarks=dict()
    lables=landmarks.keys()
    for l in lables:
        new_landmarks[l]=map(lambda lm: lm-corner, landmarks[l]) 

    save(new_landmarks,codomain+'/landmarks/'+brain) 

    # Center expressed relative to the (voxel-rounded) corner.
    new_center=center-vector([corner_vox_x*VOX_MM_x,corner_vox_y*VOX_MM_y,corner_vox_z*VOX_MM_z])
    
    # c3d region spec strings: offset and size in voxels.
    corner_vox_str="%03dx%03dx%03dvox"%(corner_vox_x,corner_vox_y,corner_vox_z)
    size_vox_str="%03dx%03dx%03dvox"%((200*0.075/VOX_MM_x).round(),(160*0.075/VOX_MM_y).round(),(80*0.25/VOX_MM_z).round())

    # Cut out the ROI of the image_file and landmark_image_file and store in codomain
    new_image_file=codomain+'/'+brain+".img" 
    subprocess.check_call(["c3d", image_file, "-region", corner_vox_str, size_vox_str, "-o", new_image_file])
    if os.path.exists(landmark_image_file):
        new_landmark_image_file=codomain+'/landmarks/'+brain+".img" 
        subprocess.check_call(["c3d", landmark_image_file, "-region", corner_vox_str, size_vox_str, "-o", new_landmark_image_file])

    save((new_center,eigen),codomain+"/landmarks/"+brain+"_PCA")

def geo_ortho(image_id,domain,codomain):
    """
    Using the geometric landmarks to ortho the images.

    Computes (or loads cached) landmark PCA data, builds an isometry that
    rotates the brain into its PCA frame centered in the volume, records it
    as a sage object and a transformL3 "transfile", and runs transformL3.
    Low-resolution images (0.5 mm slices) are additionally resampled to
    double the slice count.
    """
    image_file=domain+'/'+image_id+".hdr"
    PCA_file=domain+"/landmarks/"+image_id+"_PCA.sobj"
    new_image_file=codomain+'/'+image_id+".hdr"
    
    (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z)=image_dimension(image_file) 

    # Skip the whole alignment if the output already exists and is newer
    # than the cached PCA data.
    if not (os.path.exists(new_image_file) and os.path.exists(PCA_file) and os.stat(new_image_file).st_ctime >= os.stat(PCA_file).st_ctime): 

        if os.path.exists(PCA_file):
            (center,eigen)=load(PCA_file)
        else:
            landmark_file=domain+"/landmarks/"+image_id+".hdr"
            center,eigen=landmark_PCA(landmark_file,LANDMARK_LABLE['geometric']) 
            save((center,eigen),domain+"/landmarks/"+image_id+"_PCA")
        image_center=vector([DIM_x*VOX_MM_x/2,DIM_y*VOX_MM_y/2,DIM_z*VOX_MM_z/2])
        # Columns ordered (1, 2, 0) so they correspond to the x, y, z axes
        # (component 0 of the PCA roughly follows z).
        rotation=matrix([eigen[1][1][0],eigen[2][1][0],eigen[0][1][0]]).transpose()
        translation=matrix(center).transpose()
        centering=matrix(image_center).transpose()

        rotation_isometry = block_diagonal_matrix([rotation,identity_matrix(1)])
        translation_isometry = block_matrix([Integer(1),translation,Integer(0),Integer(1)])
        centering_isometry = block_matrix([Integer(1),centering,Integer(0),Integer(1)])

        # Inverse of (translate to landmark center) o (rotate) o (un-center),
        # i.e. the map that takes the scanned pose to the centered PCA pose.
        isometry_transformation=(translation_isometry*rotation_isometry*centering_isometry.inverse()).inverse()
        save(isometry_transformation,codomain+"/"+image_id+"_isometry")

        # generating "transfile" for Evan's transformL3 routine: rotation rows,
        # homogeneous row, image dims, voxel sizes, then source/target points.
        transfile_name=codomain+"/"+image_id+"_trans"
        transfile=open(transfile_name,'w')
        transfile.write("{0} {1} {2} {3} \n".format(rotation[0,0],rotation[0,1],rotation[0,2],0))
        transfile.write("{0} {1} {2} {3} \n".format(rotation[1,0],rotation[1,1],rotation[1,2],0))
        transfile.write("{0} {1} {2} {3} \n".format(rotation[2,0],rotation[2,1],rotation[2,2],0))
        transfile.write("{0} {1} {2} {3} \n".format(0,              0,              0,              1))
        transfile.write("{0} {1} {2} \n".format(DIM_x,DIM_y,DIM_z))
        transfile.write("{0} {1} {2} \n".format(VOX_MM_x,VOX_MM_y,VOX_MM_z))
        transfile.write("{0} {1} {2} \n".format(center[0],center[1],center[2]))
        # NOTE(review): the target point is the image center shifted 5 mm in z
        # -- presumably an empirical offset to keep the brain in frame; confirm.
        transfile.write("{0} {1} {2} \n".format(image_center[0],image_center[1],image_center[2]-5))
        transfile.close()

        # invoke Evan's "transformL3" to orthogonise the brain image
        #logger.debug("transformL3 "+image_file+" -transfile "+transfile_name+" -invert "+"-outname "+new_image)
        subprocess.check_call([BIN+"/transformL3", image_file, "-transfile", transfile_name, "-invert", "-outname", codomain+'/'+image_id])

    # if low resolution image then interpolate and resample to high resolution
    # NOTE(review): exact float comparison against 0.5 -- works only if the
    # header stores exactly 0.5; confirm for this data.
    if VOX_MM_z==0.5: 
        dim_str="%03dx%03dx%03dvox"%(DIM_x,DIM_y,DIM_z*2)
        subprocess.check_call(["c3d", new_image_file, "-interpolation", "Cubic", "-resample", dim_str, "-o", codomain+'/thin_slice/'+image_id+'.hdr'])

def stripped_PCA_to_ortho(brain,domain,codomain):
    """ 
    Orthogonise skull-stripped brains by PCA.

    Computes (or loads cached) mask-voxel PCA of the stripped brain, builds
    the isometry into the centered PCA frame, saves the transformation both
    as a sage object and as a transformL3 "transfile", then runs transformL3.
    """
    image_file=domain+'/'+brain+".hdr"
    (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z)=image_dimension(image_file) 

    # Reuse cached PCA data when available; mask_PCA scans every voxel and
    # is expensive.
    PCA_file=codomain+"/"+brain+"_PCA.sobj"
    if os.path.exists(PCA_file):
        (center,eigen)=load(PCA_file)
    else:
        center,eigen=mask_PCA(image_file) 
        save((center,eigen),codomain+"/"+brain+"_PCA")
    image_center=vector([DIM_x*VOX_MM_x/2,DIM_y*VOX_MM_y/2,DIM_z*VOX_MM_z/2])
    # Columns ordered (1, 2, 0) so they correspond to the x, y, z axes
    # (component 0 of the PCA roughly follows z).
    rotation=matrix([eigen[1][1][0],eigen[2][1][0],eigen[0][1][0]]).transpose()
    translation=matrix(center).transpose()
    centering=matrix(image_center).transpose()

    rotation_isometry = block_diagonal_matrix([rotation,identity_matrix(1)])
    translation_isometry = block_matrix([Integer(1),translation,Integer(0),Integer(1)])
    centering_isometry = block_matrix([Integer(1),centering,Integer(0),Integer(1)])

    # Inverse of (translate to PCA center) o (rotate) o (un-center).
    isometry_transformation=(translation_isometry*rotation_isometry*centering_isometry.inverse()).inverse()
    save(isometry_transformation,codomain+"/"+brain+"_isometry")

    # generating "transfile" for Evan's transformL3 routine: rotation rows,
    # homogeneous row, image dims, voxel sizes, then source/target points.
    transfile_name=codomain+"/"+brain+"_trans"
    transfile=open(transfile_name,'w')
    transfile.write("{0} {1} {2} {3} \n".format(rotation[0,0],rotation[0,1],rotation[0,2],0))
    transfile.write("{0} {1} {2} {3} \n".format(rotation[1,0],rotation[1,1],rotation[1,2],0))
    transfile.write("{0} {1} {2} {3} \n".format(rotation[2,0],rotation[2,1],rotation[2,2],0))
    transfile.write("{0} {1} {2} {3} \n".format(0,              0,              0,              1))
    # NOTE(review): 200x160x80 voxels at 0.075x0.075x0.25 mm are hard-coded --
    # presumably the fixed ROI geometry produced upstream; confirm.
    transfile.write("200 160 80 \n")
    transfile.write("0.075 0.075 0.25 \n")
    transfile.write("{0} {1} {2} \n".format(center[0],center[1],center[2]))
    transfile.write("{0} {1} {2} \n".format(image_center[0],image_center[1],image_center[2]))
    transfile.close()

    # invoke Evan's "transformL3" to orthogonise the brain image
    new_image=codomain+'/'+brain
    #logger.debug("transformL3 "+image_file+" -transfile "+transfile_name+" -invert "+"-outname "+new_image)
    subprocess.check_call([BIN+"/transformL3", image_file, "-transfile", transfile_name, "-invert", "-outname", new_image])
     
def ortho_to_boxed(brain,domain,codomain):
    """Crop the orthogonalised image to the fixed row/column box and move the
    resulting Analyze .hdr/.img pair into codomain."""
    image_file = domain + '/' + brain + ".hdr"
    new_image = codomain + '/' + brain
    # "chg_data" seems to return 1 even on success, so the exit status is
    # deliberately ignored (subprocess.call rather than check_call).
    subprocess.call([BIN + "/chg_data", image_file, "-box",
                     "-rows", "202", "310", "-cols", "170", "342", "-discard"])
    # chg_data writes "<brain>_stripbox" next to its input; relocate both
    # halves of the Analyze pair.
    boxed_image = domain + "/" + brain + "_stripbox"
    for ext in (".hdr", ".img"):
        shutil.move(boxed_image + ext, new_image + ext)

def landmark_correspondence(brain, domain, codomain):
    '''
    Re-order landmarks read out of an image into a canonical order.

    When landmarks are read out from an image file into a .txt file, their
    order may not correspond between brains.  Rebuild the correspondence by
    sorting all landmarks by their squared distance to a reference landmark:
    the one with the most negative x-coordinate.

    Reads  <domain>/<brain>.txt   (one "x y z" line per landmark)
    Writes <codomain>/<brain>.txt (same landmarks, canonical order)
    '''
    landmark_filename = domain + '/' + brain + '.txt'
    ordered_landmark_filename = codomain + '/' + brain + '.txt'

    # open() instead of the Python-2-only file() builtin, and close the
    # handle deterministically.
    with open(os.path.expanduser(landmark_filename)) as lm_f:
        landmarks = [[float(coord) for coord in line.strip().split(' ')]
                     for line in lm_f]

    # Lexicographic min == most negative x-coordinate (ties broken by y, z).
    l0 = min(landmarks)
    ordered_landmarks = sorted(
        landmarks,
        key=lambda p: (p[0] - l0[0]) ** 2 + (p[1] - l0[1]) ** 2 + (p[2] - l0[2]) ** 2)

    # Text mode ("w", not "wb"): we write str, which "wb" rejects on Python 3.
    with open(ordered_landmark_filename, "w") as ordered_lm_f:
        for lm in ordered_landmarks:
            ordered_lm_f.write("{0} {1} {2}\n".format(lm[0], lm[1], lm[2]))

if __name__=="__main__" :
    # Set up a DEBUG-level console logger named "actions"; several pipeline
    # functions above reference this module-level `logger` name.
    import logging
    logger = logging.getLogger("actions")
    logger.setLevel(10)  # 10 == logging.DEBUG
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch = logging.StreamHandler()
    ch.setLevel(10)  # 10 == logging.DEBUG
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Command-line dispatch: <script> <action> <brain> <domain> <codomain>,
    # where <action> is the name of one of the functions in this module.
    # NOTE(review): an unknown action name raises KeyError; silently does
    # nothing when the argument count differs from 5.
    if len(sys.argv)==5:
        action=sys.argv[1]
        brain=sys.argv[2]
        domain=sys.argv[3]
        codomain=sys.argv[4]
        globals()[action](brain,domain,codomain)


###############################  handy tools ###################################
def extract_geo_landmark(landmark_dir,labels=LANDMARK_LABLE['geometric']): 
    '''
    Extract the landmark position information and save it directly in a .txt file.

    For every *.hdr landmark image in landmark_dir: read the voxels, group
    them by label (= intensity), consolidate clusters of nearby points into
    single landmark locations, save the grouped landmarks as a .sobj, and
    dump the geometric landmarks (label 1) as "x y z" lines in a .txt file.

    NOTE(review): the reading and cluster-consolidation code below is a
    near-verbatim copy of landmark_PCA; keep the two in sync.
    '''
    if not hasattr(labels,'__iter__'):
       labels=[labels]  # assume that labels is a single numerical label. 

    import glob  
    import pickle  # NOTE(review): imported but unused here
    currend_dir=os.path.abspath(os.curdir)
    os.chdir(landmark_dir)
    landmark_list=glob.glob('*.hdr')    

    for lm_file in landmark_list:
        (DIM_x, DIM_y, DIM_z, VOX_MM_x, VOX_MM_y, VOX_MM_z)=image_dimension(lm_file) 
        image_type=itk.Image[itk.US,3]
        reader=itk.ImageFileReader[image_type].New()
        reader.SetFileName(lm_file)
        reader.Update()
    
        itk_py_converter=itk.PyBuffer[image_type]
        image_array=itk_py_converter.GetArrayFromImage(reader.GetOutput())
    
        (z_range,y_range,x_range)=image_array.shape
    
        landmarks=dict()  # landmark vectors (mm) grouped by label
        for z in range(z_range):
            for y in range(y_range):
                for x in range(x_range):
                    label=image_array[z,y,x] # label is given by the intensity of the lm_file
                    if label != 0:
                        try:
                            landmarks[label].append(vector([x*VOX_MM_x,y*VOX_MM_y,z*VOX_MM_z]))
                        except KeyError:
                            landmarks[label]=[vector([x*VOX_MM_x,y*VOX_MM_y,z*VOX_MM_z])]
    
        
        # average clusters of points to a single landmark location
        VL=[]
        for l in labels:
            # First "consolidate" landmark clusters on the same z plane to
            # allow a more robust landmarking process: the landmarker may use
            # a (very tight) cluster of points on the same z plane to mark one
            # landmark.  A running geometric average identifies its location.
            VL_l=[]
            for v in landmarks[l]:
                f_exists=False
                for i in range(len(VL_l)):
                    u,c=VL_l[i]
                    if u[2]==v[2] and (u-v).norm() < 0.25: # on the same z plane and are very close
                        u=mean(c*[u]+[v])  # running average over c+1 points
                        c+=1
                        VL_l[i]=(u,c)
                        f_exists=True
                        break
                if f_exists==False:
                    VL_l.append((v,1))
            landmarks[l]=map(lambda x: x[0], VL_l) 
    
            # Then consolidate (not so tight) clusters on the same or
            # different z planes, again averaging each cluster.
            VL_l=[]
            for v in landmarks[l]:
                f_exists=False
                for i in range(len(VL_l)):
                    u,c=VL_l[i]
                    if (u-v).norm() < 2: # kind of close
                        u=mean(c*[u]+[v])  # running average over c+1 points
                        c+=1
                        VL_l[i]=(u,c)
                        f_exists=True
                        break
                if f_exists==False:
                    VL_l.append((v,1))
            landmarks[l]=map(lambda x: x[0], VL_l) 
            VL+=landmarks[l]
        
        # NOTE(review): `logger` is only defined when this file runs as a
        # script; calling this after a plain import raises NameError here.
        logger.debug("%d landmarks.",len(VL))
        
        # Cache the grouped landmarks next to the image, then dump the
        # geometric landmarks as plain text.
        image_id=lm_file.split(".")[0]
        save(landmarks, image_id)
       
        lm_text_filename=image_id+".txt"
        with open(lm_text_filename,"wb") as lm_t_f:
            geo_lms=landmarks[1] # currently, text file only stores "geometric" landmarks
            for lm in geo_lms:
                lm_t_f.write("{0} {1} {2}\n".format(lm[0], lm[1], lm[2]))
    os.chdir(currend_dir)

def change_date_format(image_dir='/home/xqwang/projects/FXTAS/mouse_corrected/ntrimouse'):
    '''
    Rename newly converted (from DICOM) Nifti files from the idyymmdd.nii(.gz)
    naming scheme to the older id_mmddyy.nii(.gz) scheme.
    '''
    import glob
    import shutil

    saved_cwd = os.path.abspath(os.curdir)
    os.chdir(image_dir)
    # stems are exactly 10 characters: 4-char id + yy + mmdd
    for old_name in glob.glob('??????????.nii.gz'):
        stem = old_name.split('.')[0]
        mouse_id = stem[0:4].strip('_')
        yy, mmdd = stem[4:6], stem[6:]
        renamed = "{id}_{md}{yy}.nii.gz".format(id=mouse_id, yy=yy, md=mmdd)
        shutil.move(old_name, renamed)
    os.chdir(saved_cwd)

def convert_landmarks_sobj2txt(landmark_dir):
    '''
    Read the landmark locations from each ".sobj" file in landmark_dir and
    write them out as a plain text file (one "x y z" triple per line).
    I am forced to move away from sage since it does not integrate with nibabel well.
    '''
    import glob
    saved_cwd = os.path.abspath(os.curdir)
    os.chdir(landmark_dir)
    for lm_file in glob.glob('*.sobj'):
        lm = load(lm_file)
        geo_lms = lm[1]
        if len(geo_lms) != 8:
            # each image is expected to carry exactly 8 landmarks
            logger.error("%s has only %d landmarks.", lm_file, len(geo_lms))
        out = open(lm_file.split('.')[0] + '.txt', "wb")
        for point in geo_lms:
            out.write("{0} {1} {2}\n".format(point[0], point[1], point[2]))
        out.close()
    os.chdir(saved_cwd)

def pick_resolution(image_pool="/home/xqwang/projects/FXTAS/mouse/ntrimouse",high_resolution_dir="/home/xqwang/projects/FXTAS/mouse/high", \
                                                                             low_resolution_dir="/home/xqwang/projects/FXTAS/mouse/low",   \
                                                                             other_resolution_dir="/home/xqwang/projects/FXTAS/mouse/others"): 
    ''' Copy images of only strange resolution to .../others. That is, copy images
        that have neither the 512x512x90 nor the 512x512x45 resolution (detected
        purely by file size) to .../others.
        NOTE(review): despite the original wording ("move"), the code copies the
        files and leaves the originals in place -- confirm which is intended.'''

    import glob
    currend_dir=os.path.abspath(os.curdir)
    os.chdir(image_pool)
    image_list=glob.glob('*.img')    

    for image in image_list:
        # 23592960 = 512*512*45*2 bytes and 47185920 = 512*512*90*2 bytes,
        # i.e. the two expected resolutions (presumably 2 bytes per voxel)
        if os.path.getsize(image) != 23592960L and os.path.getsize(image) != 47185920L:
            image_base_name=image.split('.')[0]
            # base names may carry a '-suffix'; the part before '-' is the image id
            image_id=image_base_name.split('-')[0]
            # skip images whose id already has a copy sorted into high/ or low/
            if not os.path.exists(high_resolution_dir+"/"+image_id+".img") and not os.path.exists(low_resolution_dir+"/"+image_id+".img"): 
                try:
                    shutil.copy(image_base_name+'.img', other_resolution_dir+"/"+image_base_name+".img") 
                except IOError:
                    # NOTE(review): on a failed copy this retries the same source
                    # under a "_strange" name -- confirm the intent of this fallback
                    shutil.copy(image_base_name+'.img', other_resolution_dir+"/"+image_base_name+"_strange.img") 
                try:
                    shutil.copy(image_base_name+'.hdr', other_resolution_dir+"/"+image_base_name+".hdr") 
                except IOError:
                    shutil.copy(image_base_name+'.hdr', other_resolution_dir+"/"+image_base_name+"_strange.hdr") 
    os.chdir(currend_dir)

def extract_brain_IDs(dir, file_name='brain_ids'):
    ''' Extract the brain IDs (image base names) from the given directory and
    write them, one per line, to file_name inside that directory. '''

    import glob
    saved_cwd = os.path.abspath(os.curdir)
    os.chdir(dir)

    id_list = [img.split('.')[0] for img in glob.glob('*.img')]

    out = open(file_name, "w")
    for brain_id in id_list:
        out.write(brain_id + "\n")
    out.close()
    os.chdir(saved_cwd)

def extract_imaging_date(mask_dir="/home/xqwang/projects/FXTAS/mouse/stripped/mask"):
    ''' 
    Extract the distinct absolute imaging dates, parsed from the mask file
    names (expected form: <mouse_id>_<mmddyy>.img).

    Returns a sorted list of datetime.date objects, one per distinct date.
    '''
    import glob
    import datetime

    imaging_date = set()  # set dedup replaces the O(n^2) list.count() scan
    for mask_file in glob.glob1(mask_dir, "*.img"):
        base = mask_file.split('.')[0]
        date_str = base.split('_')[1]   # mmddyy
        month = int(date_str[0:2])
        day = int(date_str[2:4])
        year = int('20' + date_str[4:6])
        imaging_date.add(datetime.date(year, month, day))

    return sorted(imaging_date)

def extract_age(mask_dir="/home/xqwang/projects/FXTAS/mouse/stripped/mask"):
    ''' 
    Extract all possible ages (in days) of the mice over all imaging sessions.

    The age of each scan is the acquisition date encoded in the mask file name
    (<mouse_id>_<mmddyy>.img) minus the mouse's birthday recorded in mouse.sobj.

    Returns the sorted list of distinct ages in days.
    '''
    import glob
    import datetime

    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj")

    imaging_age = set()  # set dedup replaces the O(n^2) list.count() scan
    for mask in glob.glob1(mask_dir, "*.img"):
        base = mask.split('.')[0]
        mouse_id = base.split('_')[0]
        date_str = base.split('_')[1]   # mmddyy
        acquisition_date = datetime.date(int('20'+date_str[4:6]),
                                         int(date_str[0:2]),
                                         int(date_str[2:4]))
        birthday = mouse_table[mouse_id][0]
        imaging_age.add((acquisition_date - birthday).days)

    return sorted(imaging_age)

def QC_name_masked(list_file, dir="/home/xqwang/projects/FXTAS/mouse/ROI", QC_dir="/home/xqwang/projects/FXTAS/mouse/ROI/QC"):
    ''' Name masking for QC purpose.

    Copy each brain listed in list_file (and its bubbles companion images)
    into QC_dir/<qc_id>/ under an anonymized sequential name B_001, B_002, ...

    list_file -- text file, one brain image name per line; its base name
                 (before the first '.') becomes the QC session id.
    dir       -- directory holding <id>.hdr/.img and bubbles/<id>_bubbles.*
    QC_dir    -- destination root for the anonymized copies.
    '''
    qc_id=os.path.basename(list_file).split('.')[0]
    # open() instead of the Python-2-only file() builtin, and close the handle
    with open(list_file) as f:
        brain_list=f.readlines()
    brain_list=map(lambda b: b.strip().split('.')[0], brain_list) # strip newline and extension
 
    qc_id_dir=QC_dir+"/"+qc_id

    if not os.path.exists(qc_id_dir+"/bubbles"):
        os.makedirs(qc_id_dir+"/bubbles")
 
    counter=0
    for brain in brain_list:
        counter+=1
        image_base_name=brain.split('.')[0]
        new_image_base_name="B_%03d"%counter
        shutil.copyfile(dir+"/"+image_base_name+".hdr", qc_id_dir+"/"+new_image_base_name+".hdr")
        shutil.copyfile(dir+"/"+image_base_name+".img", qc_id_dir+"/"+new_image_base_name+".img")
        shutil.copyfile(dir+"/bubbles/"+image_base_name+"_bubbles.hdr", qc_id_dir+"/bubbles/"+new_image_base_name+"_bubbles.hdr")
        shutil.copyfile(dir+"/bubbles/"+image_base_name+"_bubbles.img", qc_id_dir+"/bubbles/"+new_image_base_name+"_bubbles.img")

def obtain_volume_from_mask(brain_list=None, \
                            mask_directory="/home/xqwang/projects/FXTAS/mouse/stripped/mask",\
                            volume_object="/home/xqwang/projects/FXTAS/mouse/analysis/volume.sobj"):
    ''' 
    Obtain the brain volume of every brain in the given list.

    brain_list     -- iterable of mouse ids; defaults to every mouse in mouse.sobj.
    mask_directory -- directory holding <mouse>_<mmddyy>.nii mask images.
    volume_object  -- path of the sage object file the result is saved to.

    Returns dict: mouse_id -> list of (acquisition_date, age_in_days, volume),
    where the volume is c3d's voxel integral over the mask.
    '''
    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj") 
    import glob
    from datetime import date

    if brain_list is None:   # 'is None', not '== None'
        brain_list=mouse_table.keys() 

    volume=dict()
    for mouse in brain_list:
        birthday=mouse_table[mouse][0]

        for image in glob.glob(mask_directory+"/"+mouse+"_*.nii"):
            p=subprocess.Popen(["c3d",image, "-voxel-integral"], stdout=subprocess.PIPE)
            brain_size=float(p.communicate()[0].split(":")[1].strip())

            # file name encodes the acquisition date as mmddyy
            acquisition_date_str=os.path.basename(image).split(".")[0].split("_")[1]
            acquisition_date=date(int("20"+acquisition_date_str[4:6]), int(acquisition_date_str[0:2]),int(acquisition_date_str[2:4]))

            age=(acquisition_date-birthday).days
            # setdefault replaces the bare try/except that silently swallowed
            # every exception type, not just the missing-key case
            volume.setdefault(mouse, []).append((acquisition_date,age,brain_size))

    save(volume,volume_object)
    return volume

def volume_to_csv(volume_object="/home/xqwang/projects/FXTAS/CORRECTED/analysis/volume.sobj", \
                  date_csv_file="/home/xqwang/projects/FXTAS/CORRECTED/analysis/date_volume.csv",\
                  age_csv_file="/home/xqwang/projects/FXTAS/CORRECTED/analysis/age_volume.csv"):
    '''
    Generate two csv files about the volume data of the mouse population:
    1). mouse X imaging_date
    2). mouse X age
    Each row is "mouse_id,birthday,cgg" followed by one column per imaging
    date (resp. age); a cell is empty when the mouse was not imaged then.
    '''
    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj")
    imaging_date=load("/home/xqwang/projects/FXTAS/mouse/analysis/imaging_date.sobj")
    imaging_age=load("/home/xqwang/projects/FXTAS/mouse/analysis/imaging_age.sobj")

    # 'with' guarantees both csv files are closed even if a write fails
    with open(date_csv_file,"w") as date_csv, open(age_csv_file,"w") as age_csv:
        # header row: three empty leading columns (mouse_id, birthday, cgg)
        date_csv.write(",,")
        age_csv.write(",,")
        for d in imaging_date:
            date_csv.write(",{0}".format(d))
        date_csv.write("\n")
        for a in imaging_age:
            age_csv.write(",{0}".format(a))
        age_csv.write("\n")

        try:
            volume=load(volume_object)
        except Exception:  # narrowed from a bare except; recompute on any load failure
            volume=obtain_volume_from_mask(None, "/home/xqwang/projects/FXTAS/CORRECTED/masks_corrected",volume_object)

        for mouse in sorted(volume):
            birthday=mouse_table[mouse][0]
            cgg=mouse_table[mouse][1]
            date_csv.write("{0},{1},{2}".format(mouse, birthday,cgg))
            age_csv.write("{0},{1},{2}".format(mouse, birthday,cgg))

            # image_info tuples are (imaging_date, age, volume)
            image_info_list=sorted(volume[mouse])

            for d in imaging_date:
                match=None
                for image_info in image_info_list:
                    if d==image_info[0]:
                        match=image_info
                        break
                if match is not None:
                    date_csv.write(",{0}".format(match[2])) # match[2] is the volume
                else:
                    date_csv.write(",")
            date_csv.write("\n")

            for a in imaging_age:
                match=None
                for image_info in image_info_list:
                    if a==image_info[1]:
                        match=image_info
                        break
                if match is not None:
                    age_csv.write(",{0}".format(match[2])) # match[2] is the volume
                else:
                    age_csv.write(",")
            age_csv.write("\n")

def mean_and_volume(mask_directory="/home/xqwang/projects/FXTAS/mouse/ROI/mask",\
                    head_directory="/home/xqwang/projects/FXTAS/mouse/ntrimouse",\
                    ROI_directory="/home/xqwang/projects/FXTAS/mouse/ROI",\
                    brain_directory="/home/xqwang/projects/FXTAS/mouse/stripped"):
    ''' 
    Pair up image brightness and brain volume for the analysis of possible correlation.

    Returns two parallel lists: L1 holds the mean brightness of each stripped
    brain restricted by its mask ("-m"), L2 the mean brightness of the matching
    ROI image with "-t 8900" passed to printVolMean (presumably a threshold --
    confirm against the tool's usage).
    '''
    birthday=load("/home/xqwang/projects/FXTAS/mouse/main/birthday.sobj") 
    import glob
    brain_list=birthday.keys() 
    L1=[]  # per-image brain brightness
    L2=[]  # per-image ROI brightness (parallel to L1)
    for mouse in brain_list:
        image_list=glob.glob1(mask_directory,mouse+"_*.hdr")
        for mask in image_list: 
            image_id=os.path.basename(mask).split(".")[0]
            #head=head_directory+"/"+image_id+".hdr"
            #head_brightness=float(subprocess.Popen(["printVolMean",head], stdout=subprocess.PIPE).communicate()[0].strip())
            ROI=ROI_directory+"/"+image_id+".hdr"
            ROI_brightness=float(subprocess.Popen(["printVolMean",ROI, "-t", "8900"], stdout=subprocess.PIPE).communicate()[0].strip())
            brain=brain_directory+"/"+image_id+".hdr"
            # NOTE(review): 'mask' here is the bare glob1 basename, not a full
            # path, so "-m" resolves relative to the current working directory
            # rather than mask_directory -- confirm this is intended.
            brain_brightness=float(subprocess.Popen(["printVolMean",brain, "-m", mask], stdout=subprocess.PIPE).communicate()[0].strip())
            #p=subprocess.Popen(["c3d",mask, "-voxel-integral"], stdout=subprocess.PIPE)
            #brain_size=float(p.communicate()[0].split(":")[1].strip())
            L1.append(brain_brightness)
            L2.append(ROI_brightness)
    return L1, L2 

def correlation(L1, L2):
    ''' Compute the (sample) correlation coefficient of the two given lists,
    which must have the same length. '''
    assert(len(L1)==len(L2))

    # hoisted: the original recomputed mean(L1)/mean(L2) once per element
    m1=mean(L1)
    m2=mean(L2)
    L1_0=[x-m1 for x in L1]
    L2_0=[x-m2 for x in L2]

    return vector(L1_0)*vector(L2_0)/((len(L1)-1)*std(L1)*std(L2))

def reuse_landmarks(low_resolution_dir="/home/xqwang/projects/FXTAS/mouse/main_low", high_resolution_dir="/home/xqwang/projects/FXTAS/mouse/main"):
    ''' Reuse the landmark data obtained from the high resolution images for
    the matching low resolution images (same .img file name in both dirs). '''
    import glob
    for image_name in glob.glob1(low_resolution_dir, "*.img"):
        if not os.path.exists(high_resolution_dir + "/" + image_name):
            continue
        base = image_name.split('.')[0]
        destination = low_resolution_dir + "/landmarks/"
        # copy both the PCA description and the landmark point set
        for suffix in ("_PCA.sobj", ".sobj"):
            shutil.copy(high_resolution_dir + "/landmarks/" + base + suffix, destination)

def extract_non_overlap(low_resolution_dir="/home/xqwang/projects/FXTAS/mouse/main_low", high_resolution_dir="/home/xqwang/projects/FXTAS/mouse/main", \
                        overlap_dir="/home/xqwang/projects/FXTAS/mouse/overlap/"):
    ''' Move image pairs that exist in BOTH resolution directories out of the
    low resolution directory, leaving behind only the brains that have
    exclusively low resolution images.

    overlap_dir -- destination for the duplicated .hdr/.img pairs; new
                   backward-compatible parameter (previously hard coded).
    '''
    import glob
    for img in glob.glob1(low_resolution_dir, "*.hdr"):
        if os.path.exists(high_resolution_dir+"/"+img):
            img_id=img.split('.')[0]
            shutil.move(low_resolution_dir+"/"+img_id+'.hdr', overlap_dir)
            shutil.move(low_resolution_dir+"/"+img_id+'.img', overlap_dir)

def printVolMean(image_dir="/home/xqwang/projects/FXTAS/mouse/ROI_low"):
    ''' Return the mean brightness (as reported by the external printVolMean
    tool) of every .img volume under image_dir. '''
    import glob

    def brightness_of(image_path):
        # one printVolMean invocation per image; its stdout is a single number
        output = subprocess.Popen(["printVolMean", image_path], stdout=subprocess.PIPE).communicate()[0]
        return float(output.strip())

    return [brightness_of(img) for img in glob.glob(image_dir + "/*.img")]

def masks_PCA(mask_dir="/home/xqwang/projects/FXTAS/CORRECTED/masks_corrected"):
    '''
    Extract the "whole brain" PCA description, i.e. the 3 eigenvalues and
    eigenvectors of each brain mask. Results are cached next to the masks as
    <id>_PCA.sobj and recomputed only when the mask is newer than the cache.

    Returns a list of (lambda1, lambda2, lambda3) eigenvalue triples.
    '''
    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/mouse.sobj")
    import glob
    eigenvalue_triples=[]
    for mouse in mouse_table.keys():
        for mask in glob.glob1(mask_dir, mouse+"_*.nii"):
            mask_image_file=mask_dir+"/"+mask
            image_id=os.path.basename(mask).split(".")[0]
            cache=mask_dir+"/"+image_id+"_PCA.sobj"
            stale=(not os.path.exists(cache)) or os.stat(cache).st_ctime <= os.stat(mask_image_file).st_ctime
            if stale:
                center,eigen=mask_PCA(mask_image_file)
                save((center,eigen),mask_dir+"/"+image_id+"_PCA")
            else:
                center,eigen=load(cache)
            eigenvalue_triples.append((eigen[0][0],eigen[1][0],eigen[2][0]))

    return eigenvalue_triples

def landmarks_PCA(landmark_dir="/home/xqwang/projects/FXTAS/mouse/ROI/landmarks"):
    '''
    Extract the landmark PCA description, i.e. the 3 eigenvalues and
    eigenvectors of the landmark point clouds. Cached per image as
    <id>_PCA.sobj and recomputed only when the landmark image is newer.

    Returns a list of (lambda1, lambda2, lambda3) eigenvalue triples.
    '''
    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/mouse.sobj")
    import glob
    eigenvalue_triples=[]
    for mouse in mouse_table.keys():
        for lm in glob.glob1(landmark_dir, mouse+"_*.hdr"):
            lm_image_file=landmark_dir+"/"+lm
            image_id=os.path.basename(lm).split(".")[0]
            cache=landmark_dir+"/"+image_id+"_PCA.sobj"
            if os.path.exists(cache) and os.stat(cache).st_ctime > os.stat(lm_image_file).st_ctime:
                center,eigen=load(cache)
            else:
                # mask_PCA collects all non-zero voxels, so it works on
                # landmark label images just as well as on brain masks
                center,eigen=mask_PCA(lm_image_file)
                save((center,eigen),landmark_dir+"/"+image_id+"_PCA")
            eigenvalue_triples.append((eigen[0][0],eigen[1][0],eigen[2][0]))

    return eigenvalue_triples

def correct_landmark_label(landmark_dir="/home/xqwang/projects/FXTAS/mouse/new_images/new_only/high/high/landmarks"):
    '''
    Correct the labels of every landmark image in landmark_dir in place:
    c3d maps voxel values in [1, inf] to 1 and everything else to 0.
    '''
    import glob

    for landmark_image in glob.glob(landmark_dir + "/*.img"):
        command = ["c3d", landmark_image, "-thresh", "1", "inf", "1", "0", "-o", landmark_image]
        subprocess.check_call(command)


def obtain_PCA_from_mask(brain_list=None, \
                            mask_directory="/home/xqwang/projects/FXTAS/CORRECTED/masks_corrected",\
                            brain_PCA_object="/home/xqwang/projects/FXTAS/CORRECTED/analysis/brain_PCA.sobj"):
    ''' 
    Obtain the (length, width, thickness) information of each brain in the
    given list by a PCA analysis on the mask, and associate the PCA data with
    the mouse id.

    brain_list       -- iterable of mouse ids; defaults to every mouse in mouse.sobj.
    mask_directory   -- directory holding <mouse>_<mmddyy>.nii masks and PCA caches.
    brain_PCA_object -- path of the sage object file the result is saved to.

    Returns dict: mouse_id -> list of
    (cgg, acquisition_date, age_in_days, (lambda1, lambda2, lambda3)).
    '''
    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj") 
    import glob
    from datetime import date

    if brain_list is None:   # 'is None', not '== None'
        brain_list=mouse_table.keys() 

    brain_PCA=dict()
    for mouse in brain_list:
        birthday=mouse_table[mouse][0]
        cgg=mouse_table[mouse][1]

        for mask in glob.glob(mask_directory+"/"+mouse+"_*.nii"):
            image_id=os.path.basename(mask).split(".")[0]
            mask_PCA_file=mask_directory+"/"+image_id+"_PCA.sobj"
            # recompute the cached PCA only when the mask is newer than the cache
            if not os.path.exists(mask_PCA_file) or os.stat(mask_PCA_file).st_ctime <= os.stat(mask).st_ctime: 
                center,eigen=mask_PCA(mask) 
                save((center,eigen),mask_directory+"/"+image_id+"_PCA")
            else: 
                center,eigen=load(mask_PCA_file)

            # file name encodes the acquisition date as mmddyy
            acquisition_date_str=image_id.split("_")[1]
            acquisition_date=date(int("20"+acquisition_date_str[4:6]), int(acquisition_date_str[0:2]),int(acquisition_date_str[2:4]))

            age=(acquisition_date-birthday).days            

            # setdefault replaces the bare try/except that silently swallowed
            # every exception type, not just the missing-key case
            brain_PCA.setdefault(mouse, []).append((cgg, acquisition_date,age,(eigen[0][0],eigen[1][0],eigen[2][0])))
 
    save(brain_PCA,brain_PCA_object)
    return brain_PCA

def brain_PCA_vs_cgg(attention_list=[],brain_PCA_object="/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA.sobj",Normalized=True):
    '''
    Visualize the relation between the brain PCA and cgg repeats. The brain PCAs are represented by points in R^3, while the cgg repeats
    are coded by color (red: cgg < 15, green: 15 <= cgg < 110, blue: cgg >= 110).

    attention_list   -- mouse ids to highlight (full opacity, age-scaled size);
                        the default empty list highlights every mouse.
    brain_PCA_object -- sage object mapping mouse_id -> image records, where each
                        record is indexed as [2]=age and [3]=eigenvalue triple.
    Normalized       -- when True, rescale each point by the cube root of the
                        product of its coordinates.

    Returns the sum of the generated point3d objects.
    NOTE: attention_list=[] is a mutable default argument; it is only read and
    rebound here, never mutated, so it is harmless in practice.
    '''

    import glob
    from datetime import date
    from sage.plot.plot3d.shapes2 import frame3d

    mouse_table=load("/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj")
    mouse_list=mouse_table.keys() 
    if attention_list==[]:
        attention_list=mouse_list

    brain_PCA=load(brain_PCA_object)
  
    #Frame=frame3d([3.5,2.4,1.44],[3.9,2.65,1.62],color="black")
    #Points=[Frame] 
    Points=[]
    for mouse in mouse_list:
        # using cgg to compute the rgbcolor 
        cgg=mouse_table[mouse][1]
        
        if cgg < 15: 
            rgb=(1,0,0)
        elif cgg < 110:
            rgb=(0,1,0)
        else:
            rgb=(0,0,1)


        for image in brain_PCA[mouse]:
            # in the future we may want to color code the age
            age=image[2]
 
            # square roots of the three PCA eigenvalues give the point coordinates
            xyz=map(lambda x: sqrt(x), image[3])

            if Normalized:
                # divide by the cube root of the coordinate product
                xyz=map(lambda t: t/(prod(xyz)**(1.00/3.00)), xyz)
                #xyz=[xyz[0]/xyz[1],xyz[1]/xyz[1],xyz[2]/xyz[1]]

            # Here we highlight the possible outliers.
            # NOTE(review): in the three outlier branches below the point is
            # constructed but the append is commented out, so these images are
            # silently dropped from the plot -- confirm this is intended.
            if mouse == "662" and age == 204:  # abnormal spacing of the image
                p = point3d(xyz, rgbcolor=rgb, opacity=0.25, size=25)
                #Points.append(p)
            elif mouse == "661" and age == 203: # abnormal spacing of the image  
                p = point3d(xyz, rgbcolor=rgb, opacity=0.25, size=25)
                #Points.append(p)
            elif mouse == "661" and age == 377: # volume much small other than other 661 images, treated as a outlier
                p = point3d(xyz, rgbcolor=rgb, opacity=0.75, size=25)
                #Points.append(p)
            elif mouse in attention_list:
                p = point3d(xyz, rgbcolor=rgb, opacity=1, size=(15+age/60))
                Points.append(p)
            else:
                p = point3d(xyz, rgbcolor=rgb, opacity=0.25, size=10)
                Points.append(p)
                
            #Points.append(point3d(xyz, rgbcolor=rgb, size=(5+age/30)))

    return sum(Points)

def brain_PCA_PCA(mouse_object="/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj",brain_PCA_object="/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA.sobj"):
    '''
    PCA analysis of the per-brain PCA data: collect the square roots of the
    three eigenvalues of every image of every mouse as points in R^3 and run
    a principal component decomposition over that point cloud.
    '''
    import glob
    from datetime import date

    mouse_table=load(mouse_object)
    brain_PCA=load(brain_PCA_object)

    point_cloud=[]
    for mouse in mouse_table.keys():
        cgg=mouse_table[mouse][1]
        for image in brain_PCA[mouse]:
            # image[3] holds the three PCA eigenvalues of this brain image
            point_cloud.append(vector([sqrt(e) for e in image[3]]))

    return principal_component_decomposition(point_cloud)

def mouse_table_csv(mouse_table_object="/home/xqwang/projects/FXTAS/mouse/analysis/mouse.sobj",\
                    mouse_table_csv_file="/home/xqwang/projects/FXTAS/CORRECTED/analysis/mouse.csv"):
    '''
    Export mouse.sobj to a csv file (columns: mouse_id,birthday,cgg) for
    importing into R.
    '''
    mouse_table=load(mouse_table_object)

    # 'with' guarantees the csv is closed/flushed even if a write fails
    with open(mouse_table_csv_file,"w") as csv_out:
        csv_out.write("mouse_id,birthday,cgg\n")

        for mouse in mouse_table:
            birthday=mouse_table[mouse][0]
            cgg=mouse_table[mouse][1]
            csv_out.write("{0},{1},{2}\n".format(mouse, birthday,cgg))

def volume_csv(volume_table_object="/home/xqwang/projects/FXTAS/CORRECTED/analysis/volume.sobj",\
               volume_csv_file="/home/xqwang/projects/FXTAS/CORRECTED/analysis/volume.csv"):
    '''
    Export volume.sobj to a csv file for importing to R.
    Columns: mouse_id,imaging_date,age,volume,time_slot,problem
    '''
    def _time_slot(age):
        # map an age in days onto the 7 longitudinal imaging time slots
        for slot, upper in enumerate((95, 205, 378, 430, 532, 627), start=1):
            if age < upper:
                return slot
        return 7

    # images known to be bad: abnormal spacing (662@204, 661@203) or an
    # outlier volume (661@377)
    known_problems = set([("662", 204), ("661", 203), ("661", 377)])

    volume_table=load(volume_table_object)

    # 'with' guarantees the csv is closed/flushed even if a write fails
    with open(volume_csv_file,"w") as volume_csv:
        volume_csv.write("mouse_id,imaging_date,age,volume,time_slot,problem\n")

        for mouse in volume_table.keys():
            # records are (imaging_date, age, volume)
            for imaging_date, age, brain_volume in sorted(volume_table[mouse]):
                time_slot=_time_slot(age)
                problem=(mouse, age) in known_problems

                volume_csv.write("{0},{1},{2},{3},{4},{5}\n".format(mouse, imaging_date,age,brain_volume,time_slot,problem))

def brain_PCA_csv(brain_PCA_table_object="/home/xqwang/projects/FXTAS/CORRECTED/analysis/brain_PCA.sobj",\
                  brain_PCA_csv_file="/home/xqwang/projects/FXTAS/CORRECTED/analysis/brain_PCA.csv"):
    '''
    Export brain_PCA.sobj to a csv file for importing to R.
    Columns: mouse_id,imaging_date,age,length,width,height,time_slot,problem
    (length/width/height are the square roots of the three PCA eigenvalues).
    '''
    def _time_slot(age):
        # map an age in days onto the 7 longitudinal imaging time slots
        for slot, upper in enumerate((95, 205, 378, 430, 532, 627), start=1):
            if age < upper:
                return slot
        return 7

    # images known to be bad: abnormal spacing (662@204, 661@203) or an
    # outlier volume (661@377)
    known_problems = set([("662", 204), ("661", 203), ("661", 377)])

    brain_PCA_table=load(brain_PCA_table_object)

    # 'with' guarantees the csv is closed/flushed even if a write fails
    with open(brain_PCA_csv_file,"w") as brain_PCA_csv:
        brain_PCA_csv.write("mouse_id,imaging_date,age,length,width,height,time_slot,problem\n")

        for mouse in brain_PCA_table.keys():
            # records are (cgg, imaging_date, age, (eigen1, eigen2, eigen3))
            for imaging in sorted(brain_PCA_table[mouse]):
                imaging_date=imaging[1]
                age=imaging[2]
                length=sqrt(imaging[3][0])
                width=sqrt(imaging[3][1])
                height=sqrt(imaging[3][2])   # fixed local-name typo 'hight'
                time_slot=_time_slot(age)
                problem=(mouse, age) in known_problems

                brain_PCA_csv.write("{0},{1},{2},{3},{4},{5},{6},{7}\n".format(mouse,imaging_date,age,length,width,height,time_slot,problem))


def clean_up(volume_table_object="/home/xqwang/projects/FXTAS/mouse/analysis/volume.sobj",\
             brain_PCA_table_object="/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA.sobj"):
    '''
    Clean up repeated images within a single time slot; only the last image of
    each time slot (in sorted record order) is preserved.

    Writes brain_PCA_clean and volume_clean sage objects next to the analysis
    data. The 7-branch age-to-slot chain, previously duplicated twice inline,
    is factored into _time_slot.
    '''
    def _time_slot(age):
        # map an age in days onto the 7 longitudinal imaging time slots
        for slot, upper in enumerate((95, 205, 378, 430, 532, 627), start=1):
            if age < upper:
                return slot
        return 7

    def _keep_latest(table, age_index):
        # walk records in sorted order so a later record in the same slot
        # overwrites an earlier one
        clean=dict()
        for mouse in table.keys():
            time_slots=dict()
            for imaging in sorted(table[mouse]):
                time_slots[_time_slot(imaging[age_index])]=imaging
            clean[mouse]=time_slots.values()
        return clean

    brain_PCA_table=load(brain_PCA_table_object)
    volume_table=load(volume_table_object)

    # age lives at index 2 in brain_PCA records, at index 1 in volume records
    save(_keep_latest(brain_PCA_table, 2),"/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA_clean")
    save(_keep_latest(volume_table, 1),"/home/xqwang/projects/FXTAS/mouse/analysis/volume_clean")

def normalize(brain_PCA_table_object="/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA_clean.sobj"):
    '''
    Mod out the volume from the brain_PCA data: rescale each
    (length, width, height) triple -- the square roots of the eigenvalues --
    by the cube root of its product, so each normalized triple has unit
    geometric mean.
    '''
    brain_PCA_table=load(brain_PCA_table_object)

    normalized=dict()
    for mouse in brain_PCA_table.keys():
        records=[]
        for cgg, dob, age, eigen in sorted(brain_PCA_table[mouse]):
            lwh=[sqrt(e) for e in eigen]
            scale=prod(lwh)**(1.00/3.00)
            records.append((cgg, dob, age, (lwh[0]/scale, lwh[1]/scale, lwh[2]/scale)))
        normalized[mouse]=records

    save(normalized,"/home/xqwang/projects/FXTAS/mouse/analysis/brain_PCA_normalized")

###############################  Dicom to nifti conversion ###################################

# location of the py-dicom library and of the raw DICOM data provided by Chris
DICOM_LIB="/usr/share/pyshared"
DICOM_FILES_PATH="/media/RFB_STORE/Updated Data Files Directory/Dicom Files"


# make sure the py-dicom library is importable before 'import dicom' below
if not DICOM_LIB in sys.path:
    sys.path.append(DICOM_LIB)


from os.path import join
import dicom
import re

# Pattern for run directories; compiled once at module level instead of being
# recompiled on every directory visited by os.walk (loop-invariant work hoisted).
_RUN_DIR_PATTERN=re.compile('.*_r1$|.*_r2$')

def _acquisition_times_match(dir_r1, dir_r2):
    '''
    Return True when the first slice (im01) of both run directories carries
    the same DICOM AcquisitionTime, i.e. the two runs belong together.
    '''
    ds_r1=dicom.read_file(join(dir_r1,"im01"))
    ds_r2=dicom.read_file(join(dir_r2,"im01"))
    return ds_r1.AcquisitionTime == ds_r2.AcquisitionTime

def dicom2nii(target_dir='/home/xqwang/projects/FXTAS/mouse_corrected/ntrimouse'):
    '''
    Convert dicom files provided by Chris to Nifti format and, unlike qtdimp, preserving the important
    header information.

    Walks DICOM_FILES_PATH looking for directories that contain an "im01"
    slice.  Each acquisition is expected to come as a *_r1 / *_r2 pair of
    run directories with matching AcquisitionTime; the pair is checked from
    both sides, and the conversion itself (via the external "dinifti" tool,
    http://cbi.nyu.edu/software/dinifti.php) is triggered only when the _r2
    member is visited and the pair is consistent.

    target_dir -- directory that receives the converted Nifti files.

    Raises subprocess.CalledProcessError if dinifti exits non-zero.
    NOTE(review): relies on a module-level "logger" defined elsewhere in the file.
    '''
    for root, dirs, files in os.walk(DICOM_FILES_PATH):
        if "im01" not in files:
            continue

        if not _RUN_DIR_PATTERN.match(root):  # directory name not ended with _r1 or _r2
            logger.error(root+" is not ended with _r1 or _r2.")
            continue

        if root.endswith('_r1'):
            # _r1 side: only check that the _r2 partner exists and is consistent.
            root_r2=re.sub('_r1$','_r2',root)
            if os.path.exists(root_r2):
                if not _acquisition_times_match(root, root_r2):
                    logger.error(root +" and "+ root_r2 +" are inconsistant.")
            else:
                logger.error(root_r2 + " does not exists.")
        else:
            # _r2 side: check the _r1 partner, then convert on success so each
            # consistent pair is converted exactly once.
            root_r1=re.sub('_r2$','_r1',root)
            if os.path.exists(root_r1):
                if not _acquisition_times_match(root_r1, root):
                    logger.error(root_r1 +" and "+ root +" are inconsistant.")
                else:
                    subprocess.check_call(['dinifti', '-g', '--name=%I', root, target_dir])
            else:
                logger.error(root_r1 + " does not exists.")
