#!/usr/bin/python -itt

import os.path as path
import struct
import sys
import time

import numpy as np
import logging
import logging.config
import os
# resolve logging.conf relative to this script's own directory, so the
# config is found no matter what the current working directory is
scriptpath = os.path.dirname(os.path.realpath(__file__))
logging.config.fileConfig(scriptpath+'/logging.conf')
logger = logging.getLogger('readcorsika')

# files larger than this are refused by readFileIntoMem()
maxmem = 100. # megabytes

def readFileIntoMem( filename ):
    """  Read an entire corsika file into memory and return a string called *bytestream*
        
        **this function is called by readCorsikaFile()** and a user normally 
        does not need to call this function directly.
        
        In order to be efficient in parsing the contents of a corsika file
        I had the impression one should read the entire file in one blow. 
        And the work on the string, or the list of bytes.
        
        When the files get too large, this approach is of course totally stupid.
    """
    start = time.time()
    filesize = path.getsize( filename )
    if filesize/1024.**2 > maxmem:
        print "file:", filename, "is larger than:", maxmem, "MB"
        print "Please increase the maximum allowed memory usage or implement this routine in a better way"
        sys.exit(-1)
    f = open(filename, 'rb')
    bytestream = f.read(filesize)
    if len(bytestream) != filesize:
        raise Exception("file %s was not entirely read into mem." % (filename,))
    f.close
    logger.debug("readFileIntoMem duration: %0.4fs" , time.time()-start)
    return bytestream
    

def parseByteStringToDict( thedict, bytestream ):
    """ Parse the bytestream returned by readFileIntoMem() and return a dict.

        **this function is called by readCorsikaFile()** and a user normally
        does not need to call this function directly.

        The dict will contain the keys::

           d['corsikaRunHeader']     #dict with self explaining keys
           d['corsikaRunFooter']     #list of floats - no explanations - see corsika manual
           d['corsikaEventHeaders']  #list of dicts with self explaining keys
           d['corsikaEventFooters']  #list of lists of floats - no explanations - see corsika manual
           d['raw_corsika_array']    # numpy array with data of all photons

    """
    start = time.time()
    size = len(bytestream)

    # upper bound for the number of photons in the bytestream:
    # one photon is 7x4 bytes = 28 bytes.
    # per photon we keep the 7 raw floats plus 2 extra columns
    # (event id and photon id, filled in below).
    max_number_of_photons = int( size/28.)

    # Each "block" in a corsika file is always 273 "words" long.
    # a word is a 4-byte value, usually a float, but might also be an int.
    # The first "word" of a block is sometimes a sequence of 4 ascii
    # characters, in order to tell the reader what type of block follows next.
    # these sequences can be:
    #   * "RUNH" - a run header block
    #   * "RUNE" - a run footer block
    #   * "EVTH" - an event header block
    #   * "EVTE" - an event footer block
    # the corsika manual states more of these, but those are not
    # expected in a corsika "cer-file".
    #
    # the blocks after an "EVTH" contain photon data, until an "EVTE" block
    # is found.

    # the bytestream should be a multiple of 273*4 bytes; warn if not
    if size % (273*4) != 0:
        logger.warning("size of corsika file is not multiple of 273*4 bytes. filesize: %d b", size)

    ec = 0 # event counter
    dbc = 0 # data block counter
    # allocate the output array once, large enough for all photons,
    # so we do not have to reallocate memory again and again.
    data = np.zeros( ( max_number_of_photons,7+2) )


    if bytestream[:4] == 'RUNH':
        # plain format: the file is a bare sequence of 273-word blocks

        for blockstart_id in range(0, size, 273*4):

            if bytestream[blockstart_id:blockstart_id+4] == 'RUNH':
                run_header = struct.unpack( '272f', bytestream[blockstart_id+4:blockstart_id+273*4] )
                thedict['corsikaRunHeader'] = parseCorsikaRunHeaderToDict(run_header)
                thedict['corsikaEventHeaders'] = [] # empty list for event headers
                thedict['corsikaEventFooters'] = [] # empty list for event footers

            elif bytestream[blockstart_id:blockstart_id+4] == 'EVTH':
                event_header = struct.unpack( '272f', bytestream[blockstart_id+4:blockstart_id+273*4] )
                thedict['corsikaEventHeaders'].append(parseCorsikaEventHeaderToDict(event_header))

            elif bytestream[blockstart_id:blockstart_id+4] == 'EVTE':
                event_footer = struct.unpack( '272f', bytestream[blockstart_id+4:blockstart_id+273*4] )
                # The event_footer is not parsed any further
                thedict['corsikaEventFooters'].append(event_footer)
                ec += 1
            elif bytestream[blockstart_id:blockstart_id+4] == 'RUNE':
                run_footer = struct.unpack( '272f', bytestream[blockstart_id+4:blockstart_id+273*4] )
                # the run_footer is not parsed any further
                thedict['corsikaRunFooter'] = run_footer
                break
            else:
                # photon data block: 39 photons x 7 floats
                data_tuple = struct.unpack( '273f', bytestream[blockstart_id:blockstart_id+273*4] )
                data_array = np.array( data_tuple, dtype=np.float32 ).reshape(39,7)

                # subtract the core location (taken from the most recent
                # event header) from the x-y coordinates of all photons
                event_header_dict = thedict['corsikaEventHeaders'][-1]
                core_loc = np.array(event_header_dict['core location for scattered events in cm: (x,y)'][0])
                data_array[:,1:3] -= core_loc

                data[dbc:dbc+39,:-2] = data_array.copy()
                data[dbc:dbc+39,-2] = ec
                data[dbc:dbc+39,-1] = np.arange(dbc,dbc+39)
                dbc += 39
        # keep only those rows which actually contain a photon, i.e.
        # rows where at least one of the columns 4:7 is non-zero.
        good_lines = np.where((data[:,4:7]!=0.).any(axis=1))[0]
        thedict['raw_corsika_array'] = data[good_lines,:]

    else:
        # record format: the blocksize is stored before (and after) each
        # group of 273-word sub-blocks.

        blockstart = 0
        while blockstart < size:

            blocksize = struct.unpack('i',bytestream[blockstart:blockstart+4])[0]
            subblocksStart = blockstart+4
            subblocksEnd = blockstart+4+blocksize
            # +8: leading and trailing 4-byte size field around the payload
            blockEnd = blockstart+8+blocksize
            if blockEnd > size:
                raise Exception("blockEnd > filesize")

            subblocks = bytestream[subblocksStart:subblocksEnd]
            for blockstart_id in range(0, blocksize, 273*4):
                # (unconditional debug prints removed here: they spammed
                # stdout for every single sub-block of the file)

                if subblocks[blockstart_id:blockstart_id+4] == 'RUNH':
                    run_header = struct.unpack( '272f', subblocks[blockstart_id+4:blockstart_id+273*4] )
                    thedict['corsikaRunHeader'] = parseCorsikaRunHeaderToDict(run_header)
                    thedict['corsikaEventHeaders'] = [] # empty list for event headers
                    thedict['corsikaEventFooters'] = [] # empty list for event footers

                elif subblocks[blockstart_id:blockstart_id+4] == 'EVTH':
                    event_header = struct.unpack( '272f', subblocks[blockstart_id+4:blockstart_id+273*4] )
                    thedict['corsikaEventHeaders'].append(parseCorsikaEventHeaderToDict(event_header))

                elif subblocks[blockstart_id:blockstart_id+4] == 'EVTE':
                    event_footer = struct.unpack( '272f', subblocks[blockstart_id+4:blockstart_id+273*4] )
                    # The event_footer is not parsed any further
                    thedict['corsikaEventFooters'].append(event_footer)
                    ec += 1
                elif subblocks[blockstart_id:blockstart_id+4] == 'RUNE':
                    run_footer = struct.unpack( '272f', subblocks[blockstart_id+4:blockstart_id+273*4] )
                    # the run_footer is not parsed any further
                    thedict['corsikaRunFooter'] = run_footer
                    break
                else:
                    # photon data block: 39 photons x 7 floats
                    data_tuple = struct.unpack( '273f', subblocks[blockstart_id:blockstart_id+273*4] )
                    data_array = np.array( data_tuple, dtype=np.float32 ).reshape(39,7)

                    # subtract the core location (taken from the most recent
                    # event header) from the x-y coordinates of all photons
                    event_header_dict = thedict['corsikaEventHeaders'][-1]
                    core_loc = np.array(event_header_dict['core location for scattered events in cm: (x,y)'][0])
                    data_array[:,1:3] -= core_loc

                    data[dbc:dbc+39,:-2] = data_array.copy()
                    data[dbc:dbc+39,-2] = ec
                    data[dbc:dbc+39,-1] = np.arange(dbc,dbc+39)
                    dbc += 39

            blockstart = blockEnd

        # keep only those rows which actually contain a photon, i.e.
        # rows where at least one of the columns 4:7 is non-zero.
        good_lines = np.where((data[:,4:7]!=0.).any(axis=1))[0]
        thedict['raw_corsika_array'] = data[good_lines,:]


    logger.debug("parseByteStringToDict: %0.4fs", time.time()-start)
    return thedict
    

def parseCorsikaRunHeaderToDict( run_header ):
    """ parse the run header of a corsika file into a dict

        **this function is called by parseByteStringToDict()** and a user normally
        does not need to call this function directly.

        The run header of a corsika file is a 272 float list,
        where each number has a meaning by its position in the list.
        This is documented in the Corsika Manual.
        This function just moves the plain 272 floats from the original
        header into a dict with (hopefully) self explaining keys.

        :param run_header: sequence of exactly 272 floats (the RUNH block
            without its leading 4-byte "RUNH" marker)
        :returns: dict with named run parameters
        :raises ValueError: if *run_header* does not have 272 entries, or the
            encoded number of observation levels is out of range
    """
    if len(run_header) != 272:
        # was ``raise TypeException(...)`` before -- an undefined name
        raise ValueError('run_header length must be 272, but is: '+str(len(run_header)))
    h = run_header
    d = {}

    d['run number']=h[0]
    d['date of begin run']=int(round(h[1]))
    d['version of program']=h[2]

    n_obs_levels = int(round(h[3]))
    if (n_obs_levels < 1) or (n_obs_levels > 10):
        # the old code constructed an (undefined) exception without raising it
        raise ValueError('number of observation levels n must be 0 < n < 11, but is: '+str(h[3]))
    d['observation levels']=tuple(h[4:4+n_obs_levels])

    d['slope of energy spektrum']=h[14]
    d['energy range']=(h[15],h[16])
    d['flag for EGS4 treatment of em. component'] = h[17]
    d['flag for NKG treatment of em. component'] = h[18]

    d['kin. energy cutoff for hadrons in GeV'] = h[19]
    d['kin. energy cutoff for muons in GeV'] = h[20]
    d['kin. energy cutoff for electrons in GeV'] = h[21]
    d['energy cutoff for photons in GeV'] = h[22]

    # NOTE: the misspelled key 'phyiscal constants' is kept for backward
    # compatibility -- callers may already use it.
    d['phyiscal constants']=tuple(h[23:73])

    d['X-displacement of inclined observation plane'] = h[73]
    d['Y-displacement of inclined observation plane'] = h[74]
    d['Z-displacement of inclined observation plane'] = h[75]
    d['theta angle of normal vector of inclined observation plane'] = h[76]
    d['phi angle of normal vector of inclined observation plane'] = h[77]

    # some constant tables -- see the corsika manual for their meaning
    d['CKA'] = tuple(h[93:133])
    d['CETA'] = tuple(h[133:138])
    d['CSTRBA'] = tuple(h[138:149])

    d['scatter range in x direction for Cherenkov'] = h[246]
    d['scatter range in y direction for Cherenkov'] = h[247]

    # atmosphere layer boundaries and parameters
    d['HLAY'] = tuple(h[248:253])
    d['AATM'] = tuple(h[253:258])
    d['BATM'] = tuple(h[258:263])
    d['CATM'] = tuple(h[263:268])

    d['NFLAIN'] = h[268]
    d['NFLDIF'] = h[269]
    d['NFLPI0 + 100 x NFLPIF'] = h[270]
    d['NFLCHE + 100 x NFRAGM'] = h[271]

    return d
        

def parseCorsikaEventHeaderToDict( event_header ):
    """ parse the event header of a corsika file into a dict

        **this function is called by parseByteStringToDict()** and a user normally
        does not need to call this function directly.

        The event header of a corsika file is a 272 float list,
        where each number has a meaning by its position in the list.
        This is documented in the Corsika Manual.
        This function just moves the plain 272 floats from the original
        header into a dict with (hopefully) self explaining keys.

        :param event_header: sequence of exactly 272 floats (the EVTH block
            without its leading 4-byte "EVTH" marker)
        :returns: dict with named event parameters
        :raises ValueError: if *event_header* does not have 272 entries, or an
            encoded count is out of its valid range
    """
    if len(event_header) != 272:
        # was ``raise TypeException(...)`` (undefined) with a copy-pasted
        # 'run_header' message
        raise ValueError('event_header length must be 272, but is: '+str(len(event_header)))
    h = event_header
    d = {}

    # int() auto-promotes to long in python 2, so plain int() is safe here
    d['event number'] = int( round(h[0]) )
    d['particle id (particle code or A x 100 + Z for nuclei)'] = int( round(h[1]) )

    d['total energy in GeV'] = h[2]
    d['starting altitude in g/cm2'] = h[3]
    d['number of first target if fixed'] = h[4]
    d['z coordinate (height) of first interaction in cm'] = h[5]
    d['momentum in GeV/c in (x, y, -z) direction;'] = (h[6],h[7],h[8])
    d['angle in radian: (zenith, azimuth)'] = (h[9],h[10])

    n_random_number_sequences = int(round(h[11]))
    if (n_random_number_sequences < 1) or (n_random_number_sequences > 10):
        # the old code constructed an (undefined) exception without raising it
        raise ValueError('number of random number sequences n must be 0 < n < 11, but is: '+str(h[11]))
    rand_number_sequences = []
    for i in range(12,12 + 3*n_random_number_sequences,3):
        seed = int( round(h[i]) )
        # the number of offset random calls is split over two words:
        # millions in h[i+2] and the remainder in h[i+1]
        number_of_calls = int(round(h[i+2]))*1e6 + int(round(h[i+1]))
        rand_number_sequences.append( (seed, number_of_calls) )
    d['random number sequences: (seed, # of offset random calls)']=tuple(rand_number_sequences)

    # these are already in run_header ...
    # can be used for sanity check
    # -------------------------------------------------------
    d['run number'] = h[42]
    d['date of begin run (yymmdd)'] = int( round(h[43]) )
    d['version of program'] = h[44]

    n_obs_levels = int(round(h[45]))
    if (n_obs_levels < 1) or (n_obs_levels > 10):
        raise ValueError('number of observation levels n must be 0 < n < 11, but is: '+str(h[45]))
    d['observation levels']=tuple(h[46:46+n_obs_levels])

    d['slope of energy spektrum']=h[56]
    d['energy range']=(h[57],h[58])

    d['kin. energy cutoff for hadrons in GeV'] = h[59]
    d['kin. energy cutoff for muons in GeV'] = h[60]
    d['kin. energy cutoff for electrons in GeV'] = h[61]
    d['energy cutoff for photons in GeV'] = h[62]

    d['NFLAIN'] = h[63]
    d['NFLDIF'] = h[64]
    d['NFLPI0'] = h[65]
    d['NFLPIF'] = h[66]
    d['NFLCHE'] = h[67]
    d['NFRAGM'] = h[68]

    d["Earth's magnetic field in uT: (x,z)"] = ( h[69], h[70] )

    d['flag for activating EGS4'] = h[71]
    d['flag for activating NKG'] = h[72]
    d['low-energy hadr. model flag (1.=GHEISHA, 2.=UrQMD, 3.=FLUKA)'] = h[73]
    d['high-energy hadr. model flag (0.=HDPM,1.=VENUS, 2.=SIBYLL,3.=QGSJET, 4.=DPMJET, 5.=NE X US, 6.=EPOS)'] = h[74]
    d['CERENKOV Flag (is a bitmap --> usersguide)'] = hex(int(round(h[75])))
    d['NEUTRINO flag'] = h[76]
    d['CURVED flag (0=standard, 2=CURVED)'] = h[77]
    d['computer flag (3=UNIX, 4=Macintosh)'] = h[78]
    d['theta interval (in degree): (lower, upper edge) '] = ( h[79], h[80] )
    d['phi interval (in degree): (lower, upper edge) '] = ( h[81], h[82] )

    d['Cherenkov bunch size in the case of Cherenkov calculations'] = h[83]
    d['number of Cherenkov detectors in (x, y) direction'] = (h[84], h[85])
    d['grid spacing of Cherenkov detectors in cm (x, y) direction'] = ( h[86], h[87])
    d['length of each Cherenkov detector in cm in (x, y) direction'] = ( h[88], h[89])
    d['Cherenkov output directed to particle output file (= 0.) or Cherenkov output file (= 1.)'] = h[90]

    d['angle (in rad) between array x-direction and magnetic north'] = h[91]
    d['flag for additional muon information on particle output file'] = h[92]
    d['step length factor for multiple scattering step length in EGS4'] = h[93]
    d['Cherenkov bandwidth in nm: (lower, upper) end'] = ( h[94], h[95] )
    d['number i of uses of each Cherenkov event'] = h[96]

    # 20 possible (x, y) core positions for scattered (reused) events.
    # wrapped in list() so the result stays indexable under python 3 too
    # (identical to the bare zip() under python 2).
    d['core location for scattered events in cm: (x,y)'] = list(zip(h[97:117], h[117:137]))

    d['SIBYLL interaction flag (0.= no SIBYLL, 1.=vers.1.6; 2.=vers.2.1)'] = h[137]
    d['SIBYLL cross-section flag (0.= no SIBYLL, 1.=vers.1.6; 2.=vers.2.1)'] = h[138]
    d['QGSJET interact. flag (0.=no QGSJET, 1.=QGSJETOLD,2.=QGSJET01c, 3.=QGSJET-II)'] = h[139]
    d['QGSJET X-sect. flag (0.=no QGSJET, 1.=QGSJETOLD,2.=QGSJET01c, 3.=QGSJET-II)'] = h[140]
    d['DPMJET interaction flag (0.=no DPMJET, 1.=DPMJET)'] = h[141]
    d['DPMJET cross-section flag (0.=no DPMJET, 1.=DPMJET)'] = h[142]
    d['VENUS/NE X US/EPOS cross-section flag (0=neither, 1.=VENUSSIG,2./3.=NEXUSSIG, 4.=EPOSSIG)'] = h[143]
    d['muon multiple scattering flag (1.=Moliere, 0.=Gauss)'] = h[144]
    d['NKG radial distribution range in cm'] = h[145]
    d['EFRCTHN energy fraction of thinning level hadronic'] = h[146]
    d['EFRCTHN x THINRAT energy fraction of thinning level em-particles'] = h[147]
    d['actual weight limit WMAX for thinning hadronic'] = h[148]
    d['actual weight limit WMAX x WEITRAT for thinning em-particles'] = h[149]
    d['max. radius (in cm) for radial thinning'] = h[150]
    d['viewing cone VIEWCONE (in deg): (inner, outer) angle'] = (h[151], h[152])
    d['transition energy high-energy/low-energy model (in GeV)'] = h[153]
    d['skimming incidence flag (0.=standard, 1.=skimming)'] = h[154]
    d['altitude (cm) of horizontal shower axis (skimming incidence)'] = h[155]
    d['starting height (cm)'] = h[156]
    # NOTE(review): h[156] is used twice here; one of the two entries is
    # probably meant to read a different word -- check against the corsika
    # manual before changing (the original code had the same duplication).
    d['flag indicating that explicite charm generation is switched on'] = h[156]
    d['flag for hadron origin of electromagnetic subshower on particle tape'] = h[157]
    d['flag for observation level curvature (CURVOUT) (0.=flat, 1.=curved)'] = h[166]

    return d


def ExtractRawMmcsArray(run, inkey='raw_corsika_array', outkey='corsika_photon_data', delete=True):
    """ extract the raw_corsika_array in *run* into numpy arrays and return run

        **this function is called by readCorsikaFile()** and a user normally
        does not need to call this function directly.

        The information about every photon is stored by MMCS6500 or by Corsika
        in 7 floats per photon; the parser appended two more columns (event id
        and photon id).  This function splits that large 2D array into
        smaller, per-quantity numpy arrays.

        A new nested dict run[outkey] will be added to the input dict.
        If *delete* is true, run[inkey] will be removed afterwards.

        The following keys will be added to run[outkey]::

            run[outkey]['j']=j
            run[outkey]['imov']=imov
            run[outkey]['wavelength']=wavelength
            run[outkey]['time']=prod_time
            run[outkey]['prod_height'] = height
            run[outkey]['ground_pos'] = pos
            run[outkey]['ground_dir'] = direction
            run[outkey]['event_id'] = event_id
            run[outkey]['photon_id'] = photon_id

        :raises Exception: if *outkey* is already present in *run*
    """
    start = time.time()
    # The first of the 7 floats per photon does not comply with the original
    # corsika data sheet: a patched corsika (by Dorota from Lodz; no docs
    # found, but the source sits next to the binary) encodes three values
    # into it as  J*100000. + IMOV*1000. + WAVELENGTH  where
    #   J          - possibly the parent particle type -- TODO confirm
    #   IMOV       - seems to be constant = 1, related to shower reuse
    #   WAVELENGTH - cherenkov photon wavelength in nm
    #
    # Column layout of the raw array (see parseByteStringToDict):
    #   [0]   encoded info (see above)
    #   [1:3] x,y position in cm (x: north, y: west; core already subtracted)
    #   [3:5] u,v direction cosines to the x and y axis
    #   [5]   time since first interaction in ns
    #   [6]   height of production in cm
    #   [7]   event id (appended by the parser)
    #   [8]   photon id (appended by the parser)
    data = run[inkey]
    n = data.shape[0]
    code = np.round(data[:,0]).astype(int)
    # floor division written explicitly: identical to '/' on int arrays in
    # python 2, but still integer-valued under python 3 semantics.
    j = code//100000
    imov = (code-j*100000)//1000
    wavelength = np.mod(data[:,0], 1000.)

    prod_time = data[:,5]
    height = data[:,6]

    # position on the observation level; the z-column stays 0
    pos = np.zeros( (n,3), dtype=np.float32)
    pos[:,:-1] = data[:,1:3]

    # named 'direction' instead of 'dir' to avoid shadowing the builtin
    direction = np.zeros( (n,3), dtype=np.float32)
    direction[:,:-1] = data[:,3:5]

    # the first two coordinates are the direction cosines u and v;
    # the third one is sqrt(1 - u^2 - v^2).
    # NOTE(review): if u^2 + v^2 > 1 due to float noise this yields NaN.
    direction[:,-1] = np.sqrt((direction*direction).sum(axis=1) * -1. + 1.)

    event_id = data[:,7]
    photon_id = data[:,8]

    if outkey not in run:
        run[outkey] = {}
    else:
        raise Exception("outkey %s already in input dict" % outkey)

    run[outkey]['j']=j
    run[outkey]['imov']=imov
    run[outkey]['wavelength']=wavelength
    run[outkey]['time']=prod_time
    run[outkey]['prod_height'] = height
    run[outkey]['ground_pos'] = pos
    run[outkey]['ground_dir'] = direction
    run[outkey]['event_id'] = event_id
    run[outkey]['photon_id'] = photon_id

    if delete:
        del run[inkey]

    logger.debug("ExtractRawMmcsArray: %0.4fs",time.time()-start)
    return run
    
def readCorsikaFile( thedict, filename=None ):
    """ Read and interpret corsika file and return dict

        This function opens and reads an entire Corsika file. More precisely it read an MMCS6500 file,
        since it was developed for this private version of Corsika developed by the MAGIC collaboration.
        It can be called either as ``readCorsikaFile(mydict, "/path/cer000001")``
        or, for convenience, with the filename alone, in which case a fresh
        dict is created.
        The dict, which is returned contains nested dictionaries. The most common call looks like::

           >>> run = readCorsikaFile("/corsika/folder/cer000001")
           >>> run.keys()
           ['corsikaRunHeader', 'corsikaRunFooter', 'photonData', 'corsikaEventHeaders', 'corsikaEventFooters']

        The information inside a corsika file, can be seen as three types
        of information.

        **Run based information** one can find inside the so called
        run header, and also in the run footer. This information can be found in::

           run['corsikaRunHeader']
           run['corsikaRunFooter']

        While the run header was parsed into a dict, with nicely self explaining keys,
        the run footer was not (yet?).

        **Event based information** can be found in a similar fashion inside two lists::

           run['corsikaEventHeaders']
           run['corsikaEventFooters']

        The former list contains again a dict full of nicely self documenting keys,
        while the latter is just a list of lists of floats. In the future, I would like to
        have this information in numpy arrays much like the data for each photon.

        **Data for all photons**

        For the first part of the detector simulation all photons are treated equally.
        There is no difference between photon 3 from event 22 and photon 101 of event 2.
        The entire data available for the photons is stored inside a nested dict like::

           >>> print run['photonData'].keys()
           ['prod_height', 'event_id', 'j', 'photon_id', 'imov', 'time', 'ground_pos', 'wavelength', 'ground_dir']

        In order to use the photon data one usually goes like this::

           >>> from pylab import *
           >>> run = readCorsikaFile("/corsika/folder/cer000001")
           >>> data = run['photonData']
           >>> hist(data['wavelength'], bins=50)
    """
    if filename is None:
        # called as readCorsikaFile(filename): the single positional
        # argument is the file name; start from an empty dict.
        # (previously this call style -- used by __main__ and the docstring
        # examples -- raised a TypeError)
        filename = thedict
        thedict = {}
    bytestream = readFileIntoMem(filename)
    thedict = parseByteStringToDict(thedict, bytestream )
    thedict = ExtractRawMmcsArray(thedict, inkey='raw_corsika_array', outkey='photonData', delete=True)
    return thedict

def make_plots( run ):
    """ Create a set of debugging plots from the extracted photon data. """
    import new_helpers as helpers
    photons = run['photonData']

    # 2D histograms: ground position and ground direction (x,y columns only)
    for key in ('ground_pos', 'ground_dir'):
        helpers.plot_in_TH2F( photons[key][:,:-1], key )

    # 1D histograms of the scalar per-photon quantities
    for key in ('wavelength', 'prod_height', 'time', 'imov', 'j'):
        helpers.plot_in_TH1F( photons[key], key )

if __name__ == '__main__':
    import rlcompleter
    import readline
    readline.parse_and_bind('tab: complete')
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
        help="write report to FILE", metavar="FILE")
    parser.add_option("-p", "--plot",
        action="store_true", dest="create_plots", default=False,
        help="create some informative debugging plots")

    options, args = parser.parse_args()
    
    if options.filename is None:
        print 'Please submit a filename. "', sys.argv[0],'-h" for help'
        sys.exit(1)

    run = readCorsikaFile(options.filename)

    if options.create_plots:
        make_plots( run )
