import csv
import math
import os
import sys
import numpy
from netCDF4 import Dataset
from datetime import datetime
import _parts
import fnmatch
from odcutils.ncgen.netCDFGen import ncgen
class netCDFLoader:
    """Template-driven loader: builds CDL files from a parameter CSV plus a
    per-instrument template, generates netCDF files from them, and loads
    SBE25 (.cnv) or RBR (.dat) CTD data into the resulting datasets.
    Written for Python 2 (print statements, list-returning map()).
    """

    def __init__(self):
        # Lists of generated cdl file names and of the matching netcdf base
        # names; both are filled in by templateCDL()/_ctdtemplate()
        self.newcdl = []
        self.netcdf_name = []
        # NOTE(review): aliases the csv module on the instance; every method
        # below uses the module-level `csv` directly -- confirm this is needed
        self.csv = csv

    def templateCDL(self, type, gfile):
        """Reads template input from file. Use one file for each template
           type (CTD, ADCP, Minibat).
           Usage:
                templateCDL(type, param_file)
                param_file: string of full path to parameter file

           Parameter file format: CSV with a heading row; each following
           line is the info for a single templated file with the following
           descriptors as heading row for any instrument type:
            source_file
            :start_date (yyyymmddHHMMSS)
            :institution
            :institution_id
            :institution_url
            :institution_dods_url
            :contact
            :source
           
           Add extra columns depending on instrument type:
            CTD:
                z
                lat_dat
                lon_dat
                :station_name
                :platform_code
                :instrument_code
                :instrument_sn
                :process_level

            ADCP moored:
                t
                z
                lat_dat
                lon_dat
                :station_name
                :platform_code
                :instrument_code
                :instrument_sn
                :process_level

            ADCP hull mounted:
                t
                z
                :platform_code
                :instrument_code
                :instrument_sn
                :process_level

            Minibat:
                t
                z
                :platform_code
                :instrument_code
                :instrument_sn

           NOTE(review): this docstring lists ':institution_id', but
           _ctdtemplate() reads ':institution_code' from the parameter
           rows -- confirm which descriptor name the CSV actually carries.
           """ 
        self.type = type
        # Open the parameter CSV described in the docstring above
        f1 = open(gfile, 'rt')
        self.gfile = []
        # Read the parameter file into a list of dictionaries, one per row.
        # QUOTE_NONNUMERIC makes DictReader convert every unquoted field to
        # float, so numeric descriptors (:start_date, :process_level, ...)
        # come back as floats -- hence the int() casts further down.
        reader = csv.DictReader(f1, delimiter = ',' , \
                                quoting = csv.QUOTE_NONNUMERIC)
        for line in reader:
            self.gfile.append(line)
        self.nfiles = len(self.gfile)
        f1.close()
        # Dispatch to the templater for the requested instrument type;
        # only the CTD path is implemented so far
        if type == "CTD":
            return self._ctdtemplate()
        elif type == "ADCP_moored":
            print "not yet implemented\n"
        elif type == "ADCP_hull":
            print "not yet implemented\n"
        elif type == "Minibat":
            print "not yet implemented\n"
        else:
            print "Just choose from what you'r offered!\n"

    def _ctdtemplate(self):
        """Private template function for all CTD type datastructures.
           Builds one cdl file per parameter row according to the supplied
           parameter file, recording the generated cdl file names in
           self.newcdl and the netcdf base names in self.netcdf_name.
        """
        for i in range(self.nfiles):
            # Read in the ctd template matching this row's instrument
            # code / process level
            incdl = self._choosetemplate(self.gfile[i])
            # Create composite field: station_id = institution code + name
            self.gfile[i][':station_id'] = \
                self.gfile[i][':institution_code'] + \
                self.gfile[i][':station_name']
            # Work on a copy of the template lines
            acdl = incdl[:]
            # Run through all the lines in the cdl file and replace the
            # value of each key found in the parameter row.
            # NOTE(review): acdl is modified while being iterated, and
            # acdl.index(line) returns the FIRST matching line -- this
            # assumes template lines are unique; confirm for the cdl
            # templates in use.
            for line in acdl:
                # Loop keys in this parameter row
                for key in self.gfile[i]:
                    mkey = str(key) + ' =' 
                    if line.startswith(mkey):
                        val = self.gfile[i][key]
                        if _parts.isFloat(val):
                            # NOTE(review): int(val) truncates any
                            # fractional part of numeric attribute values
                            # -- confirm this is intended
                            aline = key + ' = ' + str(int(val)) + ';\n'
                            acdl[acdl.index(line)] = aline
                            break
                        else:
                            aline = key + ' = "' + self.gfile[i][key] + '";\n'
                            acdl[acdl.index(line)] = aline
                            break
                    else:
                        continue
            # Compose the netcdf base name:
            # <institution>_<instrument>_<start_date>_<station>_L<level>
            netcdf_name = self.gfile[i][':institution_code'] + '_' + \
                            self.gfile[i][':instrument_code'] + '_' + \
                            '%6.0f' % self.gfile[i][':start_date'] + '_' +\
                            self.gfile[i][':station_name'] + '_' + \
                            'L' + str(int(self.gfile[i][':process_level']))
            self.netcdf_name.append(netcdf_name)
            # The first cdl line carries the dataset name
            acdl[0] = 'netcdf '+ netcdf_name + '{\n'
            # Build the new cdl file name, zero-padded by file count.
            # NOTE(review): math.floor() returns a float in Python 2, so
            # str(nzero) is e.g. '1.0' and the format becomes '%01.0d'
            # (a precision, not a field width) -- confirm the zero padding
            # behaves as intended.
            nzero = math.floor(math.log(self.nfiles,10)) + 1
            fmt = '%0' + str(nzero) + 'd'
            self.newcdl.append('ctd_new' + fmt % i + '.cdl')
            f2 = open(self.newcdl[i],'w')
            f2.write(''.join(acdl))
            f2.close()

    def createNetCDF(self):
        """Creates the netcdf files templated with the template method,
        using the pure-python ncgen from odcutils instead of the external
        ncgen binary (kept below for reference).
        """
############################ USES EXTERNAL ncgen #######################
#        for cdl in self.newcdl:
#            cmd = 'ncgen -b ' + cdl
#            os.system(cmd)
########################################################################
        for cdl in self.newcdl:
            [dims, vars, atts] = ncgen(cdl)

    def loadSBE25(self):
        """Reads in the files in the source_file descriptor (Sea-Bird SBE25
           .cnv format) and loads the data into the respective netcdf file.
           Requires templateCDL() and createNetCDF() to have run first.
        """
        # If templateCDL() has not run, self.gfile does not exist and the
        # resulting AttributeError is reported at the bottom
        try:
            # Loop all lines in input parameters looking for each cnv file
            for i in range(len(self.gfile)):
                # Open cnv file
                f1 = open(self.gfile[i]['source_file'])
                # Dump all data into intermediate variable
                dump = f1.readlines()
                # Read the timestamp from the '# start_time' header line.
                # NOTE(review): if no such line exists, `c` stays unbound
                # and strptime below raises NameError (not caught here).
                for line in dump:
                    if line.startswith('# start_time'):
                        c = line.split()
                        c = ''.join(c[3:])
                # Create datetime object, e.g. from 'Jan012000 12:00:00'
                do = datetime.strptime(c, '%b%d%Y%H:%M:%S')
                # Convert gregorian datetime to days since the
                # pre-industrial datum used by the netcdf time axis
                gdate = _parts.dTime2Loch(do)
                # Find the '*END*' header-end marker and cut the header off;
                # try windows then unix line endings
                try:
                    hd = dump.index('*END*\r\n')
                except ValueError:
                    hd = dump.index('*END*\n')
                dump = dump[hd + 1:]
                # Initialise data array
                data = []
                # Convert list of strings to numbers
                for line in dump:
                    # split() splits the string of space-separated numerals
                    # into single string numerals and map() applies float to
                    # every output of line.split() (a list in Python 2)
                    numbers = map(float, line.split())
                    # append to data array
                    data.append(numbers)
                # open the netcdf file in append mode
                fname = self.netcdf_name[i] + '.nc'
                fdataset = Dataset(fname, 'a')
                # time (convert days to seconds from pre-industrial datum)
                fdataset.variables['time'][:] = gdate * 3600 * 24
                # Lat, Lon come from the parameter file, not the cnv data
                fdataset.variables['lat'][:] = self.gfile[i]['lat_dat']
                fdataset.variables['lon'][:] = self.gfile[i]['lon_dat']
                # Make numpy array
                data = numpy.vstack(data)
                # Transpose for looping over single variables (rows are
                # now cnv columns)
                data = data.transpose()
                # Remove last row (flag column of the cnv file)
                data = data[0:-1,:]
                # Dump data: pairs netcdf variables (in reverse definition
                # order) with cnv columns in reverse -- assumes the cdl
                # template declares variables in cnv column order; TODO
                # confirm. On a dimension mismatch report file/variable.
                try:
                    for key, line in \
                        zip(reversed(fdataset.variables), reversed(data)):
                        fdataset.variables[key][:] = line
                except IndexError:
                    print fname, key
                # Close netcdf file
                fdataset.close()
                # Close cnv file
                f1.close()
        except AttributeError:
            print 'Data loading failed: please create netCDF files first.\n'


    def loadRBR(self):
        """Reads in the files in the source_file descriptor (RBR .dat
           format) and loads the data into the respective netcdf file.
           Requires templateCDL() and createNetCDF() to have run first.
           NOTE(review): unlike loadSBE25(), a missing self.gfile is not
           caught here -- confirm whether the AttributeError guard was
           meant to apply to this loader too.
        """
        # Loop all lines in input parameters looking for each dat file
        for i in range(len(self.gfile)):
            # Open dat file
            f1 = open(self.gfile[i]['source_file'])
            # Dump all data into intermediate variable
            dump = f1.readlines()
            # Read timestamp: the first column of the first line is a
            # serial date number
            timestamp = float(dump[0].split()[0])
            do = _parts.dNum2DTime(timestamp)
            date = do.strftime('%Y%m%d%H%M%S')
            # Convert gregorian datetime to days since the pre-industrial
            # datum used by the netcdf time axis
            gdate = _parts.dTime2Loch(do)
            # Initialise data array
            data = []
            # Convert list of strings to numbers
            for line in dump:
                numbers = map(float, line.split())
                # append to data array
                data.append(numbers)
            # open the netcdf file in append mode
            fname = self.netcdf_name[i] + '.nc'
            fdataset = Dataset(fname, 'a')
            # time (convert days to seconds from pre-industrial datum)
            fdataset.variables['time'][:] = gdate * 3600 * 24
            # Lat, Lon come from the parameter file, not the dat data
            fdataset.variables['lat'][:] = self.gfile[i]['lat_dat']
            fdataset.variables['lon'][:] = self.gfile[i]['lon_dat']
            # Make numpy array
            data = numpy.vstack(data)
            # Transpose for looping over single variables (rows are now
            # dat columns)
            data = data.transpose()
            # Remove 1st row (flag).
            # NOTE(review): this is the column the timestamp above was read
            # from -- confirm whether it is a flag or the time column.
            data = data[1:,:]
            # Dump data: pairs netcdf variables (in reverse definition
            # order) with dat columns in reverse -- assumes the cdl template
            # declares variables in dat column order; TODO confirm. On a
            # dimension mismatch report file/variable.
            try:
                for key, line in \
                    zip(reversed(fdataset.variables), reversed(data)):
                    fdataset.variables[key][:] = line
            except IndexError:
                print fname, key
            # Close netcdf file
            fdataset.close()
            # Close dat file
            f1.close()


    def probe(self, adir):
        """Reads all input files in directory `adir` and writes 'probe.csv'
        in that directory with the following columns: 'filename', 'nscans',
        'date'.
        Implemented for SBE (.cnv) and RBR (.dat) CTD input files.
        """
        fname = []
        nscans = []
        date = []
        # Collect the SBE cnv files
        cnvlist = fnmatch.filter(os.listdir(adir),'*.cnv')
        # Collect the RBR dat files
        datlist = fnmatch.filter(os.listdir(adir),'*.dat')
        # Probe each cnv file
        for cnv in cnvlist:
            fname.append(os.path.join(adir,cnv))
            # Open cnv file
            f1 = open(os.path.join(adir,cnv))
            # Dump all data into intermediate variable
            dump = f1.readlines()
            # Close cnv file
            f1.close()
            # Call probe
            ns, dt = self._cnvProbe(dump)
            # Append to variables (tuple expression performs both appends)
            nscans.append(ns) , date.append(dt)
        # Probe each dat file
        for dat in datlist:
            fname.append(os.path.join(adir,dat))
            print fname[-1]
            # Open dat file
            f1 = open(os.path.join(adir,dat))
            # Dump all data into intermediate variable
            dump = f1.readlines()
            # Close dat file
            f1.close()
            # Call probe
            ns, dt = self._datProbe(dump)
            # Append to variables
            nscans.append(ns) 
            date.append(dt)
        # Open csv output file
        f1 = open(os.path.join(adir,'probe.csv'), 'w')
        # Create csv file writer object
        writer = csv.writer(f1)
        # Use zip to distribute items in the 3 variables into each row
        L = zip(fname, nscans, date)
        # Write into csv file
        writer.writerows(L)
        # Close csv file
        f1.close()


    def _datProbe(self, dump):
        """Probes one RBR dat file, given as a list of lines. Returns
        (nscans, date): nscans is the number of lines; date is the
        'YYYYmmddHHMMSS' string decoded from the serial date number in the
        first column of the first line."""
        # Find timestamp in the first column of the first line
        timestamp = map(float, dump[0].split())[0]
#        print timestamp
        # Create datetime object from the serial date number
        do = _parts.dNum2DTime(timestamp)
        date = do.strftime('%Y%m%d%H%M%S')
        # NOTE(review): counts every line as a scan -- assumes dat files
        # carry no header lines; confirm.
        nscans = len(dump)
        return nscans, date


    def _cnvProbe(self, dump):
        """Probes one SBE cnv file, given as a list of lines. Returns
        (nscans, date): nscans is the number of data lines after the
        '*END*' header marker; date is the 'YYYYmmddHHMMSS' string from the
        '# start_time' header line.
        NOTE(review): if the file has no '# start_time' line, `date` stays
        unbound and the return raises UnboundLocalError."""
        # Read timestamp from the header
        for line in dump:
            if line.startswith('# start_time'):
                c = line.split()
                c = ''.join(c[3:])
                # Create datetime object, e.g. from 'Jan012000 12:00:00'
                do = datetime.strptime(c, '%b%d%Y%H:%M:%S')
                date = do.strftime('%Y%m%d%H%M%S')
        # Find the '*END*' header-end marker and cut the header off;
        # try windows then unix line endings
        try:
            hd = dump.index('*END*\r\n')
        except ValueError:
            hd = dump.index('*END*\n')
        dump = dump[hd + 1:]
        nscans = len(dump)
        return nscans, date


    def _choosetemplate(self, gline):
        """Returns the lines of the correct template file for one parameter
        row. Templates live in the '_templates' directory alongside this
        module; '<instrument_code>_L<process_level>.cdl' is tried first,
        falling back to '<instrument_code>.cdl' (a missing fallback file
        raises IOError to the caller)."""
        # Get path to the local install of netCDFLoader.py
        basedir = os.path.dirname(os.path.realpath(__file__))
        # Build path to the _templates directory
        templdir = os.path.join(basedir,'_templates')
        # Build template filename pieces from instrument code and
        # process level (a float from the CSV, hence int())
        icode = str(gline[':instrument_code'])
        plevel = 'L' + str(int(gline[':process_level']))
        # Try to open file with instrument code and process level
        try:
            fname = icode + '_' + plevel + '.cdl'
            fname = os.path.join(templdir,fname)
            f = open(fname)
            lines = f.readlines()
            f.close()
        except IOError:
            # If unlucky try with just the instrument code
            fname = icode + '.cdl'
            fname = os.path.join(templdir,fname)
            f = open(fname)
            lines = f.readlines()
            f.close()
        return lines
