#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Purpose: Text export for HydroPlatform
# Created: 08/26/2009
# $Id $
# Copyright (c) 2008,2009 by University College London
# Authors:
#  Didrik Pinte <dpinte@dipole-consulting.com>
#
# This program is free software under the GPL (>=v2)
# Read the file COPYING coming with HydroPlatform for details.




import codecs
from itertools import count, chain
import logging
import numpy
import os
import sets
import datetime

from sqlalchemy import and_

from hydroplatform.model.export import Exporter, ExportError, ExporterRegistry
from hydroplatform.model import network as nt
from hydroplatform.model import fields as fd
from hydroplatform.model import units

MAX_LEN = 31

class TextExport(Exporter):
    '''
    Exports network to a set of text files
    '''
    _name = "Text exporter for GAMS"
    _description  = '''Exports a network to a set of text files '''
    
    def __init__(self):
        """Initialise the exporter; the DB session is attached later by export()."""
        Exporter.__init__(self)
        # No session until export() is called with one.
        self.session = None
        
    def export(self, network, filename, session=None):
        '''
        Export the given network to a set of text files.

        `filename` is used as an output *directory*: it is (re)created and
        filled with one text file per GAMS set/table plus a "data.txt"
        listing that $includes them all.

        Parameters:
        -----------
        network : nt.Network
            network to export
        filename : str
            output directory path
        session : SQLAlchemy session
            required; used to query nodes, links and field values

        Raises ExportError when `session` is None.
        '''
        # NOTE(review): os.remove() fails on an existing *directory*; this
        # only cleans up a pre-existing plain file of that name -- confirm
        # intended behaviour for repeated exports to the same path.
        if os.path.exists(filename):
          os.remove(filename)
        os.mkdir(filename)
        basedir = filename
        
        if session is None:
            raise ExportError(u"Cannot export without a session object")
        self.session = session
        assert isinstance(network, nt.Network)
        
        
 
        proj = network.project
        
        # Distinct years/months/days over the project horizon become the
        # GAMS time-step sets yr/mn/dy.
        dvalues = units.getDateSerie(proj.horizon_from, proj.horizon_to, proj.time_step)
       
        yearset =sets.Set()
        monthset = sets.Set()
        dayset = sets.Set()
        for dvalue in dvalues:
            yearset.add(dvalue.year)
            monthset.add(dvalue.month)
            dayset.add(dvalue.day)
            
        # NOTE(review): iterating a Set yields no guaranteed order, so the
        # entries of yr/mn/dy.txt may be unsorted -- confirm GAMS accepts that.
        yr_file = codecs.open(os.path.join(basedir, u"yr.txt"), mode="w", encoding="utf8") 
      # if len(monthset)-1:
        mm_file = codecs.open(os.path.join(basedir, u"mn.txt"), mode="w", encoding="utf8") 
      # if len(dayset)-1:
        dy_file = codecs.open(os.path.join(basedir, u"dy.txt"), mode="w", encoding="utf8") 
        for x in yearset:
          yr_file.write("%i\n" % x) 
        yr_file.close()
      # if len(monthset)-1:
        for x in monthset:
          mm_file.write("%i\n" % x)
        mm_file.close()
      # if len(dayset)-1:
        for x in dayset:
          dy_file.write("%i\n" % x)
        dy_file.close()
            
        # write the node sheet (all nodes, ordered by type) and group the
        # nodes per type for the per-type sheets below
        node_file = codecs.open(os.path.join(basedir, u"nodes.txt"), mode="w", encoding="utf8") 
        ntypes = dict()
        # get node list for this network ordered by node type
        nodes_query = session.query(nt.Node).filter(\
            nt.Node.networks.any(nt.Network.id == network.id))\
            .order_by(nt.Node.type_id)
        nodes = nodes_query.all()
        for i, node in enumerate(nodes):                
            node_file.write("%s\n" % node.name[:MAX_LEN])
            if not ntypes.has_key(node.type):
                ntypes[node.type] = [node]
            else: 
                ntypes[node.type].append(node)
        node_file.close()
            
        # for each node type, create a sheet with its node list
        for ntype, nodes in ntypes.iteritems():
            ntype_fname = os.path.join(basedir, ntype.name.replace(" ", "_") + ".txt")
            ntype_file = codecs.open(ntype_fname, mode="w", encoding="utf8")
            for i, node in enumerate(nodes):
                ntype_file.write(node.name[:MAX_LEN])
                ntype_file.write("\n")
            ntype_file.close()
                
        ltypes = dict()
        # links list, written as "<start> . <end>" pairs, grouped per type
        links_file = codecs.open(os.path.join(basedir, u"links.txt"), mode="w", encoding="utf8")
        for i, link in enumerate(network.links):
            links_file.write("%s . %s\n" % (link.start.name[:MAX_LEN], link.end.name[:MAX_LEN]))
            if not ltypes.has_key(link.type):
                ltypes[link.type] = []
            ltypes[link.type].append(link)
        links_file.close()
        
        # for each link type, create a sheet with its link list
        for ltype, links in ltypes.iteritems():
            ltype_fname = os.path.join(basedir, ltype.name.replace(" ", "_") + ".txt")
            ltype_file = codecs.open(ltype_fname, mode="w", encoding="utf8")
            for i, link in enumerate(links):
                ltype_file.write("%s . %s\n" % (link.start.name[:MAX_LEN], link.end.name[:MAX_LEN])) 
            ltype_file.close()
         
        # links connectivity: a 0/1 adjacency matrix over all node pairs
        connectivity_file =  codecs.open(os.path.join(basedir, u"connect.txt"), mode="w", encoding="utf8")
        # write the first column headers
        nodes = nodes_query.all()
        # add a first tab before the first column
        connectivity_file.write("\t")
        for j, node in enumerate(nodes):
            connectivity_file.write("%s\t" % node.name[:MAX_LEN])   
        connectivity_file.write("\n")
        # write the next columns 
        startnodes = session.query(nt.Node).filter(\
            nt.Node.networks.any(nt.Network.id == network.id))\
            .order_by(nt.Node.type_id).all()            
        endnodes = session.query(nt.Node).filter(\
            nt.Node.networks.any(nt.Network.id == network.id))\
            .order_by(nt.Node.type_id).all()

        for i, snode in enumerate(startnodes):
            # the first cell of each row is the node name
            connectivity_file.write("%s\t" % snode.name[:MAX_LEN])   
            for col, enode in enumerate(endnodes):
                if network.has_link(snode, enode):
                    # NOTE(review): one "1" cell is written per matching
                    # link, so parallel links would emit extra cells.
                    for link  in network.links:
                        if link.start == snode and link.end == enode :
                            # not exporting the link.type_id but O/1
                            connectivity_file.write("%i\t" % 1)   
                else:
                    connectivity_file.write("%i\t" % 0)   
                    
            connectivity_file.write("\n")
        connectivity_file.close()
        
        # write the fields: export_otype() writes the per-type data files
        # and reports back parameter-set names, table entries and
        # reference fields, accumulated below for the data listing
        
        params = list()
        tables = list()
        refs = list()
        
        for ntype, nodes in ntypes.iteritems():
            logging.debug("Node type : %s" % ntype.name)
            (oparams, otables, orefs) = self.export_otype(basedir, ntype, nodes, network )
            if oparams is not None:
                params.extend(oparams)
            if otables is not None:
                tables.extend(otables)
            if orefs:
              refs.append(("Nodes",orefs))
                
        # for each link type
        for ltype, links in ltypes.iteritems():
            logging.debug("Link type : %s" % ltype.name)
            (oparams, otables, orefs) = self.export_otype(basedir,ltype, links, network)
            if oparams is not None:
                params.extend(oparams)
            if otables is not None:
                tables.extend(otables)                
            if orefs:
              refs.append(('Links',orefs))
            
        # main GAMS data listing: declares the sets/tables and $includes
        # the files written above
        data_listing_file = codecs.open(os.path.join(basedir,'data.txt'), mode="w", encoding="utf8") 
        data_listing_file.write(
        
""" 
SETS
    i all model nodes/
$   include nodes.txt
    /
    
* This line creates a secondary node index j (an 'alias' in GAMS parlance)
Alias(i,j)
""")

        # NOTE(review): refs[0]/refs[1] only exist when node and/or link
        # types actually contributed references -- refs[1][1] raises
        # IndexError when fewer than two entries were appended. Verify.
        if refs[1][1]:
          data_listing_file.write('\nAlias(i,ii)\n\nAlias(i,jj)\n\n')
        
        data_listing_file.write("""

SETS

* Time steps
    yr years /
$   include yr.txt
    /
    mn months /
$   include mn.txt
    /
    dy days /
$   include dy.txt
/

* Subsets

""")

        # line is composed of three targets : name, description and file
        line_format =  "\n\t%%s(%s) %%s/\n$\tinclude %%s\n/" 
        node_format =  line_format % ( "i")
        link_format =  line_format % ( "i,j")

        # Adds list of Nodes and Links with refs
        if refs[0]:
          data_listing_file.write('NODEref (i)/\n$\tinclude NODEref.txt\n\t/')
        if refs[1]:
          data_listing_file.write('\nLINKref (i,j)/\n$\tinclude LINKref.txt\n\t/')
        
        
        if refs[0][1]:   # if there are any refs from nodes
          for i in range(len(refs[0][1])):
            field = refs[0][1][i]
            if isinstance(field.attribute_type,fd.NodeReferences):
              data_listing_file.write("\n%s(i,j) / \n$\t include %s.txt\n/\n" % (field.name,field.name))
            else:
              data_listing_file.write("\n%s(i,j,ii) / \n$\t include %s.txt\n/\n" % (field.name,field.name))
              
        if refs[1][1]:   # if there are any refs from links
          for i in range(len(refs[1][1])):
            field = refs[1][1][i]
            if isinstance(field.attribute_type,fd.NodeReferences):
              data_listing_file.write("\n\t%s(i,j,ii) / \n$\t include %s.txt\n/" % (field.name,field.name))
            else:
              data_listing_file.write("\n\t%s(i,j,ii,jj) / \n$\t include %s.txt\n/" % (field.name,field.name))

        # export nodes
        for obj in ntypes.keys():
            obj_fname = obj.name.strip().replace(" ", "_") + ".txt"
            data_listing_file.write(node_format % (obj.name[:MAX_LEN].strip(), 
                                                   "", #obj.description.replace("\n", " "),
                                                   obj_fname))
        # export links
        data_listing_file.write(link_format %
                                ("links", "", "links.txt"))
        for obj in ltypes.keys():
            obj_fname = obj.name.strip().replace(" ", "_") + ".txt"
            data_listing_file.write(link_format % (obj.name[:MAX_LEN].strip(), 
                                                   "", #obj.description.replace("\n", " "),
                                                   obj_fname)) 
        # export sets of parameters 
        data_listing_file.write("\n\n* Parameter name sets\n\n")
        
        parameter_format = "\n\t%s_par %s/\n$\tinclude %s_par.txt\n/" 
        
        for pname in  params:
            data_listing_file.write(parameter_format % (pname, "", pname ))
            
        # export tables
        data_listing_file.write("\n\n* Tables\n\n")

        table_format = "\nTable %s_data(%s) %s\n$\tinclude %s_data.txt\n" 
        for tname, dims in tables:
            if 'par' not in tname:
                data_listing_file.write(table_format % (tname, dims, "", tname))
            else: 
                # for parameter tables, prepend the node index 'i' to the
                # dimension list (drop the first dim, reverse-insert-reverse)
                d_list = dims.split(',')[1:]
                d_list.reverse()
                d_list.append('i')
                d_list.reverse()
                val = ','.join(d_list)
                data_listing_file.write(table_format % (tname,val,"",tname))
            
        data_listing_file.write("""
* Table summarizing network connectivity ('Connectivity matrix')
Table    Connect(i,j)
$        include connect.txt
""")
            
        data_listing_file.close()        
# data.txt generation is done; everything below post-processes it in place.

        # FIXME: The following procedure is horrible and slow. Badly needs to be rewritten.
        # SHOULD REALLY BE USING _REGEX_
        # arr = re.findall('Table\s\w.*',text) gives an array with all the matching Table
        # lines (and also the summarizing line).
        
        # First pass: rewrite the '* Parameter name sets' section of
        # data.txt (via the temporary MyFile.txt).
        # NOTE(review): os.chdir() changes the process-wide cwd as a side
        # effect and is never restored.
        os.chdir(basedir)
        MYFILE = codecs.open( "MyFile.txt", mode="w", encoding="utf8")
        FILE = open('data.txt', "r")
        # MYFILE = open (myfname, "w")
        buffer = ''
        buffer_list=[]
        table_list=[]
        
        # read data.txt character by character, collecting its non-empty
        # lines into buffer_list (read(1) returns '' at EOF, so the
        # remaining iterations are no-ops)
        for i in range(100000): # This will support data_listing file <100KB
          
          char =FILE.read(1)
          if char != '\n' : 
            buffer = buffer + char
          else:
            if  buffer !='SuperMonkeyMan' and buffer!='':
              buffer_list.append(buffer)
              buffer = '' 
        # collect every "Table ... tab ... data" line plus its follower
        for i in range (len(buffer_list)):
          L = buffer_list[i]
          if (i+1)!= len(buffer_list):
            L2 = buffer_list[i+1]
          if ('Table' in L) and ('tab' in L) and ('data' in L):
            table_list.append(L)
            table_list.append(L2)
        
        # NOTE(review): missing parentheses -- FILE.close is a no-op here
        FILE.close
        FILE = codecs.open('data.txt', mode="r", encoding="utf8")
        
        for line in FILE:
          if 'Parameter' in line:
            MYFILE.write('* Parameter name sets'+'\n') 
            temp_string = ''
            for L in (table_list):
              if 'Table' in L:
                # copy chars after "Table " until the '(' and emit the
                # name with a "par  /" suffix
                for counter in range (6,len(L)-1):
                  temp_string = temp_string + L[counter]
                  if L[counter+1] == '(':
                    MYFILE.write('\n')
                    MYFILE.write( '\t'+temp_string[0:-4]+'par  /\n')
                    temp_string = ''
                    break
              else:
                MYFILE.write (L)
                MYFILE.write ('\n/')
          else:
            MYFILE.write(line)
        
        MYFILE.close()  # MYFILE is correct at this point, now need to copy all contents of MYFILE to FILE
        FILE.close()
        #MYFILE = open(myfname, "r")
        
        # copy MyFile.txt back over data.txt
        MYFILE = codecs.open( "MyFile.txt", mode="r", encoding="utf8")
        FILE = codecs.open('data.txt', mode="w", encoding="utf8")
        
        for line in MYFILE:
          FILE.write(line)
       
        
        MYFILE.close() 
        FILE.close()
 
        # The procedure is:
        # - read all the lines in the main file 
        # - put all the lines in a buffer list
        # - copy all needed lines to table list
        # - Iterate through table list
        # - open tempfile for writing, close and reopen main file for reading
        # - copy everything, main to tempfile, till parameters heading
        # - Print out table list  elements, dont forget 'include' and proper format
        # - Continue copying till end, close files, then read tempfile, write to main file
        # - Copy files line by line
 
        print 'Reparsing data listing file'

        # Second pass: same buffering scheme, this time rewriting the
        # '* Tables' section (MYFILE.txt is the scratch file) so the node
        # index 'i' is inserted into the dimension list of each per-type
        # data table.
        # NOTE(review): the block below mixes tabs and spaces; it is
        # preserved verbatim -- reindent with care.
        FILE = codecs.open('data.txt', mode="r", encoding="utf8")
        MYFILE = open ( "MYFILE.txt", "w")
        # MYFILE = open (myfname, "w")
        buffer = ''
        buffer_list=[]
        table_list=[]
        
        for i in range(1000000): # This will support data_listing file <10000KB
          
          char =FILE.read(1)
          if char != '\n' : 
            buffer = buffer + char
          else:
            if  buffer !='SuperMonkeyMan' and buffer!='':
              buffer_list.append(buffer)
              buffer = '' 
        for i in range (len(buffer_list)):
          L = buffer_list[i]
          if (i+1)!= len(buffer_list):
            L2 = buffer_list[i+1]
          if ('Table' in L) and ('data' in L):
            table_list.append(L)
            table_list.append(L2)

        
        # NOTE(review): missing parentheses again -- the file is not closed
        FILE.close
        FILE =codecs.open('data.txt', mode="r", encoding="utf8")
        
        for line in FILE:
          if 'Tables' in line:
            MYFILE.write('* Tables\n\n\n') 
            temp_string = ''
            for L in (table_list):
              if ('Table' in L) and ('i,j' not in L) and (('ts_data' in L) or (('tab' in L) and ('data' in L))):
	   	back = 1
		counter = len(L)-1
		while counter != 0:
			if L[counter]==',':
				break
			else:
				back = back +1
				counter = counter - 1
		back = back *(-1)
                
		for ncounter in range (0,len(L)):
                  temp_string = temp_string + L[ncounter]
                  if L[ncounter+1] == ')':
                    
		    MYFILE.write('\n')
		    a=temp_string[0:back+2]+',i)\n\n'
                    
		    MYFILE.write( a)
                    temp_string = ''
                    break
             #Writes Table lines that don't need to change 
	      else:
                MYFILE.write (L)
                MYFILE.write ('\n\n')
	      
	  # Writes all non Table lines
          else:  
	    if ("Table " in line): 
		MYFILE.write("* Table summarizing network connectivity ('Connectivity matrix')\nTable\tConnect(i,j)\n$\tinclude connect.txt")
		break
            else:  
	      MYFILE.write(line)
        
        MYFILE.close()  # MYFILE is correct at this point, now need to copy all contents of MYFILE to FILE
        FILE.close()
        #MYFILE = open(myfname, "r")
        
        MYFILE =  codecs.open("MYFILE.txt", "r")
        FILE = codecs.open('data.txt', mode="w", encoding="utf8")
        
        for line in MYFILE:
          FILE.write(line)
        
        MYFILE.close() 
        FILE.close()
        os.remove('MYFILE.txt')

        # The following writes history file with the time of the last export;
        # the 'sign_off' marker tells export_references() that the previous
        # export completed (see that method).
        import time
        hist_file_name = os.path.join(basedir,"export_stamp")
        hist_file = open(hist_file_name,'w')
        hist_file.write(str(time.localtime())+'\nsign_off')
        # NOTE(review): ATTRIB is Windows-only; on other platforms this
        # system call fails silently.
        hide = 'ATTRIB +H ' + hist_file_name
        os.system(hide)
 
    def export_otype(self, basedir, otype, objects, network=None):
        """
        Export every field of one node/link type to text files in the
        given directory.

        Returns a tuple (parameter set names, [name, dims] table entries,
        reference fields), or (None, None, None) when the type declares
        no fields at all.
        """
        # parameter type in a group are homogenous
        if not otype.fields:
            return (None, None, None)

        # Sort the fields into one bucket per attribute kind; the test
        # order below reproduces the original isinstance dispatch order.
        parameters = []
        timeseries = []
        tables = []
        sparams = []
        refs = []
        dispatch = [
            (fd.Parameter, parameters),
            (fd.TimeSerie, timeseries),
            (fd.Table, tables),
            (fd.SeasonalParameter, sparams),
            (fd.NodeReferences, refs),
            (fd.LinkReferences, refs),
        ]
        for field in otype.fields:
            logging.debug('--> %s' % field.name)
            for klass, bucket in dispatch:
                if isinstance(field.attribute_type, klass):
                    bucket.append(field)
                    break

        output_parameters = list()
        output_tables = list()

        # Plain parameters share a single header + data sheet per type.
        if parameters:
            name = self.get_sheet_name(otype, None, "par")
            logging.debug("Parameter : %s " % name)
            dim = self.export_parameters(basedir, name, objects, parameters)
            output_parameters.append(name)
            output_tables.append([name, dim])

        # Time series also share a single header + data sheet per type.
        if timeseries:
            name = self.get_sheet_name(otype, None, "ts")
            logging.debug("Time serie : %s " % name)
            dim = self.export_timeseries(basedir, name, objects, timeseries)
            output_parameters.append(name)
            output_tables.append([name, dim])

        # Table export is one sheet per table field per type.
        for table in tables:
            name = u"%s_tab_%s" % (otype.name.strip(), table.name)
            logging.debug("Table : %s " % name)
            dim = self.export_table(basedir, name, objects, table)
            output_tables.append([name, dim])

        # Seasonal parameters get one sheet per field as well.
        for sparam in sparams:
            name = u"%s_tab_%s" % (otype.name.strip(), sparam.name)
            logging.debug("Seasonal param : %s " % name)
            dim = self.export_seasonal_parameter(basedir, name, objects, sparam)
            output_tables.append([name, dim])

        # Node/link reference fields have a dedicated export pass.
        if refs:
            self.export_references(basedir, network, objects, refs)

        return (output_parameters, output_tables, refs)


    def export_references(self, base_dir, network, objects ,refs_list):
      # Exports both node and link references to text files.
      # Gets a list of references from the 'export_otype' function 
      # gets hit twice, once for the nodes and once for the links:
      #  - NODEref.txt / LINKref.txt list the objects carrying at least
      #    one non-empty reference value;
      #  - one "<field>.txt" file per reference field maps each object to
      #    the names of the nodes/links it references.
      # All files are opened in append mode so successive calls (one per
      # type) accumulate into the same files.
      import hydroplatform.model.network as nt
      # Need this import to check if obj is a node or a link
      hist_file_name = os.path.join(base_dir,"export_stamp")
      NEW_EXPORT = False
      FILE_MODE = 'r+'
      # assumes `objects` is non-empty and homogeneous (all nodes or all
      # links) -- TODO confirm against callers
      NODE = isinstance(objects[0], nt.Node)

      if os.path.exists(hist_file_name):
        hist_file = open(hist_file_name,'r')
        hist_text = hist_file.read()
        # NOTE(review): when 'sign_off' is absent the file is never closed
        if 'sign_off' in hist_text:
          NEW_EXPORT = True
          hist_file.close()
          os.remove(hist_file_name)
      if NEW_EXPORT:
        FILE_MODE = 'w'
      # NOTE(review): FILE_MODE is computed but never used below -- every
      # open() call hardcodes mode "a", so a stale export may accumulate
      # duplicate lines. Verify.
      # 'signoff' is simply a flag inserted into a file with a timestamp which indicates that the export in question was completed.

      # objects that have at least one non-empty reference field value
      list_of_objects_with_refs = []
      for obj in objects:
        for field in obj.type.fields:
          fval = obj.getFieldValue(self.session, field)
          if field in refs_list and obj not in list_of_objects_with_refs and fval and fval.value:
            list_of_objects_with_refs.append(obj)

      if list_of_objects_with_refs :
        fname =''
        if NODE:
          fname  = 'NODEref.txt'
        else:
          fname  = 'LINKref.txt'
        fname = os.path.join(base_dir,fname)
        all_refs_file = codecs.open(fname, mode="a", encoding="utf8")

        if NODE:
          for i in list_of_objects_with_refs:
            all_refs_file.write(i.name+'\n')
        else:
          # links are identified as "<start>.<end>"
          for i in list_of_objects_with_refs:
            link_name = i.start.name+'.'+i.end.name
            all_refs_file.write(link_name+'\n')
        all_refs_file.close()

      nodes_dict = {}
      links_dict = {}
      # dict to do quick lookups via object id
      for i in network.nodes:
        nodes_dict[i.id] = i.name
      for i in network.links:
        links_dict[i.id] = i.start.name + '.'+ i.end.name

      obj_plus_val_list = []
      counter = len(objects[0].type.fields)
      for i in objects:
        for field_num in range(0,counter) :
          fval = i.getFieldValue(self.session, i.type.fields[field_num])
          if i.type.fields[field_num] in refs_list and fval :
            # Following line creates a list of tuples as such (object, name of field, value of said field[which is actually a list of node_ids], type of field)
            obj_plus_val_list.append (( i, i.type.fields[field_num].name, fval.value, i.type.fields[field_num].attribute_type))
      
      file_list = []
      # file_list is a tuple of (filename[without '.txt'], file pointer)
      for i in refs_list:
        file_path = os.path.join(base_dir, i.name + ".txt")
        ofile = codecs.open(file_path, mode="a", encoding="utf8")
        file_list.append(( i.name, ofile ))

      # Write one line per referenced id into the matching field file;
      # ids missing from the lookup dicts are silently skipped (KeyError).
      for quadruple in obj_plus_val_list:
        for ofile in file_list:
          if quadruple[2] and quadruple[1] and quadruple[1]==ofile[0]:
            if NODE:
              for item in quadruple[2]:
                # Write for each item in the list of node_ids, 'else' is for links
                ofile[1].seek(0,os.SEEK_END)
                try:
                  if isinstance(quadruple[3], fd.NodeReferences):
                    x = nodes_dict[item]
                  else:
                    x = links_dict[item]
                  ofile[1].write(quadruple[0].name + "." + str(x)+"\n")
                except KeyError:
                  continue
            else:
              for item in quadruple[2]:
                try:
                  if isinstance(quadruple[3], fd.LinkReferences):
                    x = links_dict[item]
                  else:
                    x = nodes_dict[item]
                  ofile[1].seek(0,os.SEEK_END)
                  ofile[1].write(str(quadruple[0].start.name)+"."+ str(quadruple[0].end.name) + "." + str(x) + "\n")
                except KeyError:
                  continue

          else:continue

      for i in range(0,len(file_list)):
        file_list[i][1].close() 


    def get_sheet_name(self, otype, group, ftype, has_multiple_types=False):
        """
        Build the base sheet/file name for an object type.

        Without a group (or when the type has a single group) the name is
        "<type>_<ftype>".  With a group it is "<type>_<groupname>", with
        the field type appended when `has_multiple_types` is set.

        Note: the original expression compared the boolean flag with
        `> 1`, which is always False, so the disambiguating suffix was
        never emitted -- fixed to a plain truth test.
        """
        base = otype.name.strip()
        if group is not None and len(otype.groups) > 1:
            if has_multiple_types:
                # include the field type to disambiguate the sheets
                return u"%s_%s_%s" % (base, group.name.lower(), ftype)
            return u"%s_%s" % (base, group.name.lower())
        return u"%s_%s" % (base, ftype)

            
    def export_parameters(self, basedir, name, objects, parameters):
        '''
        Exports parameter fields to text files:

        1. "<name>_par.txt"  : the list of parameter names, one per line
        2. "<name>_data.txt" : a fixed-width table with one row per object
           and one column per parameter:

                Param1  Param2
        Node1    v1       v2
        Node2    v3       v4

        Objects for which no parameter has a value are skipped entirely.
        Returns the GAMS dimension string for the data table (None when
        `objects` is empty).
        '''
        # export headers definition: write field names in rows
        header_file = codecs.open(os.path.join(basedir, name + "_par.txt"),
                                  mode="w", encoding="utf8")
        for field in parameters:
            header_file.write("%s\n" % field.name)
        header_file.close()

        # first column holds the object names, then one column per parameter
        cols = [list() for _ in xrange(len(parameters) + 1)]

        # header row: empty corner cell followed by the parameter labels
        cols[0].append(u"")
        for i, field in enumerate(parameters):
            cols[i+1].append(field.name + " ")

        # for each object write its name and the value of each field;
        # last_object is remembered to decide the dimension string below
        last_object = None
        for nobject in objects:
            last_object = nobject
            node_has_value = False
            for i, field in enumerate(parameters):
                val = self.get_value(nobject, field)
                if val is not None and val.value is not None:
                    node_has_value = True
                    cols[i+1].append("%s" % val.value)
                else:
                    cols[i+1].append("")
            if node_has_value:
                cols[0].append(self.get_object_name(nobject))
            else:
                # remove the blank cells just inserted in the data cols
                # (index > 0) so the valueless row disappears entirely
                for i in xrange(1, len(cols)):
                    if len(cols[i]) > 0:
                        cols[i].pop()

        data_fname = os.path.join(basedir, name + "_data.txt")
        self.save_cols_to_file(data_fname, cols)

        if isinstance(last_object, nt.Node):
            return "%s,%s" % (last_object.type.name, name + "_par")
        elif isinstance(last_object, nt.Link):
            return "i,j,%s" % (name + "_par")
             
    def save_cols_to_file(self, filename, cols):
        """Write `cols` to `filename` as a fixed-width, space-separated table.

        Each column is padded (left-aligned) to the width of its longest
        entry; rows are produced by zipping the columns together, so the
        output has as many rows as the shortest column.

        Parameters:
        -----------
        filename : str
            output filename to be used
        cols     : list
            list of columns, each a list of str values
        """
        # widest entry per (non-empty) column drives the padding
        widths = []
        for column in cols:
            if len(column) > 0:
                widths.append(max(len(item) for item in column))
        # one left-aligned %-formatter per column
        formatters = ["%%-%ss" % width for width in widths]

        out = codecs.open(filename, mode="w", encoding="utf8")
        for row in zip(*cols):
            cells = []
            for idx, cell in enumerate(row):
                cells.append(formatters[idx] % cell)
            out.write(" ".join(cells))
            out.write("\n")
        out.close()
        
    def get_object_name(self, network_object):
        """Return the identifier written to the export files for an object.

        A node is identified by its own name; a link by
        "<start>.<end>" built from its endpoint node names.
        """
        if isinstance(network_object, nt.Node):
            return network_object.name
        elif isinstance(network_object, nt.Link):
            return network_object.start.name + "." + network_object.end.name
     
    def get_value(self, nobject, field):
        """
        Return the stored value record for the given object/field pair.

        Queries NodeValue or LinkValue depending on the object's type and
        returns the first matching row, or None when no value is stored.

        Raises ExportError for objects that are neither Node nor Link
        (the original code crashed with a NameError on `dbtable` here,
        after printing a bare "Error" to stdout and re-raising).
        """
        # pick the value table matching the object's kind
        if isinstance(nobject, nt.Node):
            dbtable = nt.NodeValue
            key_id = nt.NodeValue.node_id
        elif isinstance(nobject, nt.Link):
            dbtable = nt.LinkValue
            key_id = nt.LinkValue.link_id
        else:
            raise ExportError(u"Cannot get value for object of type %r" % type(nobject))
        return self.session.query(dbtable).filter( \
                and_(dbtable.attribute_id == field.id, \
                     key_id == nobject.id)).first()

    def export_seasonal_parameter(self, basedir, name, nobjects, field):
        """
        Exports a seasonal parameter field to "<name>_data.txt".

        The output has one row per season index (1-based labels derived
        from the field's date boundaries) and one column per object:

                 node1-value   node2-value ...
        1           v1               v2
        2           v1               v2
        ...

        Objects with no value, or an all-NaN matrix, are skipped.
        Returns the GAMS dimension string "mn, <type name>".
        """
        # No need for the _par file
        ## export headers definition
        #header_file = codecs.open(os.path.join(basedir, name + "_par.txt"), 
                                  #mode="w", encoding="utf8")
        ## write field name in column
        #header_file.write("%s\n" %  field.name) 
        #header_file.close()
        
        # initialise the list of columns
        cols = list()     
        # first column is the object name
        cols.append(list())
         
        FREQ_COLUMN = 0
        # write column labels        
        # add white space for the first column
        cols[FREQ_COLUMN].append("") 
 
        
        # write column labels
        # FIXME : this is an hardcoded bad way of exporting the seasonnal
        # frequency ...
        # NOTE(review): the i==10 special case appends an extra 12th label,
        # presumably to complete a monthly (12-step) series whose last date
        # boundary was dropped by the [:-1] slice -- TODO confirm.
        for i,date in enumerate(field.attribute_type._dates[:-1]):
            cols[FREQ_COLUMN].append("%i"% (i+1))
            if i==10:
              cols[FREQ_COLUMN].append("%i"% (i+2))
        for obj in nobjects:
            val = self.get_value(obj, field)
            if val is not None:                            
                # loop on the rows and write the lines
                matrix = val.value
                # skip objects whose matrix is missing or entirely NaN
                if matrix is None or \
                   numpy.all(numpy.isnan(matrix)):
                    continue
                # write node name
                cols.append(list())
                p_list = cols[-1]
                p_list.append(self.get_object_name(obj))
                
                # only the first column of the stored matrix is exported
                p_list.extend(["%s" % val for val in matrix[:,0].tolist()])
        
 
        # export data
        fname = os.path.join(basedir, name + "_data.txt")
        self.save_cols_to_file(fname, cols)
        
        return "%s, %s" % ("mn", nobjects[0].type.name)
       
                
    def export_table(self, basedir, name, objects, field):
        """
        Export a table attribute of *objects* to three text files in
        *basedir*:

        - ``<name>_par.txt``   : one column name per line
        - ``<name>_data.txt``  : the column-oriented data
        - ``<name>_steps.txt`` : one ``step<i>`` label per row index used

        Layout of the data file::

                         tab1-collabel1    tab1-collabel2    tab1-collabel3
            node1 .step0    v1               v2                  v3
            node1 .step1    vx               v9                  vz
            node2 .step0    v1               v2                  v3

        Objects with no value, a ``None`` matrix or an all-NaN matrix are
        skipped.  NaN cells are written as empty strings.

        :param basedir: output directory
        :param name: base name for the generated files
        :param objects: network objects (nodes or links) to export
        :param field: field describing the table attribute to read
        :return: GAMS header string, or ``None`` when *objects* is empty or
                 its last element is neither a Node nor a Link
        """
        # export headers definition: one column name per line
        header_file = codecs.open(os.path.join(basedir, name + "_par.txt"),
                                  mode="w", encoding="utf8")
        for col in field.attribute_type.columns:
            header_file.write("%s\n" % col.name)
        header_file.close()

        FIRST_COLUMN = 0   # object name
        STEP_COLUMN = 1    # GAMS dummy step variable
        # the two leading columns get a blank header cell; then one data
        # column per table column, headed with its name
        cols = [[""], [""]]
        for col in field.attribute_type.columns:
            cols.append(["%s" % col.name])

        max_step = 0
        # FIXME : improve NaN management
        NAN = ""
        otype = None
        for obj in objects:
            # remember the last object seen so the return value can pick
            # the Node/Link header format
            otype = obj
            val = self.get_value(obj, field)
            if val is None:
                continue
            matrix = val.value
            # skip objects without usable data
            if matrix is None or numpy.all(numpy.isnan(matrix)):
                continue
            for i in xrange(matrix.shape[0]):
                # write node name and the GAMS dummy step variable
                cols[FIRST_COLUMN].append(self.get_object_name(obj))
                cols[STEP_COLUMN].append(u".step%s" % i)
                for j in xrange(matrix.shape[1]):
                    if not numpy.isnan(matrix[i, j]):
                        # FIXME : use the format of the field !
                        cols[j + 2].append("%f" % matrix[i, j])
                    else:
                        cols[j + 2].append("%s" % NAN)
            # Track the largest step index seen.  The original code used a
            # ``for``/``else`` that read the loop variable after the loop:
            # with no ``break`` the ``else`` always ran, and it would raise
            # NameError for a zero-row matrix.  Computing it from the shape
            # is equivalent and safe.
            if matrix.shape[0]:
                max_step = max(max_step, matrix.shape[0] - 1)

        # export data
        fname = os.path.join(basedir, name + "_data.txt")
        self.save_cols_to_file(fname, cols)

        # write the steps file: one label per used step index
        step_file = codecs.open(os.path.join(basedir, name + "_steps.txt"),
                                  mode="w", encoding="utf8")
        for i in xrange(max_step + 1):
            step_file.write("step%i\n" % i)
        step_file.close()

        if isinstance(otype, nt.Node):
            return "%s,%s" % (otype.type.name, name + "_par")
        elif isinstance(otype, nt.Link):
            return "i,j,%s" % (name + "_par")

        
    def export_timeseries(self, basedir, name, objects, timeseries):
        '''
        Export time-series attributes of *objects* to text files in
        *basedir*.

        Group timeseries are outputted like this::

                         TS1.Node1  TS1.Node2  TS2.Node1
            yr  mn  dy      ...        ...        ...

        Multiple timeseries are concatenated on the right.  Writes
        ``<name>_par.txt`` (one field name per line) and
        ``<name>_data.txt`` (the column-oriented data).

        Only the first column of each value matrix is exported
        (``matrix[:, 0]``); objects with no value, a ``None`` matrix or an
        all-NaN matrix are skipped.

        :param basedir: output directory
        :param name: base name for the generated files
        :param objects: network objects (nodes or links) to export
        :param timeseries: time-series fields to read on each object
        :return: GAMS header string describing the table dimensions
        '''
        # export headers definition: one time-series field name per line
        header_file = codecs.open(os.path.join(basedir, name + "_par.txt"),
                                  mode="w", encoding="utf8")
        for field in timeseries:
            header_file.write("%s\n" % field.name)
        header_file.close()

        # columns 0..2 hold year / month / day; each starts with a blank
        # header cell so data rows align with the time-series headers
        YEAR_COLUMN = 0
        MONTH_COLUMN = 1
        DAY_COLUMN = 2
        cols = [[""], [""], [""]]

        for field in timeseries:
            for obj in objects:
                val = self.get_value(obj, field)
                if val is None:
                    continue
                matrix = val.value
                # BUGFIX: the original tested ``numpy.all(...) is True``.
                # numpy.all returns a numpy.bool_, which is never the
                # ``True`` singleton, so all-NaN matrices were never
                # skipped.  Plain truthiness (as used in export_table)
                # restores the intended skip.
                if matrix is None or numpy.all(numpy.isnan(matrix)):
                    continue
                # one column per (field, object) pair, headed "field.object"
                ts_list = ["%s.%s" % (field.name, self.get_object_name(obj))]
                # only the first matrix column is exported
                ts_list.extend(["%s" % v for v in matrix[:, 0].tolist()])
                cols.append(ts_list)

        # date columns in GAMS tuple syntax: "YYYY ." / "M ." / "D"
        for tstamp in timeseries[0].attribute_type.dates:
            cols[YEAR_COLUMN].append("%i ." % tstamp.year)
            cols[MONTH_COLUMN].append("%i ." % tstamp.month)
            cols[DAY_COLUMN].append("%i" % tstamp.day)

        # export data
        fname = os.path.join(basedir, name + "_data.txt")
        self.save_cols_to_file(fname, cols)

        # NOTE: a commented-out block here used to generate a weekly
        # time-step file ("wk.txt") from the yr.txt range; it was dropped
        # because weekly timesteps should not be needed at all.

        # ``obj`` is the last object iterated above; assumes *objects* is a
        # homogeneous, non-empty list (as the original did)
        if isinstance(obj, nt.Node):
            return "yr,mn,dy,%s,%s" % (name + "_par", objects[0].type.name)
        elif isinstance(obj, nt.Link):
            return "yr,mn,dy,%s,i,j" % (name + "_par")



# Module-level registration: executed at import time so the text exporter
# becomes discoverable through the shared exporter registry as soon as this
# extension module is loaded.
REGISTRY = ExporterRegistry()
REGISTRY.register(TextExport)
logging.info("TextExporter extension registered")        
