#! /usr/bin/python
# -*- coding: cp1250 -*-

import re
import string
import sys
import glob
import os
import time
import shutil
import pepe.p_txt.bigtxt as bt
import pepe.p_datetime.stopwatches as pds
import pepe.p_dm.sdb as s
import pepe.p_oslevel.basic as osb

def iso_time_now():
    """ Return the current local time as an ISO-8601 string (YYYY-MM-DDTHH:MM:SS). """
    return time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())

def fw_slash(x):
    """ Return *x* with every backslash replaced by a forward slash. """
    return re.sub(r'\\', '/', x)

class WriteDb:
    """ 
    Read text delimited file and save it as shelve database. Split each row to list. 
    To read in this way created shelve database use lsdb.py.

    Syntax: txt2sdb <csv file> "^" 1

    Example of further handling:
    b = {}
    for r in bcsv_data:
        b[(r[0], r[6])] = str((r[0], r[6])) +"^"+"b"

    o = open('a1.dat','w')
    for k in sorted(a.keys()):
        o.write(a[k]+'\n')
    o.close()
    """
    def __init__(self):
        """ """

    def readFile(self, infile, cdelimiter, rowsskip=0, hugefile=0):
        """ Read text file.
        """
        self.meta =     { 'infile':infile, 'lino':0, 'rowsCount':0
                        , 'infilePath': os.path.split(infile)[0]
                        , 'infileName': os.path.split(infile)[1]
                        , 'header':[], 'columnsCount':[]
                        , 'cDelimiter':cdelimiter, 'rowsSkip':rowsskip 
                        , 'isHugeFile':hugefile
                        }
        self.data = []
        for line in bt.lines(self.meta['infile']):
            line = re.sub('\n', '', line)
            self.meta['lino'] += 1
            cols = string.split(line, self.meta['cDelimiter'])
            if len(cols) > 1:

                # header
                if self.meta['lino'] == 1 and rowsskip == 1:
                    self.meta['header'] = cols

                # data
                if self.meta['lino'] > int(rowsskip):
                    if not len(cols) in self.meta['columnsCount']:
                        self.meta['columnsCount'].append(len(cols))
                    if self.meta['isHugeFile'] == 0 : self.data.append(cols)
                    self.meta['rowsCount'] += 1

        self.meta['dtProcessed'] = iso_time_now()

    def write(self):
        """ Write to shelve db.
        """
        # 
        #keyname = self.meta['infile_name']
        #keyname = re.sub('\W','_', keyname)
        #keyname = re.sub('_{2,}','_', keyname)
        if self.meta['isHugeFile'] == 0: s.shlvDb('data', self.data)
        s.shlvDb('meta', self.meta)

        self.meta['sdbName'] = re.sub('\W','', self.meta['infileName']) + '.sdb'
        shutil.move('sdb.sdb', self.meta['sdbName'])

    def pp(self):
        """ Parameters pretty print. """
        print "\n\'\'\'"
        for k in sorted(self.meta.keys()):
            if k not in ('data', 'header'):
                print string.rjust(str(k), 15), "::",self.meta[k]
        print "\'\'\'\n"

def main(infile, cdelimiter, rowsskip=0, hugefile=0):
    """ Read *infile*, store it as a shelve db, pretty-print the metadata.

    rowsskip/hugefile are coerced to int here because the CLI path hands
    them over as strings; string '0'/'1' previously broke the equality
    checks inside WriteDb.readFile.
    """
    t = WriteDb()
    t.readFile(infile, cdelimiter, int(rowsskip), int(hugefile))
    t.write()
    t.pp()

if __name__ == '__main__':
    # 3 user args -> default hugefile, 4 user args -> explicit hugefile;
    # anything else prints the usage text (the class docstring).
    argc = len(sys.argv)
    if argc in (4, 5):
        main(osb.wai() + sys.argv[1], *sys.argv[2:])
    else:
        print(WriteDb.__doc__)
