#!/usr/bin/python
# -*- coding: cp1250 -*-

from pepe.glib import *

# Skeleton scratch-script template, written verbatim to _skel.py by the
# __main__ block below.  The template body is DATA: its comments, paths and
# Python-2 syntax (print statements, <>) are emitted as-is on purpose.
# NOTE: this is a NON-raw triple-quoted string, so escapes are processed
# here: \t becomes a real tab and \\n becomes a literal backslash-n in the
# generated file.  Escapes that must survive into _skel.py (e.g. \x0D\x0A)
# therefore have to be written doubled (\\x0D\\x0A).
SKEL = """#!/usr/bin/python
# -*- coding: cp1250 -*-
#import sys; sys.path.append(r'C:/xxx/yyy')

import pepe.glib as g
import shutil, glob

import pepe.p_utils.srch as s
import pepe.p_utils.srchr as sr
import pepe.p_utils.csv_cntdel as cd
import pepe.p_txt.bigtxt as bt

from pepe.utils import u

if 1:
    # nonstandard rows outputed to cntdel.errs.log
    u('count-delimiters', '_gl00.dat')

if 0:
    print '> striping blank spaces around columns delimiters'
    u('data-csv-strip', '_gl01.dat')
    shutil.move('_gl01.dat_stripped', 'gl.dat')
    u('count-header-tail', 'gl.dat')
    #u('cht', '*.txt')

if 0:
    # ba step
    # making sure that previous run files are not appended
    for f in glob.glob('*.dat'):
        shutil.move(f, 'deleted')

if 0:
    # join/compare two data files
    # first column has to be key; tab as delimiter

    # for small data using sqlite
    u('jointt', 'cd_glraw.dat', 'cd_tbraw.dat')
    
    # for big data we are using postgresql
    u('joinpg', '*.dat', '\t')

    #shutil.move('twt-joined.twt#', 'data.dat')
    
if 0:
    # ca step
    u('data-rename-abc', '*.txt', 'aaa')
    u('data-add-unique-id', '*.txt_')

if 0:
    # da step
    s.srch(pattern="^.{15}\t[1-9]", filesmask="*PL2008.txt__", ofile='aaa-PL2008.txt___', otype='bare')
    s.srch(pattern="^\t{2}\d{3}\t\d{4}\t", filesmask="*.csv", ofile='coa.txt', otype='bare')

    u('data-replace-delimiter-by-tab', 'tb.txt_')
    sr.srchr(filesmask="tb.txt__", search="	\*    ", replace="'", re_scope='per-line', ofile='tb.txt_3')
    # converting ddd.ddd.ddd,dd to english numeric format ..... dddddd.dd
    sr.srchr(filesmask="tb.txt_3", search="\.", replace="", re_scope='per-line', ofile='tb.txt_4')
    sr.srchr(filesmask="tb.txt_4", search=",", replace=".", re_scope='per-line', ofile='tb.dat')

    sr.srchr(filesmask="coa.txt", search="^\t{2}(?P<A>\d{3})\t", replace="'\g<A>", re_scope='per-line', ofile='coa.dat')

if 0:
    u('work-db-create-setup')

if 0:
    u('data-rename-abc', '*.txt', 'aac')

    u('copy-files', 'aad-KNA1_Croatia_AllCust_100609.txt_', 'aad_xCMF.dat')
    u('copy-files', 'aae-LFA1_Croatia_752_100609.txt_', 'aae_xVMF.dat')
   
    u('data-replace-delimiter-by-tab', '*MF.dat')

    u('data-add-unique-id', '*MF.dat_')
    u('data-csv-header', '*MF.dat__')
    
    u('data-load-to-db', '*MF.dat__')
    u('export-pgtable', 'aad_xCMF')

if 0:
    pass
    u('run-sql-file', 'compare.sql')
    u('export-pgtable', '_adhoc.xxx')

'''
postgresql
    r epg public.invoi.{1,}     export pg table              

text (delimited file) 
    r cht.py abc\*.csv          count rows and sample
    r csvh.py abc\*.csv         report csv header
    r hdr.py abc\*.csv          report csv header 
'''
import glob, re, string, pepe.p_txt.bigtxt as bt

if 0:
    n = '\\x0D\\x0A'

    of = open('output.dat', 'wb')
    df = open('dropped.dat', 'wb')
    of.write('...\\n')
    
    for inpfile in glob.glob('*.txt'):
        for line in bt.lines(inpfile):  
            line = re.sub('\\n', '', line)       
            if re.match('\d{7}', line[:7]) and len(line.split('\t')) == 24:
                of.write(inpfile[:13]+'\t'+line+'\\n')
            else:
                df.write(inpfile[:13]+'\t'+line+'\\n')
    of.close()
    df.close()
    
if 1:
    from pepe.p_utils.data_add_unique_id import main as daui
    # add unique id to gl.dat
    daui('gl0.dat')
    
from pepe.glib import *

def main(infl, otfl):
    ''' standard template
    '''
    '''
        Title: Universally Unique Identifier Generator
        
        Function: uuid
            Generates a time-based UUID string <http://en.wikipedia.org/wiki/UUID>.
        
        Parameters:
            c - true to use your computers MAC code (default: false)
        
        Returns:
            A UUID string.
        
        About: License
            - Version 1.1 by Titan <http://www.autohotkey.net/~Titan/#uuid>.
            - Licenced under GNU GPL <http://creativecommons.org/licenses/GPL/2.0/>.
    '''

    ## redirect stdo to file
    #sys.stdout = open('stdo.log', 'w')
    ## close and redirect stdo back to normal
    #sys.stdout=sys.__stdout__

    of = open(otfl, 'w')
    lino = 0
    for inpfile in glob.glob(infl):
        for line in bt.lines(inpfile):
            line = re.sub('', '', line)
            #print line,
            of.write(line+'\\n')
    of.close()

# cut positioned text to columns
import pepe.p_txt.coltxt as ct
c = ct.ColumnsSpaces()
c.examine('o1.dat')
print c.spaces
#>> rows examined >>  14140
#[0, 7, 30, 61, 84, 103]

# Read text delimited file and save it as shelve database. Split each row to list. 
## To read in this way created shelve database use lsdb.py.
## rowsskip=1 means txt file has header

import pepe.p_utils.txt2sdb as txdb
txdb.main(infile="C:\..\...csv", cdelimiter="^", rowsskip=1)

# list info about shelve database
import pepe.p_utils.lsdb as lsdb
#lsdb.main(<shelve db name>)   # placeholder: replace <shelve db name> with a real path before enabling


def timeo(fun, n=1):
    start = time.clock()
    for i in range(n): fun()
    thetime = time.clock() - start
    print "%s: %.2f secs"%(fun.__name__, thetime)

#timeo(function)

import pepe.p_utils.encd as encd

if 0: # encode to utf8
    for f in glob.glob('*.txt'):
        print f
        #, inpEncoding='cp1250' # Navision
        #, inpEncoding='cp852' # PVODwin
        encd.main(    inpFile=f
                    , inpEncoding='utf-8' # PVODwin
                    , outEncoding='cp1250'
                    , outFileExt='.txt0')

if __name__ == "__main__":
    ''' '''
    if  len(sys.argv) <> 3:
        #sys.stderr.write('usage: copytime source destination')
        #sys.exit(2)
        print main.__doc__
        pass
    else:
        main(sys.argv[1], sys.argv[2])
        win32api.WinExec('start notepad output.txt')
"""

if __name__ == "__main__":
    # Emit the embedded skeleton template verbatim as _skel.py in the
    # current working directory.  'with' guarantees the file is closed
    # (and flushed) even if the write raises.
    with open('_skel.py', 'w') as of:
        of.write(SKEL)