#! /usr/bin/python
# -*- coding: cp1250 -*-

# TODO: make the DATA['LINES_LENGTHS'] report easier to read

import re, os, string, sys
import pepe.p_dm.sdb as shelve_store
import pepe.p_filesystem.basic as pfb
import pepe.p_txt.bigtxt as bt

def zf(x, y):
    '''Convert x to str and zero-pad it on the left to width y.'''
    # str.zfill is the portable equivalent of the deprecated string.zfill()
    return str(x).zfill(y)

def sh_write(dbkey, data, db):
    '''Store data under dbkey in the shelve database file db.'''
    return shelve_store.swrite(dbkey, data, db)

def sh_listdb(db):
    '''List the keys stored in the shelve database file db.'''
    return shelve_store.slist(db)

def sh_readkey(dbkey, db):
    '''Read the value stored under dbkey from the shelve database file db.'''
    return shelve_store.sread(dbkey, db)

class Dti:
    ''' delimited text inspection class'''
    def __init__(self, column_delimiter='\t', file_path='.',
                 file_mask='*', dbkey='', skip_header=0, write_big_file=0, debug=0): # e.g. file_mask='*.txt;*.log'
        ''' '''
        self.skip_header = skip_header
        self.column_delimiter = column_delimiter
        self.file_path = file_path
        self.file_mask = file_mask
        self.write_big_file = write_big_file
        self.debugMode = debug

        if dbkey == '' : self.database_key = re.sub('\W{1,}','', self.file_mask)
        else: self.database_key = dbkey
        self.database = '#' + self.database_key + '_db.dti'
        self.one_big_file = '#' + self.database_key + '_onebigfile.dti'
        #self.error_file = '#' + self.database_key + '_errors.dti'

    def process_data(self):
        ''' '''
        DATA = {'DATA_DELIMITERS_COUNTS':{}, 'DATA_LINES_COUNT': 0, 'COLUMNS_VALUES':{},
                'COLUMNS_SPANS':{}, 'LIST_INPUT_FILES':[], 'COLUMN_DELIMITER':self.column_delimiter,
                'FILE_PATH':self.file_path,'FILE_MASK':self.file_mask,'DATABASE_KEY':self.database_key,
                'LINES_LENGTHS':{}, 'HEADER':[]}
        DATA['LIST_INPUT_FILES'] = pfb.fslist(self.file_path, self.file_mask)
        if self.write_big_file == 1: big_file = open(self.one_big_file, 'w')
        
        for inp_file in DATA['LIST_INPUT_FILES'][:]:
            #print inp_file
            for line in bt.lines(inp_file):

                # last line in file(s) could be without \n ... play safe and remove all \n
                if line[-1:] == '\n': line = line[:-1]

                DATA['DATA_LINES_COUNT'] += 1

                if self.skip_header == 1 and DATA['DATA_LINES_COUNT'] == 1:
                    DATA['HEADER'] = string.split(line, self.column_delimiter)
                    if self.debugMode == 1: print DATA['HEADER']
                else:
                    if self.debugMode == 1: print DATA['DATA_LINES_COUNT']
                    if self.write_big_file == 1: big_file.write(line+'\n')
                    cols = string.split(line, self.column_delimiter)
                    if self.debugMode == 1: print cols
                    cols_count = len(cols)

                    ######
                    # LINES_LENGTHS
                    line_len = len(line)
                    if DATA['LINES_LENGTHS'].has_key(line_len):
                        DATA['LINES_LENGTHS'][line_len] += 1
                    else:
                        DATA['LINES_LENGTHS'][line_len] = 1
                    # DELIMITERS
                    if DATA['DATA_DELIMITERS_COUNTS'].has_key(cols_count):
                        if inp_file not in DATA['DATA_DELIMITERS_COUNTS'][cols_count]:
                            DATA['DATA_DELIMITERS_COUNTS'][cols_count].append(inp_file)
                    else:
                        DATA['DATA_DELIMITERS_COUNTS'][cols_count] = [inp_file]
                    # check inconsistent delimiters counts ...
                    if  len(DATA['DATA_DELIMITERS_COUNTS'].keys()) > 1:
                        print """DATA['DATA_DELIMITERS_COUNTS'].keys() > 1 !!!:"""
                        print DATA['DATA_DELIMITERS_COUNTS'].keys()
                        print 'LINE >>',DATA['DATA_LINES_COUNT'],line
                        sys.exit(2)

                    ######
                    # COLUMNS
                    cno = 0
                    for col_index, col in enumerate(cols):
                        cno += 1

                        # len() is time expensive, therefore
                        if cols[col_index] == '': col_len = 0
                        else:
                            col_len = len(cols[col_index])

                        # {}.has_key/get() is time expensive, therefore
                        if  (self.skip_header == 0 and DATA['DATA_LINES_COUNT'] == 1) or \
                            (self.skip_header == 1 and DATA['DATA_LINES_COUNT'] == 2):
                            DATA['COLUMNS_VALUES'][cno] = [cols[col_index]]
                            DATA['COLUMNS_SPANS'][cno] = [col_len,cols[col_index]]
                        else:
                            if self.debugMode == 1: print DATA['COLUMNS_VALUES']
                            if len(DATA['COLUMNS_VALUES'][cno]) < 4:
                                if col not in DATA['COLUMNS_VALUES'][cno]:
                                    DATA['COLUMNS_VALUES'][cno].append(col)
                            if DATA['COLUMNS_SPANS'][cno][0] < col_len:
                                DATA['COLUMNS_SPANS'][cno] = [col_len,cols[col_index]]

        sh_write(self.database_key, data=DATA, db=self.database)
        if self.write_big_file == 1: big_file.close()

class DbDti:
    ''' '''
    def __init__(self, dbkey=''):
        ''' '''
        self.dbkey = dbkey
        self.db = '#' + self.dbkey + '_db.dti'
        self.DATA = sh_readkey(self.dbkey, self.db)

    def list_columns_values(self):
        print '\n--^^^'+'-'*40,self.dbkey,'>>> list_columns_values()\n'
        for k in sorted(self.DATA['COLUMNS_VALUES'].keys()):
            print zf(k,3),'\n\tCOLUMNS_VALUES(4max):', self.DATA['COLUMNS_VALUES'][k]

    def list_columns_spans(self):
        print '\n--^^^'+'-'*40,self.dbkey,'>>> list_columns_spans()\n'
        for k in sorted(self.DATA['COLUMNS_SPANS'].keys()):
            print zf(k,3),'\n','\tCOLUMNS_SPANS:', self.DATA['COLUMNS_SPANS'][k]

    def list_data_delimiters_counts(self):
        print '\n--^^^'+'-'*40,self.dbkey,'>>> list_data_delimiters_counts()\n'
        print 'DATA_DELIMITERS_COUNTS :', self.DATA['DATA_DELIMITERS_COUNTS'].keys()

    def list_info(self):
        print '\n--^^^'+'-'*40,self.dbkey,'>>> list_info()\n'
        if not self.dbkey in ('BSEG','REGUP'):
            print '    column_delimiter : ', self.DATA['COLUMN_DELIMITER']
            print '    lines_lengths : ', self.DATA['LINES_LENGTHS']
            print '    file_path : ', self.DATA['FILE_PATH']
            print '    file_mask : ', self.DATA['FILE_MASK']
            print '    database_key : ', self.DATA['DATABASE_KEY']

        print '    DATA_DELIMITERS_COUNTS :', str(self.DATA['DATA_DELIMITERS_COUNTS'].keys())
        print '    DATA_LINES_COUNT :', str(self.DATA['DATA_LINES_COUNT'])
        print '    len(LIST_INPUT_FILES) :', str(len(self.DATA['LIST_INPUT_FILES']))

        print '\n--^^^'+'-'*40,self.dbkey,'>>> list_columns...()\n'
        for ix, k in enumerate(sorted(self.DATA['COLUMNS_SPANS'].keys())):
            if len(self.DATA['HEADER']) > 0:
                print zf(k,3) + '_' + self.DATA['HEADER'][ix]
            else:
                print zf(k,3)
            print '    COLUMNS_VALUES(4max):', self.DATA['COLUMNS_VALUES'][k]
            print '    COLUMNS_SPANS:', self.DATA['COLUMNS_SPANS'][k]

    def create_table_postgresql(self):
        ''' '''
        print '\n--^^^'+'-'*40,self.dbkey,'>>> create_table_postgresql()'

        FROM_PATH = re.sub(r'\\', '/', os.getcwd())
        FROM_PATH = re.sub('R:/Clients/prague-dm/', 'C:/AFT/data/drive_r/Clients/prague-dm/', FROM_PATH)
       
        res = """
        --SET client_encoding TO win1250;
        SET client_encoding TO 'UTF-8';
        SET standard_conforming_strings TO on;
        --DROP TABLE """ + self.dbkey + """;
        CREATE TABLE """ + self.dbkey + """ (\n            """
        for ix,k in enumerate(sorted(self.DATA['COLUMNS_SPANS'].keys())):
            if len(self.DATA['HEADER']) > 0:
                res = res + 'c' + str(zf(k,3)) + '_' + self.DATA['HEADER'][ix] + ' VARCHAR(' + str(self.DATA['COLUMNS_SPANS'][k][0] + 1) + '),\n' + ' '*12
            else:
                res = res + 'c' + str(zf(k,3)) + ' VARCHAR(' + str(self.DATA['COLUMNS_SPANS'][k][0] + 1) + '),\n' + ' '*12
        res = res[:-2] + '\n            );\n        '
        res = res + """COPY """ + self.dbkey +  """ FROM \'""" + FROM_PATH + '/' + self.DATA['LIST_INPUT_FILES'][0]
        res = res + """_noheader\' with DELIMITER '<\t|~^>';\n"""
        res = re.sub('^\s{,}', '', res)
        res = re.sub('\n\s{,}', '\n', res)
        res = re.sub('\nc', '\n    c', res)
        res = re.sub(',\n\);', '\n);', res)
        print res

    def create_sas_statements(self):
        ''' '''
        pass

# ---------------------------------------------------------
if __name__ == '__main__':
    # other runs, kept as invocation examples:
    #Dti(file_mask='BSEG*').process_data()
    #Dti(file_mask='10.TXT', column_delimiter='|', dbkey='10').process_data()
    Dti(file_mask='test.TXT', column_delimiter='\t', dbkey='test').process_data()

    # redirect the report output to a log file; always restore stdout and
    # close the handle, even if the report raises
    log_file = open('#ALL_log.dti', 'w')
    sys.stdout = log_file
    try:
        #DbDti(dbkey='10').list_info()
        #DbDti(dbkey='10').create_table_postgresql()
        DbDti(dbkey='test').list_info()
    finally:
        sys.stdout = sys.__stdout__
        log_file.close()
