#!/usr/bin/python
# -*- coding: utf-8 -*-

import shutil
import glob
import os
import string
import re
import pepe.glib as g
import pepe.p_txt.bigtxt as bt
import pepe.p_filesystem.basic as pfb
import pepe.p_filesystem.dfstats as dfs

def fslist(x, y):
    """Recursively list files (never folders) under folder *x* that match
    the pattern(s) *y* — a thin wrapper around pfb.fslist."""
    # PEP 8: a named function instead of a lambda assignment (E731)
    return pfb.fslist(x, patterns=y, recurse=True, return_folders=False)

# when True, process the local sample files instead of the real raw data
debug = False

# Map from a report's literal column-caption line to a short header-type id.
# Only the 'h03' layout is recognized at the moment; the key must match the
# caption line byte-for-byte (including trailing spaces).
headers = {
    "DATE   USER    BATCH    RELEASE     ITGN    HEADER   BT ACCT LS IC  RE AR <--  LOCATION   --> %ADJ.     D E B I T    C R E D I T    ": 'h03'
}

def sap_all_header(f):
    """Identify the report layout of file *f*.

    Compares each of the first 9 lines (with CR and LF stripped) against
    the known caption lines in the module-level `headers` map and returns
    the short header-type id (e.g. 'h03'), or 'unknown' if none matches.
    """
    lino = 0
    for line in bt.lines(f):
        lino += 1
        if lino >= 10:
            # the caption must appear near the top; stop instead of
            # scanning the whole (potentially huge) file for nothing
            break
        line = line.replace('\x0D', '').replace('\x0A', '')
        if line in headers:
            return headers[line]
    return 'unknown'

def headerOrder(ht):
    """Return a fresh, ordered list of output column names for header
    type *ht*.

    The order is: the DT bookkeeping column, then the fields captured
    from the page header, then the fields sliced from each data row.
    Raises KeyError for an unknown header type.
    """
    dt_cols = ['DT_SOURCE']
    page_header_cols = [
        'ACCT_MO', 'CYCLE', 'ENTITY', 'PAGE', 'RUN_DATE', 'REPORT_ID', 'CURRENCY',
    ]
    data_row_cols = [
        'DATE', 'USER', 'BATCH_NO', 'BATCH_ST', 'RELEASE_USER', 'RELEASE_DATE',
        'ITGN', 'HEADER_NUMBER', 'BT', 'ACCT', 'LS', 'IC', 'RE', 'AR',
        'LOCATION_SE', 'LOCATION_DEPT', 'LOCATION_SF', 'LOCATION_PT',
        'LOCATION_PL', 'LOCATION_SP', 'PERCENT_ADJ', 'DEBIT', 'CREDIT',
    ]
    layouts = {
        'h03': dt_cols + page_header_cols + data_row_cols,
    }
    # list(...) hands the caller an independent copy
    return list(layouts[ht])

def writeItem(o, ht, item=None):
    """Write one CRLF-terminated, tab-separated row to file object *o*.

    With *item* omitted the column captions for header type *ht* are
    written (the CSV header row); otherwise *item* must map every column
    name from headerOrder(ht) to a string, which is written stripped of
    surrounding whitespace.
    """
    # None replaces the original mutable default argument ([]); the
    # explicit [] check keeps backward compatibility for callers that
    # passed an empty list to request the header row.
    order = headerOrder(ht)
    if item is None or item == []:
        # header row: the column captions themselves
        o.write('\t'.join(order))
    else:
        # data row, values in canonical column order
        o.write('\t'.join(item[key].strip() for key in order))
    o.write('\x0D\x0A')

# ---------------------------------------------------------------------------
# Main extraction pass.
# For every raw report file: detect its header type, then route each line to
# extracted data rows (output_<type>.csv), dropped non-data lines
# (dropped_<type>.#), per-currency validation totals (totals_<type>.#) and a
# processed-files log (files_processed_<type>.#).  Lines matching the
# data-row pattern that still fail field extraction go to bad-lines.#.
# `if 1:` serves only to create an indented "main" section.
# ---------------------------------------------------------------------------
if 1:
    o = {}    # header-type -> extracted-rows output file
    d = {}    # header-type -> dropped (non-data) lines file
    fp = {}   # header-type -> processed-files log
    tls = {}  # header-type -> validation-totals file
    # open all files needed
    for h in headers:
        tls[headers[h]] = open('totals_' + headers[h] + '.#', 'wb')
        o[headers[h]] = open('output_' + headers[h] + '.csv', 'wb')
        d[headers[h]] = open('dropped_' + headers[h] + '.#', 'wb')
        fp[headers[h]] = open('files_processed_' + headers[h] + '.#', 'wb')
        # writing headers
        writeItem(o[headers[h]], headers[h])
        # NOTE(review): "CALCULAED"/"EXCTRACTED" look like typos, but this is
        # runtime output a downstream parser may rely on — confirm before fixing
        tls[headers[h]].write("DIFFERENCE_CREDIT\tDIFFERENCE_DEBIT\tCALCULATED_CREDIT\tCALCULAED_DEBIT\tEXCTRACTED_CREDIT\tEXCTRACTED_DEBIT\tORIGINAL_LINE\tFILE_NAME\n")
    b = open('bad-lines.#', 'wb')

    # files that had a CURRENCY totals line but no extracted data rows
    issued_files = []
    # '\x5C' is a backslash, appended separately because a raw string
    # literal cannot end with a backslash
    files_dest = r"""C:\temp_kf\Cyrill_Rawdata""" + '\x5C'
    files_masks = [
        [r"""Armada\2010-09-20\AUMARM2004R83""" + '\x5C'],
        [r"""Armada\2010-09-20\E8MARM2001R""" + '\x5C'],
        [r"""Armada\2010-09-20\E8MARM2002R""" + '\x5C'],
        [r"""Armada\2010-09-20\S2MARM2001R71""" + '\x5C']
    ]

    # debug mode: process the local sample files instead of the raw data
    if debug == True:
        files_dest = ''
        files_masks = [
            [r"""C:\AFT\devel\2010-11-15_1640kf_extraction_h3\sample_files""" + '\x5C']
        ]
       

    for fm in files_masks:
    
        # every file (recursively) under the mask folder
        files = fslist(files_dest + fm[0], "*")
        print len(files),' found in ',fm

        for f in files:
            
            file_info = dfs.dfstats(f, options=[])  # file stats; 'size' used below
            fname = string.split(f, '\x5C')[-1]     # basename; NOTE(review): unused
            print f
            
            # source path relative to the Cyrill_Rawdata root, used as row id
            dt_source = f[f.find('Cyrill_Rawdata'):]
            dt_header = sap_all_header(f)
            print dt_header

            if dt_header == 'unknown':
                print '  File was skipped.'

            else:

                fp[dt_header].write(dt_source + '\t' + str(file_info['size']) + '\x0D\x0A')

                # running credit/debit sums per currency code seen in this file
                validation_totals = {}                
                # fields parsed from page-header lines, merged into every row;
                # '?' marks a field not (yet) seen in this file
                hdr = {
                    'DT_SOURCE' : dt_source,
                    'ACCT_MO'   : '?',
                    'CYCLE'     : '?',
                    'ENTITY'    : '?',
                    'PAGE'      : '?',
                    'RUN_DATE'  : '?',
                    'REPORT_ID' : '?',
                    'CURRENCY'  : '?'
                }
                tpd_print_next_line = False  # NOTE(review): set but never used

                lino = 0
                extr_line_count = 0  # data rows successfully matched in this file
                for line in bt.lines(f):
                    line = re.sub('\x0D', '', line)
                    line = re.sub('\x0A', '', line)
                    
                    # split line in case the '\x0C' (form feed) character is
                    # interpreted as a line delimiter
                    if string.count(line, '\x0C') == 0:
                        lines = [line[:]]
                    else:
                        lines = string.split(line, '\x0C')

                    for line in lines:
                        lino += 1
                        
                        # valid data line: fixed-width row whose columns from 74
                        # onward look like " NN NNNN AA NN NN NN"
                        if dt_header == 'h03' and re.search('^.{73} \d{2} \d{4} [A-Z0-9]{2} \d{2} \d{2} \d{2}', line):
                            extr_line_count += 1            # increase extracted line counter
                            order = headerOrder(dt_header)
                            if dt_header == 'h03':
                                try:
                                    # fixed-column slicing of the h03 layout
                                    itm = {}
                                    itm['DATE'] = line[0:6].strip() 
                                    itm['USER'] = line[7:13].strip() 
                                    itm['BATCH_NO'] = line[14:18].strip() 
                                    itm['BATCH_ST'] = line[19:21].strip() 
                                    itm['RELEASE_USER'] = line[22:28].strip() 
                                    itm['RELEASE_DATE'] = line[29:35].strip() 
                                    itm['ITGN'] = line[36:43].strip() 
                                    itm['HEADER_NUMBER'] = line[44:52].strip() 
                                    itm['BT'] = line[53:55].strip() 
                                    itm['ACCT'] = line[56:60].strip() 
                                    itm['LS'] = line[61:63].strip() 
                                    itm['IC'] = line[64:66].strip() 
                                    itm['RE'] = line[67:70].strip() 
                                    itm['AR'] = line[71:73].strip() 
                                    itm['LOCATION_SE'] = line[74:76].strip() 
                                    itm['LOCATION_DEPT'] = line[77:81].strip() 
                                    itm['LOCATION_SF'] = line[82:84].strip() 
                                    itm['LOCATION_PT'] = line[85:87].strip() 
                                    itm['LOCATION_PL'] = line[88:90].strip() 
                                    itm['LOCATION_SP'] = line[91:93].strip() 
                                    itm['PERCENT_ADJ'] = line[94:99].strip() 
                                    itm['DEBIT'] = line[100:116].strip() 
                                    itm['CREDIT'] = line[117:].strip()
                                    # raises KeyError when no CURRENCY header line
                                    # preceded this row; caught below
                                    validation_totals[hdr['CURRENCY']]['credit'] += g.string2number(itm['CREDIT']) 
                                    validation_totals[hdr['CURRENCY']]['debit']  += g.string2number(itm['DEBIT'])
                                except:
                                    # print to bad lines (could not be extracted)
                                    b.write('%d\t%s\t%s\t%s\x0D\x0A'%(lino, line, dt_header, dt_source))
                            # NOTE(review): this runs even when extraction failed
                            # above — itm may then still hold the previous row's
                            # data (or be undefined on a first-row failure)
                            itm.update(hdr)
                            writeItem(o[dt_header], dt_header, itm)

                        # line to drop
                        else:
                            
                            # dropping line as non-data line
                            d[dt_header].write(line + '\x0D\x0A')

                            # catching info from header lines (fixed columns)
                            if line[0:8].strip() == "ACCT-MO:":
                                hdr['ACCT_MO'] = line[8:17].strip()
                                hdr['CYCLE'] = line[23:45].strip()
                                hdr['ENTITY'] = line[52:90].strip()
                                hdr['PAGE'] = line[126:].strip()
                            elif line[0:9].strip() == "RUN DATE:":
                                hdr['RUN_DATE'] = line[9:28].strip()
                            elif line[0:11] == " REPORT-ID:":
                                hdr['REPORT_ID'] = line[11:26].strip()
                            elif line[0:28] == "                   CURRENCY:":
                                hdr['CURRENCY'] = line[28:].strip()
                                # start credit/debit accumulators for a
                                # newly seen currency
                                if not validation_totals.has_key(hdr['CURRENCY']):
                                    validation_totals[hdr['CURRENCY']] = {
                                        'credit'    : 0.0,
                                        'debit'     : 0.0
                                    }
                            # validation totals: 'CURRENCY' in the header-number
                            # column flags the report's own credit/debit totals
                            elif line[44:52] == 'CURRENCY':
                                de = line[100:116].strip()  # extracted debit total
                                ce = line[117:].strip()     # extracted credit total
                                if extr_line_count > 0:
                                    # write differences between the report's own
                                    # totals and our accumulated sums, keyed by
                                    # the currency code at columns 74:76
                                    tls[dt_header].write("%f\t%f\t%f\t%f\t%s\t%s\t%s\t%s\x0D\x0A" % (
                                        g.string2number(ce) - validation_totals[line[74:76]]['credit'],
                                        g.string2number(de) - validation_totals[line[74:76]]['debit'],
                                        validation_totals[line[74:76]]['credit'],
                                        validation_totals[line[74:76]]['debit'],
                                        ce,
                                        de,
                                        line,
                                        hdr['DT_SOURCE']
                                    ))
                                else:
                                    # totals line but no data rows extracted:
                                    # remember the file as problematic
                                    if not hdr['DT_SOURCE'] in issued_files:
                                        issued_files.append(hdr['DT_SOURCE'])
                                

                                

    # closing all files
    for h in headers:
        o[headers[h]].close()
        d[headers[h]].close()
        fp[headers[h]].close()
        tls[headers[h]].close()
    b.close()

    # reporting issues: one problematic source path per line
    if len(issued_files) > 0:
        isf = open('issued_files.csv','wb')
        for f in issued_files:
            isf.write('%s\x0D\x0A' % (f))
        isf.close()

'''
Rawdata\2010-09-20\AUMARM2004R83\AUMARM2004R83.1009.txt

ACCT-MO:   0102  CYCLE:   1                  ENTITY:  XX XXXXX XXXX                         ARM4202P.D.03.03             PAGE:    1 
RUN DATE: 03/01/01  16:03:05                       X X X X X X                                                                      
 REPORT-ID:22004                             A/R SUB-SYSTEM TRANSACTION DETAIL REPORT                                               
              TRANSACTION DETAIL REPORT - GERMANY                                                                                   
                   CURRENCY:  DM                                                                                                    
                                                                                                                                    
DATE   USER    BATCH    RELEASE     ITGN    HEADER   BT ACCT LS IC  RE AR <--  LOCATION   --> %ADJ.     D E B I T    C R E D I T    
              NO   ST USER   DATE           NUMBER                        SE DEPT SF PT PL SP                                       
                                                                                                                                    
211201 AUTO   A435 RL 449237 241201 8344077 J5101492 4  1026 83 00     83 83 0000 00 00 00 00                 943.65                
190201 449237 GD94 RL 449237 200201 8393868 4031814  4  1002 83 00     83 83 0000 00 00 00 00                              25,262.88
'''
