#! /usr/bin/python
# -*- coding: cp1250 -*-

import re
import string
import sys
import glob
import os
import time
import shutil

import pepe.p_txt.bigtxt as bt
import pepe.p_datetime.stopwatches as pds
import pepe.p_dm.sdb as s
import pepe.p_oslevel.basic as osb
import pepe.p_sqlpg.csv2pg as csv2pg
import pepe.p_sqlpg.pg as pg

def iso_time_now():
    """Return the current local time as an ISO-8601 string (YYYY-MM-DDTHH:MM:SS)."""
    return time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())

def fw_slash(x):
    """Return *x* with every backslash replaced by a forward slash (Windows -> pg path)."""
    return re.sub(r'\\', '/', x)

class TxtImport:
    ''' Examine text delimited data file, prepare sql statements for load, load data

    a) read text delimited data file, find out columns width
    b) create CREATE TABLE statement
    c) create COPY statement
    d) load it as postgresql table.

    Syntax: txt2pg <csv file> "^" 1
    '''
    def __init__(self):
        ''' No state is set here; call setInputDefaults() before anything else. '''
        pass

    def setInputDefaults(self):
        """ Initialize internal variables to their defaults. """
        self.inputFileName = ''
        self.colDelimiter = '\t'
        self.hasHeader = 0
        self.username = 'postgres'

        self.debugmode = False

        self.hasHeaderString = ''
        self.queryCreateString = ''
        # pg does not see files on mapped substed disks (path must be via C disk)
        self.queryLoadString = ''
        #self.pgClientEncoding = 'win1250'
        #if os.name == 'posix': self.pgClientEncoding = 'UTF-8'
        self.pgClientEncoding = 'UTF-8'

    def setDeducedVariables(self):
        """ Derive the absolute input path and the target table name from inputFileName. """
        path = fw_slash(osb.wai())
        self.inputFile = path + self.inputFileName

        # pg does not see files on mapped substed disks (path must be via C disk)
        #self.replacePathDiskCR = ['R:', 'C:/AFT/root/rDisk/Clients']
        self.replacePathDiskCR = ['R:', 'C:/AFT/root/rDisk']
        self.inputFile = re.sub(  self.replacePathDiskCR[0]
                                , self.replacePathDiskCR[1]
                                , self.inputFile)

        self.inputFileNameOnly = os.path.split(self.inputFile)[1]

        # 2011-08-15_1803:
        # Replace every "." with "_" and use the result as the table name;
        # keeping the extension caused issues when importing data that was
        # exported with pgexport.
        # (raw string: '\.' is an invalid escape sequence in modern Python)
        self.tableName = re.sub(r'\.', '_', self.inputFileNameOnly)

        # we will ignore '#' character in file name
        self.tableName = re.sub('#', '', self.tableName)

    def queryCreateTable(self):
        """ Scan the input file and build queryCreateString (CREATE TABLE ...). """
        # normalize to 0/1: anything other than exactly 1 means "no header row"
        header_flag = 1 if self.hasHeader == 1 else 0
        self.queryCreateString = csv2pg.createTableString(
                                          self.inputFile
                                        , self.tableName
                                        , colDelimiter=self.colDelimiter
                                        , hasHeader=header_flag)

    def _execute(self, query):
        """ Connect to postgres as self.username and run a single query. """
        p = pg.Pg()
        p.debug_mode = self.debugmode
        p.user = self.username
        p.connect()

        p.query(query)

    def createTable(self):
        """ Execute the prepared CREATE TABLE statement. """
        self._execute(self.queryCreateString)

    def queryLoadTable(self):
        """ Build queryLoadString (SET client_encoding + COPY ... FROM). """
        #if self.hasHeader == 1 : self.hasHeaderString = 'CSV HEADER'
        if self.hasHeader == 1 : self.hasHeaderString = ''
        # strip the 20-space source indentation from the triple-quoted template
        self.queryLoadString = re.sub('\n {20}', '\n', """
                    SET client_encoding TO '%s';
                    COPY %s FROM '%s' with DELIMITER '%s' %s
                    """ % (   self.pgClientEncoding
                            , self.tableName
                            , fw_slash(self.inputFile)
                            , self.colDelimiter
                            , self.hasHeaderString
                            ))

    def loadData(self):
        """ Execute the prepared COPY statement. """
        self._execute(self.queryLoadString)

HELP = """#! /usr/bin/python
# -*- coding: cp1250 -*-

import re, string, sys, glob, os, time, shutil
from datetime import datetime as dt
import pepe.p_sqlpg.pg as pg
import pepe.p_sqlpg.txt2pg as ppt

if 1: # load table
    print dt.now()

    p = ppt.TxtImport()

    p.setInputDefaults()

    p.debugmode = True
    p.inputFileName='raw_gl_sample.#.csv'
    p.colDelimiter='\t'
    p.hasHeader=1
    p.setDeducedVariables()

    if 1: # scan file and create string
        p.queryCreateTable()
        of = open('#c.sql', 'w'); of.write(p.queryCreateString); of.close()

    if 1: # (modify query), create table and load data
        p.queryCreateString = open('#c.sql', 'r').read()
        
        p.queryLoadTable()
        of = open('#l.sql', 'w'); of.write(p.queryLoadString); of.close()
        
        p.createTable()
        p.loadData()

    print dt.now()

if 0: # select from table
    p = Pg()
    p.connect()

    p.query = 'select * from <table name> limit 3'
    for r in p:
        print r[:3]

    # select count
    p.query('select count(1) from <table>')
    for x in p:
        print x[0]

if 0: # drop table
    p.query('drop table <table name>')

if 0: # export table
    q = '''
    COPY    (select * from <table>)
            TO '%s' with DELIMITER '^'
    '''
    p.query(q)
"""

if __name__ == '__main__':
    # Emit the HELP usage template as a ready-to-edit driver script.
    # `with` guarantees the file handle is closed even if the write fails.
    with open('_txt2pg.py', 'w') as of:
        of.write(HELP)