# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 03:29:06 2015

@author: PC0x10010011
"""

import os, sys, json, codecs, re
from pymongo import MongoClient as mc
from pymongo.errors import DuplicateKeyError
from bson.errors import InvalidStringData, InvalidDocument
from optparse import OptionParser

class ImportData():
    """Buffered importer that parses a txt/json/sql/csv data file into dicts.

    Create it with a file path, adjust the class-level settings below on the
    instance, then iterate the generator returned by _import(): each item is
    one buffered block, i.e. a list of per-line dicts keyed by KEYS
    (or parsed JSON objects for .json files).
    """
    BLOCK = 64 * 1024                       #Default block buffer
    #EOL = '\r\n' if EOL == 'rn' else '\n'   #End of line
    EOL = '\n'                              #End-of-line marker used when splitting blocks
    KEYS = []                               #Column names used as the dict keys
    SEPARATER = ','                         #Column separator (original spelling kept)
    FILE_TYPE = None                        #txt/json/sql/csv; guessed from extension when None
    SKIP_LINES = None                       #Number of leading lines of the file to drop
    SKIP_COLUMNS = None                     #Skip some columns, default none skip
    SKIP_KEY_COLUMNS = None                 #Columns dropped from the keys only
    SKIP_DATA_COLUMNS = None                #Columns dropped from the data values only
    COLUMN_INDEXS = None                    #Just get the columns index in COLUMN_INDEXS
    KEY_COLUMNS = None                      #Keep only these indexes of the keys
    DATA_COLUMNS = None                     #Keep only these indexes of the data values
    LOG = None                              #Error-log file path, written via log_err()
    ENCODING = 'utf-8'                      #File encoding; overridden when a BOM is found
    SMART = None                            #Truthy: additionally split each column on spaces
    BLACK = []                              #Substrings stripped from every line first

    def __init__(self, fp):
        # fp: path of the data file to import. BOM sniffing happens right away
        # so self.ENCODING is settled before any data is read.
        self.fp = fp
        self.BOM = self._test_BOM()
        self.Errorcount = 0                 #Error line count

    def _test_BOM(self):
        """Sniff the first bytes of the file for a Unicode BOM.

        On a match, switches self.ENCODING to the matching codec name and
        returns the (bom_name, bom_bytes) pair; returns None when no BOM
        is present.

        NOTE(review): on little-endian hosts codecs.BOM equals codecs.BOM_LE
        (and BOM_UTF32 equals BOM_UTF32_LE), so those dict keys collapse and
        the later name wins; also the UTF-16 LE BOM is a byte prefix of the
        UTF-32 LE BOM and is listed first, so UTF-32 LE files would be
        reported as UTF-16 LE — confirm against real inputs.
        """
        bomlist = [codecs.BOM_UTF8,
                   codecs.BOM,
                   codecs.BOM_BE,
                   codecs.BOM_LE,
                   codecs.BOM_UTF32,
                   codecs.BOM_UTF32_BE,
                   codecs.BOM_UTF32_LE]
        bomname = {codecs.BOM_UTF8: 'BOM_UTF8',
                   codecs.BOM: 'BOM_UTF16',
                   codecs.BOM_BE: 'BOM_UTF16_BE',
                   codecs.BOM_LE: 'BOM_UTF16_LE',
                   codecs.BOM_UTF32: 'BOM_UTF32',
                   codecs.BOM_UTF32_BE: 'BOM_UTF32_BE',
                   codecs.BOM_UTF32_LE: 'BOM_UTF32_LE'}
        head = open(self.fp,'rb').read(16)
        for bom in bomlist:
            if head.startswith(bom):
                # 'BOM_UTF8'[7:] -> '8', 'BOM_UTF16_BE'[7:] -> '16_BE', etc.,
                # giving codec aliases such as 'utf_8' / 'utf_16_BE'.
                self.ENCODING = 'utf_' + bomname[bom][7:]
                print '[+]Bom Found: %s' % bomname[bom]
                return bomname[bom], bom
        return None

    def trim_BOM(self, firstblock):
        """Strip the decoded BOM characters from the start of the first block.

        Accepts either a list of lines (trims line 0) or a single string.
        """
        bom = unicode(self.BOM[1],self.ENCODING)
        if isinstance(firstblock, list):
            firstblock[0] = firstblock[0][len(bom):]
        else:
            firstblock = firstblock[len(bom):]
        return firstblock

    def block_decode(self, block):
        """Decode a whole block in one shot; returns a list of unicode lines.

        NOTE(review): when block is already unicode it is returned unsplit,
        unlike the list-of-lines result of the other branches.
        """
        if isinstance(block, unicode):
            return block
        if isinstance(block, list):
            block = self.EOL.join(block)
        return block.decode(self.ENCODING).split(self.EOL)

    def line_decode(self, block):
        """Decode line by line so one bad line does not abort the block.

        Lines that fail to decode are reported (and logged when LOG is set)
        but still appended in their raw byte form, since the `continue` is
        commented out.
        """
        result = []
        if not isinstance(block, list):
            block = block.split(self.EOL)
        for line in block:
            if not isinstance(line, unicode):
                try:
                    line = line.decode(self.ENCODING)
                except UnicodeDecodeError as UDE:
                        print '[-]DecodeError: %s' % line
                        if self.LOG:
                            log_err(self.LOG, 'DecodeError: %s\n' % line)
                        print UDE.reason
                        print UDE.message
                        #continue
            result.append(line)
        return result

    def _to_unicode(self, block):
        """Decode the block in bulk, falling back to per-line decoding."""
        try:
            return self.block_decode(block)
        except UnicodeDecodeError:
            return self.line_decode(block)

    ### return the complete data lines and the trailing incomplete line ###
    def block_trim(self, block):
        """Split a raw block into (complete lines, trailing partial line)."""
        if block == '':
            return None, None
        if isinstance(block, list):
            # readlines() already yields whole lines; nothing is left over.
            return block, ''
        if self.EOL not in block or block[-1] == self.EOL:
            return block, ''
        lines = block.split(self.EOL)
        return lines[:-1], lines[-1]

    def block_iter(self, file):
        """Generator yielding roughly BLOCK-sized chunks of lines from file.

        NOTE(review): the partial-line remainder returned by block_trim is
        assigned to `block` but then overwritten by the next readlines()
        call, so on the MemoryError fallback path a trailing fragment could
        be dropped — confirm whether that path is ever exercised.
        """
        block = ''
        result = ''
        while 1:
            try:
                block = file.readlines(self.BLOCK)
            except MemoryError:
                block += file.read(self.BLOCK)
            result, block = self.block_trim(block)
            if not result:
                break
            yield result

    def block_read(self):
        """Open self.fp (codecs-decoded when a BOM was found) and iterate it."""
        if self.BOM:
            fh = codecs.open(self.fp,'r',encoding=self.ENCODING)
            return self.block_iter(fh)
        fh = open(self.fp)
        return self.block_iter(fh)

    def smart_column(self, line):
        """Best-effort repair of a line whose raw split had the wrong arity.

        Collapses runs of the separator to a single one and masks separators
        that appear inside double quotes with '_&_' so a later split keeps
        quoted fields intact.

        NOTE(review): when SEPARATER is a regex metacharacter it is rewritten
        in place with a backslash escape; that escaped value then leaks into
        later plain str.split() calls in line2dict — verify intended.
        """
        if self.SEPARATER in ('|','(',')','[',']','{','}','.','?','*','+'):
            self.SEPARATER = '\\' + self.SEPARATER
        smart_quote = re.compile('"(.*?)"')
        smart_separater = re.compile(self.SEPARATER+'+')
        #smart_space = re.compile(' +')
        #smartcolumn = re.compile(self.SMART)

        separaters = smart_separater.findall(line)
        for sp in separaters:
            if len(sp) > 1:
                line = line.replace(sp,self.SEPARATER)
        quote_cols = smart_quote.findall(line)
        if quote_cols:
            for co in quote_cols:
                newcol = co.replace(self.SEPARATER,'_&_')
                line = line.replace(co, newcol)
        return line
    def black_strip(self, line):
        """Remove every blacklisted substring (self.BLACK) from the line."""
        for w in self.BLACK:
            line = line.replace(w, '')
        return line

    def line2dict(self, line):
        """Split one data line into a dict mapping KEYS -> column values.

        Returns None implicitly for blank lines or lines without the
        separator, and None explicitly when the column count cannot be
        reconciled with the key count even after smart_column().
        Exits the program when KEYS was never configured.
        """
        KEYERROR = False
        keys = self.KEYS
        values = []
        if keys == []:
            print '[!]SyntaxError: No column keys!'
            sys.exit()
        if self.BLACK:
            line = self.black_strip(line)
        if line.strip() and self.SEPARATER in line:
            try:
                cols = line.strip().split(self.SEPARATER)
                if self.SMART:
                    ###Separate each column with space###
                    for col in cols:
                        values.extend(col.split(' '))
                else:
                    values = [co.strip() for co in cols]
                    #values = cols
                count = len(values)
                # --skip and --cols are mutually exclusive whole-row filters.
                # NOTE(review): the filters below index `cols` (raw split),
                # not `values`, so with SMART set the two can disagree.
                if self.SKIP_COLUMNS and self.COLUMN_INDEXS:
                    print '[!]SyntaxError: Please use "--skip" and "--cols" separated'
                    sys.exit()
                elif self.SKIP_COLUMNS:
                    self.SKIP_KEY_COLUMNS = None
                    self.SKIP_DATA_COLUMNS = None
                    skip_indexs = self.get_index(self.SKIP_COLUMNS)
                    keys = [self.KEYS[i] for i in range(len(self.KEYS)) if i not in skip_indexs]
                    values = [cols[i] for i in range(count) if i not in skip_indexs]
                elif self.COLUMN_INDEXS:
                    self.KEY_COLUMNS = None
                    self.DATA_COLUMNS = None
                    column_indexs = self.get_index(self.COLUMN_INDEXS)
                    keys = [self.KEYS[i] for i in column_indexs]
                    values = [cols[i] for i in column_indexs]

                # One-sided skip filters (keys only / data only).
                if self.SKIP_KEY_COLUMNS:
                    skip_key_indexs = self.get_index(self.SKIP_KEY_COLUMNS)
                    keys = [self.KEYS[i] for i in range(len(self.KEYS)) if i not in skip_key_indexs]
                if self.SKIP_DATA_COLUMNS:
                    skip_data_indexs = self.get_index(self.SKIP_DATA_COLUMNS)
                    values = [cols[i] for i in range(count) if i not in skip_data_indexs]

                # One-sided keep filters (keys only / data only).
                if self.KEY_COLUMNS:
                    key_column_indexs = self.get_index(self.KEY_COLUMNS)
                    keys = [self.KEYS[i] for i in key_column_indexs]
                if self.DATA_COLUMNS:
                    data_column_indexs = self.get_index(self.DATA_COLUMNS)
                    values = [cols[i] for i in data_column_indexs]
            except IndexError:
                KEYERROR = True
            if len(values) != len(keys) or KEYERROR:
                # Arity mismatch: retry once with the smart_column() repair.
                cols = self.smart_column(line.strip()).split(self.SEPARATER)
                if len(cols) == len(keys):
                    values = cols
                else:
                    self.Errorcount += 1
                    if self.LOG:
                        log_err(self.LOG, 'KeyError: %s\n' % line.strip())
                    print '[-]KeyError: %s\n' % line.strip()
                    return
            if values:
                # Py2 map() over two sequences pads the shorter with None;
                # lengths are equal here, so this behaves like zip().
                return dict(map(lambda k,v: (k,v), keys, values))
            else:
                return None

    def get_sql_key(self, block):
        """Extract the column names from the first INSERT INTO line of a dump."""
        for line in block:
            if 'INSERT INTO' in line:
                line = line.split('(')[1].split(')')[0]
                return [k.strip(' ') for k in line.replace('`','').split(',')]
        return []

    def get_sql_data(self, block):
        """Keep VALUES tuple lines, stripping parentheses and single quotes."""
        return [line.replace('(','').replace('),','').replace("'","") for line in block if line.startswith('(')]

    def get_index(self, st):
        """Parse a column-index spec into a list of ints.

        Accepts a ready-made list, a comma list like '0,2,5', or a single
        range 'a-b' (end exclusive, via range()). Mixing '-' and ',' or
        using more than one '-' is rejected.
        """
        if not st:
            return []
        if isinstance(st, list):
            return st
        elif isinstance(st, str):
            for s in st:
                if s not in '-,1234567890':
                    raise Exception('Syntex error: No support with %s' % st)
            if (st.count('-') >0 and st.count(',') >0) or st.count('-') > 1:
                raise Exception('Skip column syntex error: %s' % st)
            elif st.count('-') == 1:
                return range(int(st.split('-')[0]),int(st.split('-')[1]))
            else:
                return [int(i) for i in st.split(',') if i]
        else:
            raise Exception('Syntex error: No support with %s' % st)

    def _skip_line(self, block):
        """Drop the first SKIP_LINES entries of the (first) block."""
        return block[self.SKIP_LINES:]

    def import_data_txt(self):
        """Generator: yield one list of line dicts per buffered block."""
        block = self.block_read()
        content = []
        BFIRST = True    #first block
        while 1:
            try:
                content = block.next()
            except StopIteration:
                break
            if BFIRST:
                # BOM trimming / line skipping only apply to the first block.
                if self.BOM:
                    content = self.trim_BOM(content)
                if self.SKIP_LINES:
                    content = self._skip_line(content)
                BFIRST = False
            if self.BOM == None:
                # No BOM means the file was opened raw; decode here.
                content = self._to_unicode(content)
            yield [self.line2dict(line) for line in content if line]

    def import_data_json(self):
        """Generator: yield lists of parsed JSON objects per block.

        Switches EOL to ', ' so block_trim splits between the objects of a
        one-line JSON array; the surrounding '[' and ']' are trimmed off.
        """
        self.EOL = ', '     #self.SEPARATER
        block = self.block_read()
        content = []
        BFIRST = True
        while 1:
            try:
                content = block.next()
            except StopIteration:
                break
            if BFIRST:
                # Re-split the first chunk on physical newlines before the
                # ', ' based splitting takes over.
                tmp = content[0].split('\n')
                content = tmp + content[1:]
                if self.BOM:
                    content = self.trim_BOM(content)
                if self.SKIP_LINES:
                    content = self._skip_line(content)
                if content[0].startswith('[{'):
                    content[0] = content[0][1:]
                BFIRST = False
            content = self._to_unicode(content)
            if content[-1][-1] == ']':
                content[-1] = content[-1][:-1]
            yield [json.loads(line) for line in content if line.startswith('{')]

    def import_data_sql(self):
        """Generator: yield line dicts built from a SQL dump's VALUES rows.

        KEYS is taken from the INSERT INTO statement of the first block and
        the separator is forced to ', ' to match dump formatting.
        """
        self.SEPARATER = ', '
        block = self.block_read()
        content = []
        BFIRST = True
        while 1:
            try:
                content = block.next()
            except StopIteration:
                break
            if BFIRST:
                if self.BOM:
                    content = self.trim_BOM(content)
                if self.SKIP_LINES:
                    content = self._skip_line(content)
                BFIRST = False
                self.KEYS = self.get_sql_key(content)
                #print self.KEYS

            if self.BOM == None:
                content = self._to_unicode(content)

            content = self.get_sql_data(content)

            yield [self.line2dict(line) for line in content if line]

    def import_data_csv(self):
        """Generator: yield line dicts from a CSV whose header is line one."""
        block = self.block_read()
        content = []
        BFIRST = True
        while 1:
            try:
                content = block.next()
            except StopIteration:
                break
            if BFIRST:
                # The header row of the first block becomes KEYS; with a BOM
                # the stream is already decoded, otherwise decode the header.
                if self.BOM:
                    content = self.trim_BOM(content)
                    self.KEYS = content[0].strip().split(self.SEPARATER)
                else:
                    self.KEYS = content[0].strip().decode(self.ENCODING).split(self.SEPARATER)
                content = content[1:]
                BFIRST = False
            if self.BOM == None:
                content = self._to_unicode(content)
            yield [self.line2dict(line) for line in content if line]

    def _import(self):
        """Dispatch to the right import generator based on FILE_TYPE.

        When FILE_TYPE is unset it is guessed from the file extension.
        Raises for unsupported types.
        """
        if self.fp and not self.FILE_TYPE:
           self.FILE_TYPE = os.path.basename(self.fp).split('.')[-1]

        if self.FILE_TYPE == 'txt':
            return self.import_data_txt()
        if self.FILE_TYPE == 'json':
            return self.import_data_json()
        if self.FILE_TYPE == 'sql':
            return self.import_data_sql()
        if self.FILE_TYPE == 'csv':
            return self.import_data_csv()
        raise Exception('TypeError: the file type is not support!')

### Log error in file ###
def log_err(logfp, msg):
    with open(logfp,'a') as f:
        try:
            f.write('%s\n' % msg.encode('utf-8'))
        except:
            f.write('%s\n' % msg)
    return

### insert one by one and log error line ###
def mongo_insert_one(collection, datalist, logfp):
    """Insert each document of datalist individually, logging bad ones.

    Fallback used when a bulk insert of a block fails: duplicate keys are
    silently skipped, documents mongo rejects as invalid are counted and
    appended to the log file at logfp, and anything unexpected is printed
    and re-raised.

    Returns the number of documents rejected as invalid.
    """
    errcount = 0
    for d in datalist:
        if not d:                       # line2dict may yield None for bad lines
            continue
        try:
            collection.insert(d)
        except DuplicateKeyError:
            pass                        # already imported: not an error
        except (InvalidStringData, InvalidDocument) as e:
            # Both invalid-data errors get identical handling; the message
            # keeps the concrete exception class name, matching the
            # previously duplicated branches.
            msg = '[-]%s: %s\n' % (type(e).__name__, d)
            print(msg)
            errcount += 1
            log_err(logfp, msg)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not intercepted just to print a message.
            print('[-]InsertError: %s\n' % d)
            raise

    return errcount

def get_options(argv):
    """Build the command-line parser and parse argv.

    Returns the (options, positional_args) pair produced by optparse.
    """
    parser = OptionParser("usage: %prog [options] dest_of_collection_name data_files_to_import")
    # (flag strings, add_option keyword arguments); action defaults to
    # 'store' unless a spec overrides it.
    specs = [
        (('-k', '--key'), dict(dest='keys', help="The column's keys.")),
        (('-s', '--separater'), dict(dest='sp', default=',', help='The separater of columns (Default:",").')),
        (('-t', '--type'), dict(dest='file_type', help='The data file type (Now support: txt,sql,json).')),
        (('-l', '--log'), dict(dest='log', help='The log file path.')),
        (('--buffer',), dict(dest='buffer', type='int', default=64 * 1024, help='Data buffer (Default: 64K).')),
        (('--EOL',), dict(dest='eol', help='The end of line (Default: "\\n".')),
        (('--encoding',), dict(dest='encoding', help='The file encoding (Default: "utf-8".')),
        (('--skip',), dict(dest='skip', help='The skip columns both keys and data-line (Default: no skip).')),
        (('--skip-key',), dict(dest='skip_key', help='The skip columns of keys (Default: no skip).')),
        (('--skip-data',), dict(dest='skip_data', help='The skip columns of data-line (Default: no skip).')),
        (('--skip-line',), dict(dest='skip_line', type='int', help='To skip some head lines of datafile (Default: no skip).')),
        (('--cols',), dict(dest='cols', help='Get both keys and data-line columns you wanted (Default: all columns).')),
        (('--key-cols',), dict(dest='key_cols', help='Get keys columns you wanted (Default: all columns).')),
        (('--data-cols',), dict(dest='data_cols', help='Get data-line columns you wanted (Default: all columns).')),
        (('--smart',), dict(dest='smart', action='store_true', help='Smart identify the space in each column.')),
        (('--black-word',), dict(dest='black', help='Black word in data.')),
    ]
    for flags, kwargs in specs:
        kwargs.setdefault('action', 'store')
        parser.add_option(*flags, **kwargs)
    return parser.parse_args(argv)

def mongo_handle(datahandler, mongodb, log_path=None, label=None):
    """Drain the block generator and bulk-insert every block into mongodb.

    Each block is filtered of None entries and inserted with insert_many;
    if mongo rejects the block as invalid the insert falls back to
    mongo_insert_one() so only the genuinely bad lines are logged/lost.

    log_path and label default to the script-level globals `logfp` and
    `datafp` (preserving the original implicit-global behavior) so the
    existing two-argument calls keep working; pass them explicitly to
    avoid the global dependency.
    """
    if log_path is None:
        log_path = logfp                    # script global set in __main__
    if label is None:
        label = datafp                      # script global set in __main__
    Totalcount = 0
    Errorcount = 0
    ### Try to insert data block, if error then insert one by one and log error line
    while 1:
        try:
            # next() builtin works on both Py2 and Py3 generators,
            # unlike the old .next() method call.
            data = [d for d in next(datahandler) if d]
        except StopIteration:
            break
        if not data:
            continue
        Totalcount += len(data)
        try:
            mongodb.insert_many(data)
        except (InvalidDocument, InvalidStringData):
            # data is always bound here (fixes the original's potential
            # NameError when the generator itself raised).
            Errorcount += mongo_insert_one(mongodb, data, log_path)
    print('[+]Import %s Data Finished:\n[+]TotalLines: %d\n[+]ErrorLines: %d\n' % (label, Totalcount, Errorcount))

if __name__ == '__main__':
    # Symbolic names users may pass for whitespace separators.
    SEPARATER = {'TAB': '\t', 'SPACE': ' '}
    opts, args = get_options(sys.argv[1:])
    if len(args) < 2:
        print('Argument Error!')
        sys.exit()

    collection_name = args[0]
    datafps = args[1:]
    db = mc('localhost', 27017).sgk
    collection = db.get_collection(collection_name)
    for datafp in datafps:
        dh = ImportData(datafp)
        # Default log name: <datafile-basename-without-extension>_log.txt.
        logfp = datafp.split('/')[-1].split('.')[:-1] if '/' in datafp else datafp.split('.')[:-1]
        logfp = '_'.join(logfp)
        logfp += '_log.txt'

        if opts.log:
            logfp = opts.log
        if opts.keys:
            dh.KEYS = opts.keys.split(',')
        if opts.sp:
            # Translate symbolic TAB/SPACE tokens into real characters.
            # Bugfix: the original unconditionally reset `separater = opts.sp`
            # after the loop, discarding the translation and making the
            # TAB/SPACE feature dead code.
            separater = opts.sp
            for name in SEPARATER:
                if name in separater:
                    separater = separater.replace(name, SEPARATER[name])
            dh.SEPARATER = separater
        if opts.file_type:
            dh.FILE_TYPE = opts.file_type
        if opts.buffer:
            dh.BLOCK = opts.buffer
        if opts.eol:
            dh.EOL = '\r\n' if opts.eol == 'rn' else '\n'
        if opts.encoding:
            dh.ENCODING = opts.encoding
        if opts.skip:
            dh.SKIP_COLUMNS = opts.skip
        if opts.skip_key:
            dh.SKIP_KEY_COLUMNS = opts.skip_key
        if opts.skip_data:
            dh.SKIP_DATA_COLUMNS = opts.skip_data
        if opts.skip_line:
            dh.SKIP_LINES = opts.skip_line
        if opts.cols:
            dh.COLUMN_INDEXS = opts.cols
        if opts.key_cols:
            dh.KEY_COLUMNS = opts.key_cols
        if opts.data_cols:
            dh.DATA_COLUMNS = opts.data_cols
        if opts.black:
            dh.BLACK = opts.black.split(',')

        dh.LOG = logfp
        dd = dh._import()
        mongo_handle(dd, collection)