# coding:utf-8
import argparse
import os
import re
import fulltext
from io import BytesIO
import docx
import logging
import gevent
import sys
import xlrd
import time
from gevent import monkey

monkey.patch_all()
import pysequoiadb
from pysequoiadb import client
from pysequoiadb.error import SDBBaseError
from pysequoiadb.error import SDBEndOfCursor
import pandas as pd

# --- CLI arguments ---------------------------------------------------------
flags = argparse.ArgumentParser()
flags.add_argument("--species_flag", type=int, default=0, help="num of flag")
flags.add_argument("--sdb_ip", type=str, help="sdb ip")
args = flags.parse_args()

# Supported file extensions, grouped by handler:
# [0:2] plain text, [2:4] Word documents, [4:6] Excel workbooks.
suffix = ['txt', 'csv', 'doc', 'docx', 'xlsx', 'xls']
# Failure reasons written into the "sensitive_words" field when a file
# cannot be scanned (index 0) or has an unsupported extension (index 1).
reason = ["can't read","others file"]
# sdb_ip = "10.244.171.142"
# SequoiaDB connection settings.
# NOTE(review): credentials are hard-coded in source — move to config/env.
sdb_ip = args.sdb_ip
sdb_port = "11810"
sdb_user = "peter"
sdb_password = "Foxconn99."
ocr_space_name = "ocr"            # collection space holding metadata records
ocrlob_space_name = "ocrlob"      # collection space holding the file LOBs
ocr_collection = "ocrsource"
ocrlob_collection = "ocrsourcelob"
sensitive_collection = "sensitive"
# Seed set of sensitive words; extended at runtime from the DB
# (see get_sensitivate_words).
sensitive_words = {"上午", "测试", '加班', '人', '天', '的'}
# Records selected by main(); each entry is a dict built by build_record().
fileids = []
logging.basicConfig(filename="/home/hadoop/execu/run.log",
                    level=logging.INFO,
                    format='%(levelname)s %(message)s',
                    filemode='w')

# Counters: SUM_ = files fully processed; eight[0] = files flagged 88
# (scanned successfully).
SUM_ = 0
eight = [0]


def chunk_read(lob, size):
    """Read *size* bytes from *lob* and return them as one byte string.

    Small objects (< 500 KB) are read in a single call; larger objects
    are read in 4 (< 5 MB) or 8 (>= 5 MB) sequential chunks so one huge
    read is never issued against the LOB.

    Fixes vs. the original: the redundant ``seek`` calls (which always
    seeked to the position the stream was already at), the quadratic
    string ``+=`` accumulation, and the debug total-check print are gone;
    the same byte ranges are read in the same order.
    """
    print('chunk read')
    if size < 500000:
        return lob.read(size)
    n_chunks = 4 if size < 5000000 else 8
    step = size // n_chunks       # remainder is picked up by the last read
    parts = []
    remaining = size
    while remaining > 0:
        want = min(step, remaining)
        parts.append(lob.read(want))
        remaining -= want
    # b"" == "" on Python 2, so this join works for both str and bytes.
    return b"".join(parts)


def final_deal(lo, ocr):
    """Read one LOB fully, scan it for sensitive words and update its
    metadata record.

    lo: dict with keys 'lob' (SequoiaDB lob handle), 'name' (unicode
        file name) and '_id' (metadata record id).
    ocr: metadata collection that filter_sensitive() writes flags into.

    Fix: the bare ``except: pass`` that silently swallowed every failure
    now logs the traceback (the greenlet is still kept alive).
    """
    print('-' * 20)
    global SUM_
    lob = lo.get('lob')
    name = lo.get('name')
    name = name.encode("utf8")  # byte string for logging/printing (Py2)
    id_ = lo.get('_id')
    size = lob.get_size()
    try:
        print('执行', name)
        t_start = time.time()
        out = chunk_read(lob, size)
        print('chunk read end!!')
        logging.info("{0} 读取对象时间为: {1:.4f}".format(name,
                                                   time.time() - t_start))
        # Re-wrap the bytes in a BytesIO so downstream handlers can treat
        # it like a file object.
        t_lob = BytesIO()
        t_lob.write(out)
        t_dic = {"name": name, "lob": t_lob, 'id': id_}
        filter_sensitive(t_dic, sensitive_words, ocr)
        print('already:', name)
        logging.info("{0} 处理完一个时间为: {1:.4f}".format(name,
                                                    time.time() - t_start))

        SUM_ += 1
    except Exception:
        logging.exception("%s final_deal failed", lo.get('name'))
        print(lo.get('name'), 'final error ')


def regular_filter(content, words):
    """Return the members of *words* whose regex matches *content*.

    Each word is used as a regular-expression pattern without escaping
    (matching the original behaviour), so metacharacters act as regex.
    Duplicate words are collapsed; result order is unspecified.

    Fix: *content* arriving as a UTF-8 byte string used to be searched
    with unicode patterns, triggering an implicit ASCII decode that
    raised UnicodeDecodeError on any non-ASCII document. The content is
    now decoded explicitly (undecodable bytes ignored). The throwaway
    pattern dict and the second loop are gone; ``re`` caches compiled
    patterns, so per-word compile objects are unnecessary.
    """
    try:                      # Py2/Py3 compatible text type
        text_type = unicode
    except NameError:
        text_type = str
    if isinstance(content, bytes) and not isinstance(content, text_type):
        content = content.decode('utf8', 'ignore')
    kw = []
    for word in set(words):
        if not isinstance(word, text_type):
            try:
                word = text_type(word)
            except UnicodeDecodeError:
                word = word.decode('utf8')
        if re.search(word, content):
            kw.append(word)
    return kw


def filter_sensitive(lob_dict, words, cs):
    """Route one in-memory file to the scanner matching its extension.

    Text-like files (txt/csv) go to deal_txt_or_csv, Word files
    (doc/docx) to deal_doc, Excel files (xlsx/xls) to deal_excel;
    anything else is flagged as unsupported via deal_others.
    """
    ext = lob_dict.get("name").rsplit(".")[-1]
    if ext in suffix[:2]:
        deal_txt_or_csv(lob_dict, words, cs)
        return
    if ext in suffix[2:4]:
        deal_doc(lob_dict, words, cs)
        return
    if ext in suffix[4:]:
        deal_excel(lob_dict, words, cs)
        return
    deal_others(lob_dict, cs)


def deal_others(file, cs):
    """Mark a record whose extension is unsupported: flag 77 with
    reason[1] ("others file")."""
    update_rule = {
        "$set": {
            "species_flag": 77,
            "sensitive_words": [reason[1]],
        }
    }
    cs.update(update_rule, condition={"_id": file.get("id")})


def deal_txt_or_csv(file, words, cs):
    try:
        print 'deal txt csv'
        t_start = time.time()
        lob = file.get("lob")
        content = lob.getvalue()
        kj = regular_filter(content, words)
        logging.info("{0} 正则处理时间:{1:.4f}".format(file.get('name'), time.time() - t_start))
        print 'file:', kj, '\n'
        rule = {"$set": {"species_flag": 88,
                         "sensitive_words": kj}}
        eight[0] += 1
        cs.update(rule, condition={"_id": file.get("id")})
    except:
        rule = {"$set": {"species_flag": 77,
                         "sensitive_words": [reason[0]]}}
        cs.update(rule, condition={"_id": file.get("id")})


def deal_doc(file, words, cs):
    """Scan a Word document (.doc or .docx) for sensitive words.

    .doc files go through the ``fulltext`` extractor; .docx files are
    parsed with python-docx (paragraph text plus every table cell). On
    success the record is updated with species_flag 88 and the matched
    words; on parse failure it gets flag 77 with reason[0] ("can't read").

    file: dict with 'name', 'lob' (BytesIO) and 'id' keys.
    words: iterable of sensitive words to search for.
    cs: collection whose record (matched on _id == file['id']) is updated.
    """
    print('deal docx')
    lob = file.get("lob")  # lob: BytesIO holding the whole document
    name = file.get("name")
    sfx = name.rsplit(".")[-1]
    t_start = time.time()
    if sfx == "doc":
        print('deal doc')
        # NOTE(review): fulltext.get is usually given a path; passing a
        # BytesIO relies on its file-object support — confirm.
        output = fulltext.get(lob, None)
        if output is not None:
            doc_str = output  # extracted text (returned as unicode)
            kj = regular_filter(doc_str, words)
            logging.info("{0} 正则处理时间:{1:.4f}".format(name, time.time() - t_start))
            print('file:', kj, '\n')
            rule = {"$set": {"species_flag": 88,
                             "sensitive_words": kj}}
            eight[0] += 1
        else:
            rule = {"$set": {"species_flag": 77,
                             "sensitive_words": [reason[0]]}}
        cs.update(rule, condition={"_id": file.get("id")})

    else:
        try:
            doc = docx.Document(lob)
        except:
            print(file.get("name"), 'docx error')
            rule = {"$set": {"species_flag": 77,
                             "sensitive_words": [reason[0]]}}

        else:
            doc_str = ""
            table_str = ""
            # Collect the text of every table cell...
            for table in doc.tables:
                for row in table.rows:
                    for cell in row.cells:
                        table_str += unicode(cell.text)
            # ...and every paragraph, then scan both together.
            for par in doc.paragraphs:
                doc_str += " " + unicode(par.text)
            doc_str += ' ' + table_str
            kj = regular_filter(doc_str, words)
            logging.info("{0} 正则处理时间:{1:.4f}".format(name, time.time() - t_start))
            print('file:', kj, '\n')
            rule = {"$set": {"species_flag": 88,
                             "sensitive_words": kj}}
            eight[0] += 1
        finally:
            # rule was set by either the except or the else branch.
            cs.update(rule, condition={"_id": file.get("id")})


# REW:stack_panic_level=15
def deal_excel(file, words, cs):
    """Scan an Excel workbook (.xls/.xlsx) for sensitive words.

    First tries xlrd on the raw bytes; if xlrd rejects the format, the
    bytes are retried as an HTML table via pandas (some ".xls" exports
    are actually HTML). On success the record gets species_flag 88 and
    the matched words; on any failure, flag 77 with reason[0].

    file: dict with 'name', 'lob' (BytesIO) and 'id' keys.
    words: iterable of sensitive words.
    cs: collection whose record (matched on _id == file['id']) is updated.
    """
    print 'deal excel'
    lob = file.get("lob")
    out = lob.getvalue()
    try:
        excel = xlrd.open_workbook(file_contents=out)
    except xlrd.biffh.XLRDError as e:
        # Not a real xls/xlsx stream — fall back to parsing it as HTML.
        t_start = time.time()
        try:
            excel = pd.read_html(io=out)[0]
        except:
            print('lob value all read')
            rule = {"$set": {"species_flag": 77,
                             "sensitive_words": [reason[0]]}}
            cs.update(rule, condition={"_id": file.get("id")})
        else:
            print('xls deal done')
            print(type(excel), excel.index)
            xls_str = ""
            # NOTE(review): DataFrame.ix is deprecated/removed in modern
            # pandas — this works only with the old pandas this Py2
            # script targets.
            for ind in excel.index:
                xls_str += ''.join(excel.ix[ind].values[0:-1])
            print('xls read done')
            kj = regular_filter(xls_str, words)
            logging.info("{0} 正则读取处理时间:{1:.4f}".format(file.get('name'), time.time() - t_start))

            print('file:', kj, '\n')
            rule = {"$set": {"species_flag": 88,
                             "sensitive_words": kj}}
            eight[0] += 1
            cs.update(rule, condition={"_id": file.get("id")})
    except:
        # Any other failure while opening the workbook: mark unreadable.
        print(file.get("name"), 'excel error')
        rule = {"$set": {"species_flag": 77,
                         "sensitive_words": [reason[0]]}}
        cs.update(rule, condition={"_id": file.get("id")})
    else:
        # xlrd opened the workbook: flatten every cell of every sheet
        # into text and scan the combined string.
        sheets = excel.sheets()
        kj = []

        def deal_sheet(sheet):
            # Concatenate one sheet's non-empty cell values, then filter.
            row_str = ""
            for rownum in range(sheet.nrows):
                for value in sheet.row_values(rownum):
                    try:
                        value = unicode(value)
                    except:
                        continue
                    if len(value) == 0:
                        continue
                    row_str += value
            return regular_filter(row_str, words)

        t_start = time.time()

        for sheet in sheets:
            kd = deal_sheet(sheet)
            kj.extend(kd)
        logging.info("{0} 正则读取处理时间:{1:.4f}".format(file.get('name'), time.time() - t_start))

        print('file:', kj, '\n')
        rule = {"$set": {"species_flag": 88,
                         "sensitive_words": kj}}
        eight[0] += 1
        cs.update(rule, condition={"_id": file.get("id")})


def filter_file(orname):
    """Return True when *orname* ends in one of the supported extensions."""
    return os.path.splitext(orname)[-1][1:] in suffix


def build_record(record):
    """Project only the fields downstream processing needs out of a raw
    query record (missing fields become None)."""
    wanted = ("OriginalFileName", "fileid", "_id")
    return {key: record.get(key) for key in wanted}


def build_recod(record):
    """Like build_record, but returns None for unsupported extensions.

    NOTE(review): appears unused (main() calls build_record); kept as-is.
    """
    original_name = record.get("OriginalFileName")
    if not filter_file(original_name):
        return None
    return {"OriginalFileName": original_name,
            "fileid": record.get("fileid"),
            "_id": record.get("_id")}


def get_fileids(cursor):
    try:
        while True:
            try:
                record = cursor.next()
                tmp = build_record(record)
                fileids.append(tmp)
            except SDBEndOfCursor:
                break
            except SDBBaseError, e:
                pysequoiadb._print(e)
                break
    except:
        cursor.close()


def get_sensitivate_words(sensitives):
    global sensitive_words
    cursor_s = sensitives.query()
    # 敏感词
    try:
        while True:
            try:
                record = cursor_s.next()
                sensitive_words.add(record.get('word'))
            except SDBEndOfCursor:
                break
            except SDBBaseError, e:
                pysequoiadb._print(e)
                break
    except SDBBaseError, e:
        cursor_s.close()
        pysequoiadb._print(e)
        sys.exit(0)


def main(flag=0):
    """End-to-end run: connect to SequoiaDB, select all ocr records with
    species_flag == *flag*, pull each record's LOB and scan it for
    sensitive words concurrently with gevent greenlets.

    flag: integer species_flag value to select (from --species_flag).
    Exits the process on connection failure or an unparseable flag.
    """
    try:
        flag = int(flag)
    except:
        print('special_flag 有问题 请检查')
        sys.exit(0)
    print('start')
    # # print "cpu核心数:", mp.cpu_count()
    try:
        db = client(sdb_ip, sdb_port, sdb_user, sdb_password)
    except SDBBaseError as e:
        pysequoiadb._print(e)
        print("连接出错 请校验")
        sys.exit(0)
    co = db.get_collection_space(ocr_space_name)
    col = db.get_collection_space(ocrlob_space_name)
    try:
        ocr = co.get_collection(ocr_collection)
        ocrlob = col.get_collection(ocrlob_collection)
        sensitives = co.get_collection(sensitive_collection)
    except SDBBaseError as e:
        # NOTE(review): failure here is swallowed and main() returns
        # silently; 'sfsdf' looks like leftover debug output.
        print('sfsdf')
        pass
    else:
        print('get query')
        t_start = time.time()

        # All records matching the requested flag.
        cursor = ocr.query(condition={'species_flag': flag})
        get_sensitivate_words(sensitives)
        get_fileids(cursor)
        logging.info("所有文件选择完毕时间:{0:.4f}".format(time.time() - t_start))

        lobs = []

        # Pair every selected record with its LOB handle.
        for file in fileids:
            if not file.get('fileid'):
                continue
            fileid = file.get("fileid").encode("utf8")
            tname = file.get("OriginalFileName")
            # tname = tname.rsplit("\\")
            lobs.append({'name': tname,
                         'lob': ocrlob.get_lob(fileid),
                         '_id': file.get('_id')})
        print("lobs 个数: ", len(lobs))
        start = time.time()
        # for lo in lobs:
        #     final_deal(lo,ocr)
        # One greenlet per file; gevent overlaps the blocking LOB reads.
        gpls = [gevent.spawn(final_deal, lo, ocr) for lo in lobs]
        gevent.joinall(gpls)
        end = time.time()
        print(SUM_)
        print('88:', eight[0])
        print('运行时间：', end - start)


if __name__ == "__main__":
    # Kick off one run for the flag given on the command line.
    main(args.species_flag)
