# -*- coding:utf-8 -*-
import argparse
import pymysql
import json
import re
import time
a = time.time()  # script start timestamp (used by the timing print at the bottom)
__all__ = ['text_filter']
__author__ = 'chengyonjun，huangkan，luxiaoman'
__date__ = '2018.08'

# Command-line interface: the text to filter plus MySQL connection settings.
# All five arguments are required positionals, in this exact order.
parser = argparse.ArgumentParser(
    description='传入一段文字，输入过滤后的文本和原有文本和状态')
for _arg in ('text', 'host', 'user', 'passwd', 'db'):
    parser.add_argument(_arg, help=_arg)
args = parser.parse_args()

# Text to be filtered.
text = args.text
# Database connection configuration.
host = args.host
user = args.user
passwd = args.passwd
db = args.db

# Trie of sensitive keywords: nested dicts, one level per character.
keyword_chains = {}
# Sentinel key marking the end of a complete keyword inside the trie.
delimit = '\x00'


# Insert each sensitive keyword into the trie, character by character.
def add(keyword):
    """Insert one sensitive keyword into the module-level `keyword_chains` trie.

    Accepts a str, or UTF-8 encoded bytes (decoded transparently). Leading and
    trailing whitespace is stripped; empty keywords are ignored. A `delimit`
    key with value 0 marks the end of a complete keyword.
    """
    if isinstance(keyword, bytes):
        # BUG FIX: the original py2-style code called .encode() on any
        # non-str value, which raises AttributeError on py3 bytes.
        keyword = keyword.decode('utf-8')
    chars = keyword.strip()
    if not chars:
        return
    level = keyword_chains

    for i in range(len(chars)):
        if chars[i] in level:
            # Existing prefix: descend one level.
            level = level[chars[i]]
        else:
            if not isinstance(level, dict):
                # Hit an end-marker value (keyword contains the delimiter
                # char); cannot extend further.
                break
            # New suffix: create one nested dict per remaining character.
            for j in range(i, len(chars)):
                level[chars[j]] = {}
                last_level, last_char = level, chars[j]
                level = level[chars[j]]
            # Replace the leaf with the end-of-keyword marker.
            last_level[last_char] = {delimit: 0}
            break
    if i == len(chars) - 1 and isinstance(level, dict):
        # The whole keyword already existed as a prefix of a longer one:
        # mark its end. (isinstance guard avoids a TypeError on the rare
        # non-dict node path above.)
        level[delimit] = 0
# Load the sensitive-word list from the `ivy_mgck` table in MySQL.
conn = pymysql.connect(host=host, user=user, passwd=passwd, db=db, port=3306, charset='utf8')
cur = conn.cursor()  # acquire a cursor
cur.execute('select * from ivy_mgck')
rows = cur.fetchall()
cur.close()  # close the cursor
conn.close()  # release the database connection
# Feed every row (all of its columns joined together) into the trie.
for row in rows:
    add(keyword="".join(row).strip())
# Match the text against the trie and mask any sensitive word with `repl`.
def filter(text, repl="*"):
    """Scan `text` against the `keyword_chains` trie and mask matches.

    Characters outside the whitelist (CJK ideographs, ASCII alphanumerics
    and the listed punctuation) are silently DROPPED before matching, so
    the returned "text" may be shorter than the input even when clean.

    Returns a JSON string: {"status": s, "text": filtered, "former_text": text}
    where status 1 means clean (nothing replaced) and 0 means at least one
    keyword was masked.

    NOTE(review): shadows the builtin `filter`; name kept for compatibility
    with existing callers.
    """
    # Strip every character not in the whitelist (spaces, emoji, ...).
    new_text = "".join(re.findall(r'[\u4e00-\u9fa5,A-Za-z0-9\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\\\'\:\;\[\]\.\<\>\/\?\~\！\@\#\￥\…\&\*\（\）\—\|\{\}\【\】\‘\；\：\”\“\。\，\、\？]', text))
    ret = []
    start = 0
    while start < len(new_text):
        level = keyword_chains
        step_ins = 0  # chars of a candidate keyword matched so far
        for char in new_text[start:]:
            if char in level:
                step_ins += 1
                if delimit not in level[char]:
                    level = level[char]
                else:
                    # Full keyword matched: mask with one repl char per
                    # matched character. BUG FIX: was hard-coded `repl * 2`,
                    # which mis-sizes the mask for keywords whose length
                    # is not exactly 2.
                    ret.append(repl * step_ins)
                    start += step_ins - 1
                    break
            else:
                # Mismatch: emit the char at `start` and rescan from start+1.
                ret.append(new_text[start])
                break
        else:
            # Text ended mid-candidate: keep the char at `start`.
            ret.append(new_text[start])
        start += 1
    filtered = "".join(ret)
    # status 1 == clean (output identical to whitelist-stripped input).
    s = 1 if new_text == filtered else 0
    data = {"status": s, "text": filtered, "former_text": text}
    return json.dumps(data)

# Emit the JSON result: status code, filtered text, original text.
print(filter(text))
# print(time.time()-a)  # elapsed-time debug output, intentionally disabled