#coding:utf-8
import os
import sys
import pymysql
import argparse
import jieba.analyse
pwd = os.getcwd()
sys.path.append(pwd)
from config.config import Config


conf = Config()


def _str2bool(value):
    """argparse-friendly boolean parser.

    The original used ``type=bool``, but ``bool('False')`` is True — any
    non-empty command-line string parsed as True, so ``--extract_dict False``
    silently did nothing.  Parse the text explicitly instead.
    """
    return str(value).lower() not in ('false', '0', 'no', 'n', '')


parser = argparse.ArgumentParser()

parser.add_argument('--home_dir_', dest='home_dir_', type=str, default='/home/yzfu/nlp/kg_abc/fyz_kg_nlp/abc_project_data/',
                    help='path to the root dir')
parser.add_argument('--text_Path_', dest='text_Path_', type=str, default='text_02/',
                    help='path to the origin text')
parser.add_argument('--product_03_', dest='product_03_', type=str, default='text_03/',
                    help='path to the origin text')
parser.add_argument('--mark_path_', dest='mark_path_', type=str, default='mark.txt',
                    help='path to the mark tags')
parser.add_argument('--extract_dict', dest='extract_dict', type=_str2bool, default=True,
                    help='whether extract sql dict')
parser.add_argument('--wordseg_dict', dest='wordseg_dict', type=str, default='new_dict.txt',
                    help='path to the wordseg dict')
parser.add_argument('--stopword', dest='stopword', type=str, default='stopword.dict',
                    help='path to the stopword dict')
args = parser.parse_args()

params = vars(args)

# Resolve every configurable path relative to the project root dir.
home_dir = params['home_dir_']
text_path_ = params['text_Path_']
product_03_ = params['product_03_']
mark_path_ = params['mark_path_']
extract_dict_flag = params['extract_dict']
stopword = home_dir + params['stopword']
wordseg_dict = home_dir + params['wordseg_dict']

# Register the custom segmentation dictionary and stop-word list with jieba.
jieba.load_userdict(wordseg_dict)
jieba.analyse.set_stop_words(stopword)  # argument is the path of the custom stop-word corpus

mark_path = home_dir + mark_path_
origin_text = home_dir + text_path_
product_03_ = home_dir + product_03_
dict_file = home_dir + "event.txt"
# print() with a single argument behaves identically on Python 2 and 3.
print("mark_path:{}".format(mark_path))
# Step 2 of the pipeline: generate the tag files.
json_dict = {}
if not os.path.exists(product_03_):
    os.mkdir(product_03_)
if not os.path.exists(home_dir + 'test_03/'):
    os.mkdir(home_dir + 'test_03/')

def fyz_get_mark_info(mark_path):
    """Parse the manual annotation file into an {entity: tag} mapping.

    Each line of *mark_path* is space-separated; field 1 is the entity text
    and the last field its tag.  Only the five tags below are kept.  'person'
    entities are always accepted; every other tag additionally requires the
    entity text to be longer than 9 (bytes under Python 2, where file reads
    yield byte strings) to filter out short, ambiguous names.

    Lines with too few fields are skipped silently, matching the original
    IndexError handling.
    """
    valid_tags = ('product', 'business', 'person', 'agency', 'industry')
    marks = {}
    # 'with' guarantees the handle is closed (the original leaked it); local
    # names avoid shadowing the builtins 'dict' and 'file'.
    with open(mark_path, 'r') as fin:
        for raw in fin.readlines():
            fields = raw.strip().split(' ')
            tag = fields[-1]
            if tag not in valid_tags:
                continue
            try:
                entity = fields[1]
            except IndexError:
                continue
            # The two original branches assigned the same value; fold them.
            if tag == 'person' or len(entity) > 9:
                marks[entity] = tag
    return marks

#sql_dict = json.load(open(home_dir+'sql_dict.json','r'),encoding='utf-8')
def get2Tag(li):
    """Return the B/I tag list for a token: 'B' followed by 'I' * (len-1).

    The original spelled out the 1, 2 and >2 cases separately, but all three
    reduce to the same pattern.  It also fell through for length 0 and
    returned None, which made ``zip(list(words), BI_list)`` in the caller
    raise TypeError; an empty list keeps the caller's zip a harmless no-op.
    """
    length = len(li)
    if length == 0:
        return []
    return ['B'] + ['I'] * (length - 1)

def hash_fyz_pos_word(path,tag_output,hash_dict):
    """Convert one raw text file into per-character NER training lines.

    path       : input text file.
    tag_output : output file; one "char TAG" line per character, where TAG is
                 'O' or 'B-label' / 'I-label' for dictionary hits.
    hash_dict  : entity -> label mapping; keys are UTF-8 byte strings
                 (jieba tokens are encoded before lookup).

    A "sentence" is the run of tokens between '\\n' tokens.  It is written
    out only if it has at least 2 dictionary hits, more than 15 and fewer
    than 200 characters, and is not tagged entirely 'O'.
    NOTE(review): tokens after the final newline are never flushed, and the
    input handle / fout are not closed on error — kept as-is.
    """
    # Read the whole file at once (Python 2: a byte string).
    file_data= open(path,'r').read()
    ner_token = []          # accumulated output characters for the whole file
    sentences_token=[]      # characters of the sentence currently being built
    fout = open(tag_output, 'w')
    word_list = jieba.cut(file_data, cut_all=False)
    num_tags_O=0            # characters tagged 'O' in the current sentence
    num_words=0             # total characters in the current sentence
    num_flag=0              # dictionary hits in the current sentence
    for words in word_list :
        if words == '\n':
            # End of sentence: emit it only if it passes the quality filters,
            # then reset all per-sentence counters either way.
            if num_tags_O!=num_words and num_flag>=2 and  num_words>15  and num_words<200:
                sentences_token.extend(words)
                ner_token.extend(sentences_token)
            num_tags_O = 0
            num_flag = 0
            num_words=0
            sentences_token=[]
        else:
            try:
                # hash_dict is keyed by UTF-8 bytes; jieba yields unicode.
                if words.encode('utf-8') in hash_dict:
                    flag = hash_dict[words.encode('utf-8')]
                    num_flag=num_flag+1
                else:
                    flag ='O'
            except KeyError:
                flag = 'O'
            # One B/I tag per character of the token.
            BI_list = get2Tag(words)
            if flag != 'O':
                for char, BI in zip(list(words), BI_list):
                    num_words=num_words+1
                    # extend() on a string appends it character by character.
                    sentences_token.extend(char.encode('utf-8') + " " + BI + '-' + flag + '\n')
            else:
                for char, BI in zip(list(words), BI_list):
                    num_words=num_words+1
                    num_tags_O = num_tags_O + 1
                    sentences_token.extend(char.encode('utf-8') + " " + flag + '\n')
    fout.writelines(ner_token)
    fout.close()

def productTag(orgin_path,hash_dict):
    """Tag one source file and write the result into the product_03_ dir.

    orgin_path : full path of one origin text file.
    hash_dict  : entity -> label mapping forwarded to hash_fyz_pos_word.
    """
    # Keep only the file name; the tagged output mirrors it under product_03_.
    file_name = orgin_path.split("/")[-1]
    testpath = product_03_+file_name
    hash_fyz_pos_word(orgin_path,testpath,hash_dict)
    # Single-argument print() is valid and identical on Python 2 and 3;
    # the py2-only statement form was the only change here.
    print('文件：'+file_name)

def eachFile(filepath):
    """Return the full paths of every entry directly under *filepath*.

    *filepath* is expected to end with '/': paths are built by plain string
    concatenation, exactly like the original '%s%s' formatting.  The old
    ``child.decode('gbk')`` call was removed — its result was discarded, so
    it did nothing except potentially raise UnicodeDecodeError on non-GBK
    file names.
    """
    file_names = [filepath + entry for entry in os.listdir(filepath)]
    print('原始文档目录扫描完成')
    return file_names

def getDevMysqlConn(db):
    """Open a pymysql connection.

    db == 'test' selects the test credentials from the project Config;
    anything else uses the default connection credentials.  Returns a live
    pymysql connection (utf8 charset); the caller owns closing it.
    """
    if db == 'test':
        credentials = conf.get_mysql_test()
    else:
        credentials = conf.get_mysql_conn()
    host, port, dbname, user_name, password = credentials

    return pymysql.connect(
        host=host,
        port=int(port),  # Config may hand the port back as a string
        user=user_name,
        passwd=password,
        db=dbname,
        charset='utf8',
    )

def check_contain_chinese(check_str):
    """Return True if *check_str* contains at least one CJK ideograph.

    Accepts either UTF-8 bytes or already-decoded text.  The original
    unconditionally called ``.decode('utf-8')``, which breaks on decoded
    text (AttributeError on py3 str; implicit ascii-encode UnicodeError on
    py2 unicode containing Chinese) — decode only when given bytes.
    """
    if isinstance(check_str, bytes):
        check_str = check_str.decode('utf-8')
    # U+4E00..U+9FFF is the CJK Unified Ideographs block.
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str)

"""
def extractor_dict(sql_text,db,flag):
    conn = getDevMysqlConn(db)
    cur = conn.cursor()
    cur.execute(sql_text)
    word_list = cur.fetchall()
    # print "jieguo count:".count(results)
    if flag!='event' :
        for word in word_list:
            if len(word[0].encode('utf-8'))>4:
                json_dict[word[0].encode('utf-8')]=flag
    else:
        for word in word_list:
            if flag=='person' and check_contain_chinese(word[0])==False :
                continue
            json_dict[word[0].encode('utf-8')]=flag
    cur.close()
    conn.close()
    return json_dict
"""
def extractor_dict(sql_text,db,flag):
    """Accumulate {entity: flag} entries into the module-level json_dict.

    db == 'sql'  -> run *sql_text* against MySQL and keep names whose UTF-8
                    encoding is longer than 4 bytes (filters short names).
    otherwise    -> read one entity per line from the local dict_file.

    Mutates json_dict in place and also returns it, as before.
    """
    if db=='sql':
        conn = getDevMysqlConn(db)
        cur = conn.cursor()
        # try/finally guarantees cursor and connection are released even if
        # the query fails (the original leaked both on exception).
        try:
            cur.execute(sql_text)
            for word in cur.fetchall():
                encoded = word[0].encode('utf-8')  # encode once, not twice
                if len(encoded) > 4:
                    json_dict[encoded] = flag
        finally:
            cur.close()
            conn.close()
    else:
        # 'with' closes the handle the original left open (and stops
        # shadowing the builtin name 'file').
        with open(dict_file, "r") as fin:
            for line in fin.readlines():
                json_dict[line.strip()] = flag
    return json_dict

def segment_doc(hash_dict):
    """Tag every file under origin_text using hash_dict (entity -> label).

    Iterates the origin-text directory and runs productTag on each file.
    """
    for file_name_path in eachFile(origin_text):
        productTag(file_name_path, hash_dict)
    # Single-argument print() is identical on Python 2 and 3; the py2-only
    # statement form was the only change here.
    print('文件标注完成！！')

if __name__ == "__main__":
    if extract_dict_flag :
        print "extract sql dict..."
        sql_company_name = '''SELECT company_name FROM company_basicinfo group by company_name'''
        #sql_person_name = '''SELECT  name FROM people_basicinfo group by name'''
        sql_industry_name = '''SELECT  first_indu_name FROM sec_industry_new group by first_indu_name union
            SELECT  second_indu_name FROM sec_industry_new group by second_indu_name union
            SELECT  third_indu_name FROM sec_industry_new group by third_indu_name union
            SELECT  fourth_indu_name FROM sec_industry_new group by fourth_indu_name 
            '''
        sql_product_name = '''SELECT com_product FROM kg_com_product group by com_product'''
        sql_business_name = '''SELECT com_business FROM kg_com_business group by com_business'''
        sql_event_name = '''SELECT event_name FROM kg_event WHERE length(event_name)<16 group by event_name '''
        extractor_dict(sql_company_name,'sql', 'agency')
        #extractor_dict(sql_person_name, 'test', 'person')
        extractor_dict(sql_industry_name, 'sql','industry')
        extractor_dict(sql_product_name, 'sql','product')
        extractor_dict(sql_business_name, 'sql','business')
        extractor_dict(sql_event_name, 'text','event')
        #extractor_dict(sql_event_name, 'online', 'event')
    mark_dict = fyz_get_mark_info(mark_path)
    hash_dict=dict(mark_dict,**json_dict)
    segment_doc(hash_dict)



