
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')  # 设置默认编码格式为'utf-8'
import jieba.analyse
import pymysql
import cPickle
import sys
import os

project_path = os.getcwd()
gather_path = os.path.abspath(os.path.dirname(project_path) + os.path.sep + ".")
father_path = os.path.abspath(os.path.dirname(project_path) + os.path.sep + "..")
sys.path.append(gather_path)

from config.config import Config
from bs4 import BeautifulSoup

print ("project path :" + project_path)
print ("project father path:" + father_path)
file_type = '.txt'
event_file = father_path + "/abc_project_data/event.txt"
person_file = father_path + "/abc_project_data/person.txt"
company_file = father_path + "/abc_project_data/firm.txt"
dictPath = father_path + "/abc_project_data/sql_dict.txt"
loading_dict_path = father_path + "/abc_project_data/loading_dict_path.txt"
stopword = father_path + "/abc_project_data/stopword.dict"
data_input_path = father_path + "/abc_project_data/data_12_3/data/"
#data_input_path = father_path + "/abc_project_data/unmark_html/"
data_output_path = father_path + "/abc_project_data/marked_html_12_3/"
Write_flag=True

if Write_flag:
    if os.path.exists(dictPath):
        os.remove(dictPath)
    if os.path.exists(loading_dict_path):
        os.remove(loading_dict_path)
    sql_dict = open(dictPath,'w')
jieba.analyse.set_stop_words(stopword)

conf = Config()

def getMysqlConn(db):
    """Open a pymysql connection.

    :param db: 'test' selects the test config, anything else the
               production config from the shared Config object.
    :return: an open pymysql connection (caller is responsible for closing).
    """
    # Pick the credential getter, then unpack one tuple of settings.
    getter = conf.get_mysql_test if db == 'test' else conf.get_mysql_conn
    host, port, dbname, user_name, password = getter()
    return pymysql.connect(
        host=host,
        port=int(port),  # config may yield the port as a string
        user=user_name,
        passwd=password,
        db=dbname,
        charset='utf8',
    )

def extractor_list(sql_text, db, write_flag):
    """Run *sql_text* against database *db* and return cur.fetchall().

    Each returned row's first column is a term; when *write_flag* is true
    every term is also appended to the module-level sql_dict file so jieba
    can later load it as a user dictionary.

    Fix: the original leaked the cursor and connection if execute()/fetchall()
    raised; try/finally guarantees both are closed on every path.
    """
    conn = getMysqlConn(db)
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql_text)
            word_list = cur.fetchall()
            if write_flag:
                for word in word_list:
                    sql_dict.write(word[0].encode('utf-8') + '\n')
        finally:
            cur.close()
    finally:
        conn.close()
    return word_list

def get_file_name(data_root_path, file_type):
    """Recursively collect file names under *data_root_path* with suffix *file_type*.

    Fix: the original substring test (``file_type in file``) also matched
    names like 'a.txt.bak'; endswith() restricts it to a true suffix match.
    Also renamed loop variables so the builtins ``dir``/``file`` are not shadowed.

    :param data_root_path: directory walked with os.walk.
    :param file_type: filename suffix, e.g. '.txt'.
    :return: list of bare file names (no directory component).
    """
    matches = []
    for root, dirs, files in os.walk(data_root_path):
        for fname in files:
            if fname.endswith(file_type):
                matches.append(fname)
    return matches

def check_contain_chinese(check_str):
    """Return True iff *check_str* contains at least one character in the
    CJK Unified Ideographs block (U+4E00..U+9FFF)."""
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str)

def extractor_dict(sql_text, db, flag):
    """Fetch terms with extractor_list and map every Chinese term to *flag*.

    Rows whose first column contains no Chinese character are skipped.
    Keys are utf-8 encoded byte strings; values are the entity-type *flag*.
    """
    rows = extractor_list(sql_text, db, Write_flag)
    return {row[0].encode('utf-8'): flag
            for row in rows
            if check_contain_chinese(row[0])}

def load_dict(file_name, flag, write_dict):
    """Load newline-separated utf-8 terms from *file_name* into {term: flag}.

    Company terms of 6 characters or fewer are dropped (too ambiguous to
    match); other entity types take every line. When *write_dict* is true
    each kept term is also appended to the module-level sql_dict file.

    Fix: the original leaked the file handle (open(...).readlines());
    the with-block closes it deterministically. The filter condition is
    rewritten to the logically equivalent, more readable form.

    :return: dict mapping utf-8 encoded term -> flag.
    """
    dict_tmp = {}
    with open(file_name, 'rb') as f:
        for raw in f:
            line = raw.strip().decode('utf-8')
            # Only company names need the minimum-length filter.
            if flag != 'company' or len(line) > 6:
                if write_dict:
                    sql_dict.write(line.encode('utf-8') + '\n')
                dict_tmp[line.encode('utf-8')] = flag
    return dict_tmp

def product(input_txt, dict_all, find_dict):
    """Segment *input_txt* with jieba and bucket recognized terms.

    Each unique token whose utf-8 encoding appears as a key of *dict_all*
    is appended to find_dict[<its entity type>]. Mutates and returns
    *find_dict*.
    """
    for token in set(jieba.cut(input_txt, cut_all=False)):
        key = token.encode('utf-8')
        if key in dict_all:
            find_dict[dict_all[key]].append(key)
    return find_dict

def main():
    """Build the entity dictionaries, then tag every input file whose text
    contains a known company/event/person term.

    Fixes over the original: the output file, the input html file and the
    pickle-load handle were never closed (now with-blocks); the pickle is
    written in binary mode to match the 'rb' read; the builtin name ``file``
    is no longer shadowed; prints use the parenthesized form (valid in
    both Python 2 and 3 for a single argument).
    """
    dict_all = {}
    # GROUP BY de-duplicates the company-name column.
    sql_company_name = '''SELECT company_name FROM company_basicinfo group by company_name'''
    company_dict_1 = extractor_dict(sql_company_name, 'test', 'company')

    if Write_flag:
        # Rebuild all dictionaries from the flat files + DB and cache them.
        event_dict = load_dict(event_file, 'event', Write_flag)
        person_dict = load_dict(person_file, 'person', Write_flag)
        company_dict_2 = load_dict(company_file, 'company', Write_flag)
        dict_all.update(event_dict)
        dict_all.update(company_dict_1)
        dict_all.update(person_dict)
        dict_all.update(company_dict_2)
        with open(loading_dict_path, "wb") as f:  # binary, matching the "rb" load below
            cPickle.dump(dict_all, f)
    else:
        print("loading industry dict...")
        with open(loading_dict_path, "rb") as f:
            dict_all = cPickle.load(f)
    jieba.load_userdict(dictPath)

    file_name_list = get_file_name(data_input_path, file_type)
    for fname in file_name_list:
        print(fname)
        with open(data_input_path + fname, 'r') as fin:
            input_html = fin.read().decode('utf-8')
        soup = BeautifulSoup(input_html, 'lxml')
        origin_text = soup.get_text().encode('utf-8')
        find_dict = {'company': [], 'event': [], 'person': []}
        find_dict = product(origin_text, dict_all, find_dict)

        # Only emit an output file when at least one entity was found.
        if find_dict['company'] or find_dict['event'] or find_dict['person']:
            with open(data_output_path + fname, 'w') as output_file:
                for key in find_dict:
                    output_file.write(key + ":" + " ".join(find_dict[key]) + '\n')

if __name__ == "__main__":
    main()