#coding:utf-8
import sys
import os
pwd = os.getcwd()
sys.path.append(pwd)

import MySQLdb
from config.config import Config
from pymongo import MongoClient
from string import maketrans
import demjson
import shutil
import argparse
import re

conf=Config()
#home_dir =  conf.get_home_dir()
# Translation table used by clear_origin_test(): maps bracket/punctuation
# characters to spaces so they can be stripped in one pass.
# NOTE: string.maketrans / str.translate is the Python 2 byte-string API —
# this whole file is Python 2.
intab="[].,:"
outtab="     "
trantab=maketrans(intab,outtab)

# Step 1 of the pipeline (run this script first).
# (Workflow notes below, in Chinese — roughly: 1. announcement-level training
# data; 2. fetch labelled data from MySQL; 3. fetch raw documents from
# MongoDB; 4. map the labels back onto the raw documents; 5. emit the
# labelled data the model needs.)
"""
1、公告级数据训练
2、从mysql中取出已经标注的数据
3、从mongodb中取出原始文档
4、将标注数据还原到原始文档
5、生成model需要的标注数据
"""

# Python 2 hack: force the process-wide default encoding to UTF-8 so that
# implicit str<->unicode conversions of the Chinese text don't raise
# UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

# Accumulators filled as side effects by deal_mark_data()/get_origin_test():
# src_ids of articles that carry annotations
mark_article_ids = []
# Mongo _ids of the matching finance documents
finace_ids = []
# Open a MySQL connection using the credentials from the project config.
def getMysqlConn():
    """Return a new MySQLdb connection built from conf.get_mysql_conn()."""
    host, port, dbname, user_name, password = conf.get_mysql_conn()
    params = {
        'host': host,
        'port': int(port),
        'user': user_name,
        'passwd': password,
        'db': dbname,
        'charset': 'utf8',
    }
    return MySQLdb.connect(**params)


# select a.id,a.location,b.article_id,c.src_id from  kg_notice_related a
# left join kg_notice_basic b on a.notice_id = b.id
# left join notice c on b.article_id = c.added_id
# where a.location<>'' and a.location<>'[]' and a.location<>'1' and a.location<>'0' and a.location<>'暂时没有'
#

#获取所有的标注数据
def get_mark_data():
    conn = getMysqlConn()
    cur = conn.cursor()
    sql = """select a.id,a.location,b.article_id,c.src_id from  kg_notice_related a
            left join kg_notice_basic b on a.notice_id = b.id
            left join notice c on b.article_id = c.added_id
            where a.location<>'' and a.location<>'[]' and a.location<>'1' and a.location<>'0' and a.location<>'暂时没有' and  a.subject_type not in (2,3,4,5)"""
    cur.execute(sql)
    #获取查询结果
    results = cur.fetchall()
    print "database required result:{}".format(len(results))
    cur.close()
    conn.close()
    return results

# Process the annotated rows and emit one "src_id marktext type" line per
# unique annotation; also records which articles to fetch from MongoDB.
def deal_mark_data(mark_file):
    """Write the de-duplicated annotations to mark_file, one per line.

    Side effect: appends every row's src_id to the global mark_article_ids
    so get_origin_test() can look the raw documents up in MongoDB.
    """
    results = get_mark_data()
    markset = set()
    for row in results:
        item = list(row)
        mark_article_ids.append(item[3])
        # item[1] is the JSON-ish `location` column; demjson tolerates the
        # non-strict JSON stored there.
        marks = demjson.decode(item[1])
        for mark in marks:
            # Skip malformed annotations missing either required key.
            if 'marktext' in mark and 'type' in mark:
                markset.add(item[3] + ' ' + mark['marktext'] + ' ' + mark['type'])
    # Open the output only after the DB work succeeded, and don't shadow the
    # builtin `file`; `with` guarantees the handle is closed.
    with open(mark_file, 'wb') as out:
        for mark in markset:
            out.write(mark + '\n')

# Build a handle to the `cr_data` Mongo database from the configured DSN.
def getMongoClient():
    """Return the cr_data database of the configured MongoDB server."""
    return MongoClient(conf.get_mongodb_conn()).cr_data

# Load every annotation line from the given mark file.
def get_mark_info(mark_path):
    """Return the mark file's lines, each split on single spaces.

    Each entry keeps the trailing newline on its last field, matching the
    format written by deal_mark_data().
    """
    # Fix: the original leaked the file handle (never closed) and shadowed
    # the builtin `file`.
    with open(mark_path, 'r') as f:
        return [line.split(' ') for line in f.readlines()]


# Fetch the original text/html for every annotated article.
# textPath / htmlPath: output directories for the raw documents.
def  get_origin_test(textPath,htmlPath,mark_path):
    """Resolve annotated src_ids to Mongo documents and dump the originals.

    Writes mark_path twice: first via deal_mark_data() keyed by src_id,
    then rewritten here keyed by the Mongo _id.  Also dumps each matching
    document's fulltext and html to textPath/htmlPath.
    """
    db = getMongoClient()
    # Fills mark_path with "src_id marktext type" lines and populates the
    # module-level mark_article_ids list.
    deal_mark_data(mark_path)
    items = db.finance.juchao.item
    markdatas = get_mark_info(mark_path)
    # Attach the Mongo _id to every mark whose src_id matched a document.
    # NOTE(review): this assumes marktext contains no spaces — a marktext
    # with spaces splits into extra fields and shifts the indices below.
    for item in items.find({'src_id':{'$in':mark_article_ids}}):
        finace_ids.append(item['_id'])
        for mark in markdatas:
            if mark[0] == item['src_id']:
                mark.append(str(item['_id']))

    # Rewrite mark.txt, replacing the leading src_id with the Mongo _id.
    mark_file = open(mark_path,'wb')
    for mark in markdatas:
        #print mark
        # Marks that never got an _id appended (no matching document) still
        # have only 3 fields and are dropped here.
        if len(mark)<=3:
            continue
        # mark[2] still carries the trailing newline from get_mark_info().
        mark_file.write(mark[3] + ' ' + mark[1] + ' ' + mark[2])
    mark_file.close()

    # Dump the matching original documents, one file per Mongo _id.
    texts = db.juchao_text
    for text in texts.find({'_id':{'$in':finace_ids}}):
        file = open(textPath+str(text['_id'])+'.txt','wb')
        htmlFile = open(htmlPath+str(text['_id'])+'.txt','wb')
        if 'fulltext' not in text:
            # NOTE(review): both files were already opened — skipping here
            # leaves two empty files behind and leaks the handles.
            continue
        elif 'html' not in text :
            continue
        file.write(text['fulltext'])
        htmlFile.write(text['html'])
        file.close()
        htmlFile.close()

'''
#获取未被标注过的内容
def get_not_mark_origin_text():
'''

# Build a new dictionary: copy the base dict and append all non-'event'
# mark texts from the mark file.
def generate_new_dict(mark_path,dict_path,dest_path):
    """Copy dict_path to dest_path and append every non-'event' mark text.

    Mark texts containing the Chinese enumeration comma '、' are split and
    each non-empty part is appended on its own line.
    """
    markdatas = get_mark_info(mark_path)
    shutil.copyfile(dict_path, dest_path)
    with open(dest_path, 'a') as out:
        for mark in markdatas:
            # BUG FIX: the original guarded with len(mark) > 1 but then read
            # mark[2], raising IndexError on two-field lines; it also used
            # the truthy value of str.find(), which is -1 (true) when the
            # separator is absent.
            if len(mark) > 2 and mark[2].strip() != 'event':
                if '、' in mark[1]:
                    for part in mark[1].split('、'):
                        if part:  # skip empties from a leading/trailing '、'
                            out.write(part + '\n')
                else:
                    out.write(mark[1] + '\n')

# Collect the names of all files under data_root_path that match file_type.
def get_file_name(data_root_path, file_type):
    """Return names (not paths) of files under data_root_path, recursively.

    NOTE(review): the match is a substring test, not a suffix test, so
    '.txt' also matches e.g. 'a.txt.bak' — confirm whether endswith() was
    intended.  (Fixed here: the original shadowed the builtins `dir` and
    `file`.)
    """
    matches = []
    for _root, _dirs, names in os.walk(data_root_path):
        matches.extend(name for name in names if file_type in name)
    return matches

def clear_origin_test(textPath,data_clear_path):
    file_type = '.txt'
    file_name_list = get_file_name(textPath, file_type)
    for file in file_name_list:
        input_file = open(textPath + file, 'r')
        output_file = open(data_clear_path + file, 'w')
        str = ""
        print file
        for line in input_file.readlines():
            if line != '\n':
                line = line.strip()
                line = line.translate(trantab)
                line = line.replace(" ", "")
                # remove (*) or（*）between text
                line = re.sub(r'（.*?）', '', line)
                line = re.sub(r'\(.*?\)', '', line)
                line = re.sub(r'\{.*?\}', '', line)
                if len(line) > 75:
                    str = str + line
        str = re.sub(r'。', '\n', str)
        str = re.sub(r'，', '\n', str)
        str = re.sub(r'、', '\n', str)
        output_file.write(str)
        #str = ""
        input_file.close()
        output_file.close()

if __name__ == "__main__":
    # Command-line entry point: every path argument is relative to
    # --home_dir_ and is concatenated onto it below.
    parser = argparse.ArgumentParser()

    parser.add_argument('--home_dir_', dest='home_dir_', type=str, default='/home/yzfu/nlp/kg_abc/abc_project_data/',
                        help='path to the root dir')
    parser.add_argument('--text_Path_', dest='text_Path_', type=str, default='text_01/',
                        help='path to the origin text')
    parser.add_argument('--htmlPath_', dest='htmlPath_', type=str, default='html_01/',
                        help='path to the html_01 text')
    parser.add_argument('--mark_path_', dest='mark_path_', type=str, default='mark.txt',
                        help='path to the mark tags')
    parser.add_argument('--dict_path_', dest='dict_path_', type=str, default='dict.txt',
                        help='path to the dict')
    parser.add_argument('--dest_path_', dest='dest_path_', type=str, default='new_dict.txt',
                        help='path to the dict')
    parser.add_argument('--data_clear', dest='data_clear', type=str, default='text_02/',
                        help='path to the cleared data')
    args = parser.parse_args()
    params = vars(args)

    home_dir = params['home_dir_']
    text_path_ = params['text_Path_']
    htmlPath_ = params['htmlPath_']
    mark_path_ = params['mark_path_']
    dict_path_ = params['dict_path_']
    dest_path_ = params['dest_path_']
    data_clear_path_ = params['data_clear']

    # Build the absolute working paths for batch processing.
    textPath = home_dir+text_path_
    data_clear_path = home_dir+data_clear_path_
    htmlPath = home_dir+htmlPath_
    """
    if not os.path.exists(textPath):
        os.mkdir(textPath)
    
    if not os.path.exists(htmlPath):
        os.mkdir(htmlPath)
    """
    if not os.path.exists(data_clear_path):
        os.mkdir(data_clear_path)
    # NOTE(review): htmlPath was already assigned above — this line is
    # redundant but harmless.
    htmlPath = home_dir + htmlPath_
    mark_path = home_dir+mark_path_
    dict_path = home_dir + dict_path_
    dest_path = home_dir + dest_path_

    # Earlier pipeline stages, disabled for this run: fetch originals and
    # regenerate the dictionary.
    #get_origin_test(textPath,htmlPath,mark_path)
    #generate_new_dict(mark_path, dict_path, dest_path)
    print "clear origin text......"
    clear_origin_test(textPath,data_clear_path)






