#coding:utf-8
import os
import re
import sys
import shutil
import argparse

pwd = os.getcwd()
sys.path.append(pwd)

from ner import Ner_recognition,get_tags_dict
from model.configner import ConfigNer
from model.ner_model import NERModel
from string import maketrans


# Byte-level translation table mapping each of '[].,:' to a space.
# clear_origin_test() applies it and then strips all spaces, so these
# characters are effectively deleted from the input text.
# NOTE: string.maketrans operates on byte strings (Python 2 only).
intab="[].,:"
outtab="     "
trantab=maketrans(intab,outtab)


# Command-line configuration: every *_dataset/tags path is given relative
# to --home_dir_ and concatenated into an absolute path below.
parser = argparse.ArgumentParser()

parser.add_argument('--result_dataset', dest='result_dataset', type=str, default='/ner_html_result/',
                    help='path to the result_dataset')
parser.add_argument('--home_dir_', dest='home_dir_', type=str, default='/home/yzfu/nlp/kg_abc/fyz_kg_nlp/abc_project_data',
                    help='path to the root dir')
parser.add_argument('--html_dataset', dest='html_dataset', type=str, default='/tmp_article/',
                    help='path to the html_dataset')
parser.add_argument('--tags_path', dest='tags_path', type=str, default='/tags.txt',
                    help='path to the tags dict')


args = parser.parse_args()
params = vars(args)
home_dir_ = params['home_dir_']
# Module-level absolute paths used by product_file()/work()/main():
# output directory, input article directory, and the tag-dictionary file.
result_dataset = home_dir_+params['result_dataset']
html_dataset = home_dir_+params['html_dataset']
tags_path = home_dir_+params['tags_path']

print "restore model......"
# Build the NER model graph and restore trained weights from the
# checkpoint directory declared in the model config; `ner` wraps the
# model for per-sentence tagging. These are module-level singletons.
config = ConfigNer()
model = NERModel(config)
model.build()
model.restore_session(config.dir_model)
ner = Ner_recognition()
print "loading tags......"

def get_file_name(data_root_path, file_type):
    """Recursively collect file names under *data_root_path* that end
    with *file_type* (e.g. '.txt').

    Returns a flat list of bare file names (no directory components);
    files from nested sub-directories are included.
    """
    matched = []
    for root, dirs, files in os.walk(data_root_path):
        for name in files:
            # endswith() instead of substring matching: looking for '.txt'
            # must not pick up names like 'x.txt.bak'.
            if name.endswith(file_type):
                matched.append(name)
    return matched


def Alignment_result(word_entity):
    """Split (word, ..., tag) tuples into parallel word and entity lists.

    The first element of each tuple is taken as the word and the last as
    its entity tag; anything in between is ignored.  Returns the pair
    (words, entities).
    """
    words = [item[0] for item in word_entity]
    entities = [item[-1] for item in word_entity]
    return words, entities
def get_token(words, tag_result):
    """Merge BIO-tagged tokens into "entity/TAG" strings.

    ``words[i]`` is the surface token whose tag is ``tag_result[i]``
    ('O', 'B-<TAG>' or 'I-<TAG>').  Consecutive B/I tokens belonging to
    one entity are concatenated; 'O' tokens are dropped.

    Returns a list like ['zhangsan/PER', 'beijing/LOC'].

    Raises IndexError on a malformed non-'O' tag lacking a '-' part;
    product_file() relies on catching that to skip bad sentences.

    Bug fixed vs. the original: a 'B-X' token immediately followed by
    'O' was never flushed, so its text silently leaked into the *next*
    entity.  Now every pending span is flushed at an 'O' boundary.
    """
    word_entity_result = []
    span = ""        # surface form of the entity currently being built
    span_tag = None  # its tag, e.g. 'PER'
    for word, tag in zip(words, tag_result):
        if tag == 'O':
            # Entity boundary: emit whatever was being accumulated.
            if span:
                word_entity_result.append("/".join([span, span_tag]))
                span = ""
                span_tag = None
            continue
        tag_parts = tag.split('-')
        if tag_parts[0] == 'B':
            # A new entity starts: close the previous one first.
            if span:
                word_entity_result.append("/".join([span, span_tag]))
            span = word
            span_tag = tag_parts[1]
        else:
            # 'I-...': extend the current span (an orphan 'I' with no
            # preceding 'B' starts a fresh span, matching the original).
            span = span + word
            span_tag = tag_parts[1]
    if span:
        # Flush the trailing entity at end of sentence.
        word_entity_result.append("/".join([span, span_tag]))
    return word_entity_result


def clear_origin_test(data_list):
    """Normalise raw article lines into sentence-sized unicode chunks.

    For each non-empty line: strips whitespace, deletes '[].,:' (via the
    module-level ``trantab``) and all spaces, and removes (...) /（...）/
    {...} asides.  Lines of 46-599 bytes are kept whole; longer lines are
    split on the Chinese punctuation marks 。，、 and only fragments over
    45 bytes survive.  Shorter lines are discarded entirely.

    NOTE: lengths are byte counts (Python 2 str), so the 45/600
    thresholds correspond to roughly 15/200 CJK characters in UTF-8.
    Returns a list of unicode strings.
    """
    clear_list = []
    for line in data_list:
        if line == '\n' or line == "":
            continue
        line = line.strip()
        line = line.translate(trantab)
        line = line.replace(" ", "")
        # Drop parenthesised asides (full-width and ASCII) and {...} blocks.
        line = re.sub(r'（.*?）', '', line)
        line = re.sub(r'\(.*?\)', '', line)
        line = re.sub(r'\{.*?\}', '', line)

        if len(line) > 45 and len(line) < 600:
            clear_list.append(line.decode('utf-8'))
        elif len(line) > 600:
            # Over-long line: break on sentence/clause punctuation and
            # keep only the fragments that are still sentence-sized.
            line = re.sub(r'。', '\n', line)
            line = re.sub(r'，', '\n', line)
            line = re.sub(r'、', '\n', line)
            for fragment in line.split('\n'):
                if len(fragment) > 45:
                    clear_list.append(fragment.decode('utf-8'))
    return clear_list

def product_line(ner, model, line):
    """Tag one sentence with the given NER wrapper and return the
    per-token (word, entity) output of ``ner.ner_model``."""
    return ner.ner_model(model, line)


def product_file(file_name,tags_dict):
    """Run NER over one article file and write its entities grouped by tag.

    Reads the raw article from ``html_dataset + file_name``, cleans and
    sentence-splits it with clear_origin_test(), tags each sentence with
    the module-level ``ner``/``model``, and records every recognised
    entity surface form in ``tags_dict`` (mutated in place).  Finally
    writes one "<tag>: <entity> <entity> ..." line per non-'O' tag to
    ``result_dataset + file_name``.
    """
    input_file = html_dataset + file_name
    print input_file
    output_file = result_dataset + file_name
    tags_result = []
    output = ""  # NOTE(review): never used afterwards -- dead variable
    # NOTE(review): the input handle is rebound over its own path string
    # and is never closed.
    input_file = open(input_file, 'r')
    output_file = open(output_file, 'w')
    clear_list=clear_origin_test(input_file.read().split('\n'))
    for line in clear_list:
        try:
            if line != "":
                # Raw model output: one (word, ..., tag) tuple per token.
                tag_result = ner.ner_model(model=model, senceten=line)
                words,entity = Alignment_result(tag_result)
                # check_ner presumably repairs inconsistent BIO tag
                # sequences -- TODO confirm against ner module.
                tag_result=ner.check_ner(entity)
                result = get_token(words, tag_result)
                if result!=[]:
                    tags_result.append(result)
        except IndexError:
            # Malformed tags (missing '-') raise IndexError inside
            # get_token; skip the sentence and keep going.
            continue
    # Merge "word/tag" pairs into tags_dict, de-duplicating entities per
    # tag.  Keys are compared in encoded (byte) form but the dict is
    # indexed with the unicode tag -- works in Python 2 only because
    # ASCII str and unicode hash/compare equal.
    for line in tags_result:
        for word in line :
            word_list = word.split('/')
            if word_list[-1].encode('utf-8') in tags_dict.keys():
                if word_list[0].encode('utf-8') not in tags_dict[word_list[-1]]:
                    tags_dict[word_list[-1]].append(word_list[0].encode('utf-8'))
    # NOTE(review): sorted() returns a new list that is immediately
    # discarded -- this line has no effect on the output order below.
    sorted(tags_dict.items(), key=lambda e: e[0], reverse=True)
    for key in tags_dict:
        if  key=='O' :
            continue
        else:
            # One line per tag: "<tag>: <entity> <entity> ...".
            output_file.write(key + ":" + " "+" ".join(tags_dict[key])+'\n')
    output_file.close()

def work():
    """Run the NER pipeline over every .txt article under html_dataset.

    A fresh tags dictionary is loaded from ``tags_path`` for each file so
    that entities collected from one article do not leak into the next
    (product_file mutates the dict in place).
    """
    file_type = '.txt'
    file_name_list = get_file_name(html_dataset, file_type)
    for file_name in file_name_list:
        # `with` guarantees the tags file is closed even if tagging fails.
        with open(tags_path, 'r') as tags_file:
            tags_dict = get_tags_dict(tags_file)
            product_file(file_name, tags_dict)

def main():
    """Entry point: reset the output directory, then process all files."""
    out_dir = result_dataset
    if os.path.isdir(out_dir):
        # Wipe stale results from any previous run before regenerating.
        shutil.rmtree(out_dir)
    os.mkdir(out_dir)
    work()

# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()