#coding=utf8
import re
import sys
import os
root_path=os.getcwd()
sys.path.append(root_path)
from model.configner import ConfigNer
from model.ner_model import NERModel
from string import maketrans
#from train_data.genera_data import fyz_pos_tag_word
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
class Ner_recognition():
    """Helpers for running a character-level NER model and merging its
    predictions with POS tags into "word/tag" result tokens."""

    def __init__(self):
        pass

    def ner_model(self, model, senceten):
        """Predict a label for every character of `senceten`.

        Returns a list of [char, label] pairs, skipping space characters.
        An empty sentence yields an empty list.
        """
        result = []
        chars = list(senceten)
        if chars:
            preds = model.predict(chars)
            for ch, lab in zip(chars, preds):
                if ch == " ":
                    continue
                result.append([ch, lab])
        return result

    def clear_file(self, line):
        """Normalize a raw text line: drop bracket/punctuation characters,
        all whitespace, and parenthesised asides."""
        # Map the punctuation to spaces (was a Python-2-only
        # string.maketrans table; plain replace is version-independent).
        for ch in "[].,:":
            line = line.replace(ch, " ")
        line = line.strip()
        # Remove every remaining space between words.
        line = line.replace(" ", "")
        # Remove (...) and full-width （...） asides.
        # BUG FIX: the old pattern r'\(.*?\) |（.*?）' required a space
        # after ')', but all spaces were just removed above, so ASCII
        # parentheses were never stripped.
        line = re.sub(r'\(.*?\)|（.*?）', '', line)
        return line

    def Alignment_result(self, pos_result, word_entity):
        """Split the parallel [token, tag] pair lists into three aligned
        lists: characters, POS tags and entity tags.

        The caller asserts len(pos_result) == len(word_entity).
        """
        word, pos, entity = [], [], []
        for pos_pair, ent_pair in zip(pos_result, word_entity):
            word.append(ent_pair[0])
            entity.append(ent_pair[-1])
            pos.append(pos_pair[-1])
        return word, pos, entity

    # Check the NER result and drop illegal tags,
    # e.g. a sequence starting with I-time,I-time...
    def check_ner(self, word_entity):
        """Repair illegal BIO sequences.

        An 'I-xxx' tag is reset to 'O' when it starts the sequence,
        follows an (already corrected) 'O', or continues a different
        entity type than the previous tag.
        """
        check_result = []
        for i, ner in enumerate(word_entity):
            if ner.split('-')[0] == 'I':
                if i == 0:
                    ner = 'O'
                elif check_result[-1].split('-')[0] == 'O':
                    ner = 'O'
                elif ner.split('-')[1] != word_entity[i - 1].split('-')[1]:
                    ner = 'O'
            check_result.append(ner)
        return check_result

    # Merge the POS-tagging and named-entity results.
    def merge_token(self, word_list, pos_list, entity_list):
        """For each position keep the entity tag unless it is 'O', in
        which case fall back to the POS tag."""
        return [entity_list[i] if entity_list[i] != 'O' else pos_list[i]
                for i in range(len(word_list))]

    def check_merge(self, tag_result):
        """Promote 'I-' to 'B-' where a tag starts the sequence or its
        type differs from the previous (checked) tag, so every span
        opens with a 'B-' tag."""
        check_result = []
        for i, tag in enumerate(tag_result):
            parts = tag.split('-')
            if parts[0] == 'I':
                if i == 0 or parts[-1] != check_result[-1].split('-')[-1]:
                    parts[0] = 'B'
            check_result.append("-".join(parts))
        return check_result

    # Build the final result tokens.
    def get_token(self, words, checked_tag):
        """Join consecutive characters of one tagged span into
        "word/tag" strings.

        `checked_tag` is expected to hold 'B-xxx'/'I-xxx' style tags
        (the output of check_merge); a span ends right before the next
        'B-' tag.  Returns [] for empty input (the original crashed
        with an IndexError here).
        """
        if not checked_tag:
            return []
        word_list, tags_list = [], []
        buf = []
        last = len(checked_tag) - 1
        for i, tag in enumerate(checked_tag):
            buf.append(words[i])
            parts = tag.split('-')
            if i < last:
                # Flush the open span when the next tag starts a new one.
                if parts[0] in ('I', 'B') and checked_tag[i + 1].split('-')[0] == 'B':
                    word_list.append("".join(buf))
                    tags_list.append(parts[1])
                    buf = []
            else:
                # The final position always closes the open span.
                word_list.append("".join(buf))
                tags_list.append(parts[1])
        assert len(tags_list) == len(word_list)
        return ["/".join([w, t]) for w, t in zip(word_list, tags_list)]

#clear,pos,ner,merge
def product_line(ner, model, line, mark_dict=None):
    """Clean one text line, POS-tag and NER-tag it, and merge the two
    tag streams into "word/tag" tokens.

    Returns "" when the line is empty after cleaning, otherwise the
    token list produced by ner.get_token.

    BUG FIX: `mark_dict` now defaults to None — product_file and
    product_str call this function with only three arguments, which
    previously raised a TypeError.
    """
    # Clean the raw line first.
    line = ner.clear_file(line)
    if line == "":
        return ""
    # POS-tag the line at character level.
    # NOTE(review): the import of fyz_pos_tag_word is commented out at
    # the top of the file, so this call raises NameError until it is
    # restored — confirm against train_data.genera_data.
    word_pos, _line_, char_pos = fyz_pos_tag_word(line, mark_dict)
    # NER-tag the same characters.
    word_entity = ner.ner_model(model, _line_)

    # Align the POS and NER results position by position.
    assert len(char_pos) == len(word_entity)
    word, pos, entity = ner.Alignment_result(char_pos, word_entity)

    # Repair illegal BIO sequences in the NER output.
    check_ner_result = ner.check_ner(entity)

    # Merge POS and entity tags (entity wins over 'O').
    assert len(word) == len(pos) == len(check_ner_result)
    merge_result = ner.merge_token(word, pos, check_ner_result)

    # Ensure every merged span opens with a 'B-' tag.
    checked_tag = ner.check_merge(merge_result)

    # Build the final "word/tag" tokens.
    return ner.get_token(word, checked_tag)


def product_file(ner, input_file_, model):
    """Tag every non-empty line of the file `input_file_`.

    Returns (output, tags_dict): `output` is the tagged text ("word tag"
    per line, blank line between input lines) and `tags_dict` maps each
    known tag to the distinct words seen with it.
    """
    tags_result = []
    # Close both files deterministically (they leaked before).
    with open("data/tags.txt", 'r') as tags_file:
        tags_dict = get_tags_dict(tags_file)
    with open(input_file_, 'r') as input_file:
        for line in input_file:
            line = line.strip()
            if line != "":
                tags_result.append(product_line(ner, model, line))
    out_parts = []
    for line in tags_result:
        for word in line:
            word_list = word.split('/')
            # NOTE(review): .encode('utf-8') assumes the word is a
            # unicode string; on a Python-2 byte string with non-ASCII
            # text this raises UnicodeDecodeError — confirm upstream.
            out_parts.append(word_list[0].encode('utf-8') + " " + word_list[-1] + '\n')
            if word_list[-1] in tags_dict:
                if word_list[0] not in tags_dict[word_list[-1]]:
                    tags_dict[word_list[-1]].append(word_list[0])
        out_parts.append('\n')
    return "".join(out_parts), tags_dict

def product_str(ner, str, model):
    """Split the raw UTF-8 string `str` on comma/period punctuation,
    tag each non-empty segment, and return the tagged text ("word tag"
    per line, blank line between segments).

    NOTE(review): the unused tags_dict lookup (and its open of
    data/tags.txt) was removed — the original built it but never read it.
    """
    tags_result = []
    # Decode, split on both ASCII and full-width sentence punctuation.
    input_list = re.split(u'[,.，。]', str.decode('utf-8'))
    for line in input_list:
        line = line.encode('utf-8').strip()
        if line != "":
            tags_result.append(product_line(ner, model, line))
    parts = []
    for line in tags_result:
        for word in line:
            word_list = word.split('/')
            parts.append(word_list[0].encode('utf-8') + " " + word_list[-1] + '\n')
        parts.append('\n')
    return "".join(parts)


def get_tags_dict(tags_file):
    """Read tag names from an open file (one 'B-xxx' style tag per line)
    and return a dict mapping each tag suffix to an empty list."""
    return {raw.strip().split('-')[-1]: [] for raw in tags_file}

def main():
    """Restore the NER model, tag the sample file, and write the
    collected entity lists (one "tag: words" line each) to tmp_result."""
    # Create and restore the model.
    config = ConfigNer()
    model = NERModel(config)
    model.build()
    model.restore_session(config.dir_model)
    print("model restore complete")
    ner = Ner_recognition()
    input_file_ = "data/59879167077a8b43d42838c5.txt"
    tags_results, tags_dict = product_file(ner, input_file_, model)
    if os.path.exists("tmp_result"):
        os.remove("tmp_result")
    # BUG FIX: the mode string was 'w ' (trailing space), which is an
    # invalid mode; the file was also never closed.
    with open('tmp_result', 'w') as output_file:
        for key in tags_dict:
            output_file.write(key + ":" + " ")
            output_file.write(" ".join(tags_dict[key]) + '\n')
if __name__ == "__main__":
    main()