import os
LTP_DATA_DIR = 'E:/NLP/ltp_data_v3.4.0/ltp_data_v3.4.0'  # path to the LTP v3.4.0 model directory
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')  # word-segmentation model, file name `cws.model`
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')  # POS-tagging model, file name `pos.model`
ner_model_path = os.path.join(LTP_DATA_DIR, 'ner.model')  # named-entity-recognition model, file name `ner.model`
par_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')  # dependency-parsing model, file name `parser.model`
srl_model_path = os.path.join(LTP_DATA_DIR, 'pisrl_win.model')  # semantic-role-labelling model; here a single file `pisrl_win.model` (Windows build), not the `srl` directory

from pyltp import SentenceSplitter
from pyltp import Segmentor
from pyltp import Postagger
from pyltp import NamedEntityRecognizer
from pyltp import Parser
from pyltp import SementicRoleLabeller
class LW:
    """A labelled predicate: its token index plus the list of its arguments."""

    def __init__(self, id=None, arguments=None):
        # `id` kept for interface compatibility even though it shadows the builtin.
        self.index = id
        self.arguments = arguments
class Ar:
    """A semantic-role argument: role name plus the token range it covers."""

    def __init__(self, na=None, rg=None):
        self.name = na
        self.range = rg
class range:
    """An inclusive span of token indices (start/end).

    NOTE(review): this deliberately shadows the builtin ``range``; any later
    use of the builtin in this module would silently break — consider renaming.
    """

    def __init__(self, st=None, en=None):
        self.start, self.end = st, en
def trans(roles):
    """Convert pyltp SRL results into LW/Ar/range objects.

    Each element of *roles* exposes the predicate index at ``[0]`` and an
    iterable of arguments at ``[1]``, where every argument gives its name at
    ``[0]`` and an inclusive (start, end) pair at ``[1]``.
    """
    converted = []
    for r in roles:
        arguments = [Ar(a[0], range(a[1][0], a[1][1])) for a in r[1]]
        converted.append(LW(r[0], arguments))
    return converted
# Word segmentation
def segmentor(sentence='元芳，你怎么看'):
    """Segment *sentence* into words with the LTP segmenter.

    Loads the CWS model, segments the sentence, and returns the words as a
    plain list of str. The native model is always released, even if
    segmentation raises. (Removed a leftover debug ``print``.)
    """
    seg = Segmentor(cws_model_path)  # load the segmentation model
    try:
        return list(seg.segment(sentence))
    finally:
        seg.release()  # free the native model in every case

def posttagger(words):
    """POS-tag the segmented *words* with the LTP tagger.

    Returns the raw pyltp tag sequence (as the original did). The model is
    now released in a ``finally`` so it is freed even if tagging raises.
    """
    postagger = Postagger(pos_model_path)  # load the POS model
    try:
        return postagger.postag(words)
    finally:
        postagger.release()  # free the native model in every case

# Named-entity recognition
def ner(words, postags):
    """Run LTP named-entity recognition over *words* with their *postags*.

    Returns the raw pyltp tag sequence. The model is released in a
    ``finally`` so it is freed even if recognition raises.
    """
    recognizer = NamedEntityRecognizer(ner_model_path)  # load the NER model
    try:
        return recognizer.recognize(words, postags)
    finally:
        recognizer.release()  # free the native model in every case

# Dependency parsing
def parse(words, postags):
    """Run LTP dependency parsing over *words* with their *postags*.

    Returns the raw pyltp arc sequence. The model is released in a
    ``finally`` so it is freed even if parsing raises.
    """
    parser = Parser(par_model_path)  # load the parser model
    try:
        return parser.parse(words, postags)
    finally:
        parser.release()  # free the native model in every case

# Semantic role labelling
def role_label(words, postags, arcs):
    """Run LTP semantic-role labelling and return LW/Ar/range objects.

    The raw result is converted via :func:`trans` BEFORE the model is
    released, matching the original call order. (Removed a leftover debug
    ``print``; release now happens in a ``finally``.)
    """
    labeller = SementicRoleLabeller(srl_model_path)  # load the SRL model
    try:
        return trans(labeller.label(words, postags, arcs))
    finally:
        labeller.release()  # free the native model in every case
if __name__ == '__main__':
    # Read one sentence per line from sp.txt and append the SRL results to
    # role_result.txt. Context managers guarantee both files are closed
    # even if any pipeline stage raises (the original never closed them on
    # error). Output strings are unchanged.
    with open('data/sp.txt', 'r', encoding='utf-8') as sp, \
         open('data/role_result.txt', 'a', encoding='utf-8') as f:
        for i, passage in enumerate(sp, start=1):
            words = segmentor(passage)
            tags = posttagger(words)
            arcs = parse(words, tags)
            roles = role_label(words, tags, arcs)
            # Each successive predicate is indented one tab deeper.
            for j, role in enumerate(roles, start=1):
                space = '\t' * j
                f.write('谓词:' + words[role.index] + '\n')
                for arg in role.arguments:
                    # range is inclusive, hence the +1 on the slice end.
                    f.write(space + arg.name + ':' + ''.join(words[arg.range.start:arg.range.end+1]) + '\n')
            f.write('#########################以上为第{}句#########################\n'.format(i))
