# -*- coding:utf8 -*-
"""
直接调用本地的LTP模型进行相应分析
入参可能为unicode,需要转换为utf8,出参是否直接为unicode
"""
from __future__ import print_function
import os
import sys
import codecs

from pyltp import Segmentor, Postagger, NamedEntityRecognizer, Parser, SementicRoleLabeller, SentenceSplitter
from logger_utility.loggconfig import getLogger

# Text is handled as Python 3 ``str`` throughout; refuse to run on 2.x.
if sys.version_info.major < 3:
    raise RuntimeError("Only Support Python3!")

# Resolve the LTP model directory relative to this file:
# <repo>/models/ltp_data_v3.4.0, two levels above this module's package.
LTP_DIR = os.path.dirname(os.path.realpath(__file__))
UTILITY_DIR = os.path.dirname(LTP_DIR)
MODELS_DIR = os.path.join(os.path.dirname(UTILITY_DIR), 'models', 'ltp_data_v3.4.0')
# Fail fast at import time if the model data is missing.
if not os.path.exists(MODELS_DIR):
    raise RuntimeError("Model Path Not Exists!")

# Scratch file used to materialize an in-memory user dictionary, because
# pyltp's load_with_lexicon only accepts a file path (see LtpWrapper).
TMP_USER_DICT = os.path.join(
    os.path.dirname(
        os.path.realpath(__file__)),
    'Tmp_User_Dict.txt')

logger = getLogger("LtpWraps")


class LtpWrapper(object):
    """Lazy-loading wrapper around the local pyltp models.

    One wrapper instance is kept per distinct user dictionary (see
    ``__new__``), and each pyltp model (segmentor, postagger, recognizer,
    parser, labeller) is loaded on first attribute access via
    ``__getattr__`` and then cached on the instance.
    """

    # Singleton cache keyed by user dict: None, a path string, or a
    # tuple of dictionary entries (lists are normalized to tuples).
    instance = {}

    def __init__(self, user_dict_path_or_list=None):
        """
        Remember the user dictionary used by the segmentor.

        :param user_dict_path_or_list: path of a user dict file, or an
            iterable of dictionary entries, or None for no user dict.
        """
        self.user_dict = user_dict_path_or_list

    def __new__(cls, *args, **kwargs):
        # Accept the dict both positionally and as a keyword so the
        # cache key always matches what __init__ receives (the original
        # ignored the keyword form and mis-keyed the cache).
        if args:
            user_dict = args[0]
        else:
            user_dict = kwargs.get('user_dict_path_or_list')

        # Lists are unhashable; normalize to a tuple for the cache key.
        if isinstance(user_dict, list):
            user_dict = tuple(user_dict)

        if user_dict not in cls.instance:
            # object.__new__ accepts no extra arguments in Python 3.
            cls.instance[user_dict] = object.__new__(cls)
        return cls.instance[user_dict]

    def __getattr__(self, item):
        """Load the pyltp model named *item* on first access and cache it.

        :param item: one of "segmentor", "postagger", "recognizer",
            "parser", "labeller".
        :raises AttributeError: for any other name, so that
            ``hasattr()``/``copy``/``pickle`` keep working (the original
            raised AssertionError here).
        """
        model_files = {
            "segmentor": 'cws.model',
            "postagger": 'pos.model',
            "recognizer": 'ner.model',
            "parser": 'parser.model',
            "labeller": 'pisrl.model',
        }
        if item not in model_files:
            raise AttributeError(item)

        # The class lookup happens after the name check so unknown
        # attributes never touch pyltp.
        model = {
            "segmentor": Segmentor,
            "postagger": Postagger,
            "recognizer": NamedEntityRecognizer,
            "parser": Parser,
            "labeller": SementicRoleLabeller,
        }[item]()
        model_path = os.path.join(MODELS_DIR, model_files[item])
        logger.info("Load Ltp Data Model: %s", model_path)

        if item == 'segmentor' and self.user_dict:
            if isinstance(self.user_dict, str):
                model.load_with_lexicon(
                        model_path, self.user_dict)  # load model + lexicon
                # Count entries inside a context manager so the file
                # handle is closed (the original leaked it).
                with codecs.open(self.user_dict, encoding='utf8') as fi:
                    dict_size = len(fi.readlines())
                logger.info('Load User Dict %s: %s', self.user_dict, dict_size)
            else:
                # pyltp only accepts a lexicon path, so write the
                # in-memory entries to a scratch file first.
                with codecs.open(TMP_USER_DICT, 'w', encoding='utf8') as fo:
                    for line in self.user_dict:
                        line = line.strip()
                        if not line:
                            continue
                        fo.write(line)
                        fo.write('\n')
                model.load_with_lexicon(
                    model_path, TMP_USER_DICT)
                logger.info(
                    'Load User Dict %s: %s', TMP_USER_DICT, len(
                        self.user_dict))

        else:
            model.load(model_path)

        # Cache on the instance; later lookups bypass __getattr__.
        self.__dict__[item] = model

        logger.info('Load Ltp Ins %s, %s, %s', item, model, model_path)
        return model

    def reload_label(self):
        """Release the semantic-role labeller and reload it from disk."""
        # Pop the cached model first so we do not lazily load a model
        # only to release it when none was loaded yet.
        cached = self.__dict__.pop('labeller', None)
        if cached is not None:
            cached.release()
        self.__getattr__("labeller")

    def split(self, content):
        """Split *content* into sentences."""
        sents = SentenceSplitter.split(content)
        return sents

    def segment(self, content):
        """Return the list of words in *content*."""
        words = self.segmentor.segment(content)
        return list(words)

    def postag(self, content):
        """Return (words, postags) for *content*."""
        words = self.segmentor.segment(content)
        postags = self.postagger.postag(words)
        return list(words), list(postags)

    def ner(self, content):
        """Return (words, postags, netags) for *content*."""
        words = self.segmentor.segment(content)
        postags = self.postagger.postag(words)
        netags = self.recognizer.recognize(words, postags)
        return list(words), list(postags), list(netags)

    def parse(self, content):
        """Return (words, postags, arcs); arcs are (head, relation) tuples."""
        words = self.segmentor.segment(content)
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        _arcs = [(arc.head, arc.relation) for arc in arcs]
        return list(words), list(postags), _arcs

    def parse_ner(self, content):
        """Return (words, postags, netags, arcs) for *content*."""
        words = self.segmentor.segment(content)
        postags = self.postagger.postag(words)
        netags = self.recognizer.recognize(words, postags)
        arcs = self.parser.parse(words, postags)
        _arcs = [(arc.head, arc.relation) for arc in arcs]
        return list(words), list(postags), list(netags), _arcs

    def label(self, content):
        """Run the full pipeline with semantic role labelling.

        :return: (words, postags, netags, arcs, roles) where roles are
            (predicate_index, [(arg_name, start, end), ...]) tuples.
        """
        words = self.segmentor.segment(content)
        postags = self.postagger.postag(words)
        netags = self.recognizer.recognize(words, postags)
        arcs = self.parser.parse(words, postags)
        roles = self.labeller.label(words, postags, arcs)
        _arcs = [(arc.head, arc.relation) for arc in arcs]
        _roles = [(role.index,
                   [(arg.name, arg.range.start, arg.range.end)
                    for arg in role.arguments])
                  for role in roles]
        return list(words), list(postags), list(netags), _arcs, _roles


if __name__ == "__main__":
    from logger_utility.loggconfig import LoggerConfig, DEBUG
    LoggerConfig().config(True, DEBUG)

    def test1():
        try:
            ltp = LtpWrapper()
            for i in range(100):
                for w in ltp.segment('你是一个贱人'):
                    print(w)
                import time
                time.sleep(2)
        except Exception as e:
            # print(e)
            pass

    def test2():
        ltp = LtpWrapper()
        words, postags, arcs = ltp.parse('你恒大广场--Lily')
        print(len(words), len(postags), len(arcs))
        print("\t".join("%d:%s" % (arc.head, arc.relation) for arc in arcs))

    def test3():
        ltp = LtpWrapper()
        words = ltp.segment('你是一个贱人')

    def ner_test():
        ltp = LtpWrapper()
        _ = ltp.label("刘德华是一名歌手")
        words, postags, ners, _arcs, roles = _
        print(words, postags, ners, _arcs, roles)

    ner_test()
