# coding=utf-8
import sys,os
from py4j.java_gateway import JavaGateway,GatewayParameters,java_import
import py4j
import requests,re


# Module-level handles onto the remote HanLP JVM.  They stay None until
# startJavaGateway() successfully connects; HanlpClient.init_java_jvm()
# resets java_jvm to None to force a reconnect after a py4j failure.
jbean_NotionalTokenizer = None
jbean_HanLP = None
jbean_CRFnewSegment = None
java_jvm = None

# Fully-qualified Java classes imported into the gateway's JVM view.
_HANLP_JAVA_CLASSES = (
    "com.hankcs.hanlp.tokenizer.NotionalTokenizer",
    "com.hankcs.hanlp.dictionary.CustomDictionary",
    "com.hankcs.hanlp.HanLP",
    "com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary",
    "com.hankcs.hanlp.tokenizer.SpeedTokenizer",
    "com.hankcs.hanlp.model.crf.CRFLexicalAnalyzer",
    "com.hankcs.hanlp.summary.TextRankKeyword",
    "com.hankcs.hanlp.tokenizer.StandardTokenizer",
    "com.hankcs.hanlp.tokenizer.NLPTokenizer",
    "com.hankcs.hanlp.tokenizer.IndexTokenizer",
)

def startJavaGateway(address='192.168.0.42'):
    """Connect to the Py4J gateway at *address* and cache the HanLP beans.

    Idempotent: does nothing while a gateway connection is already live
    (``java_jvm is not None``).  The gateway host used to be hard-coded;
    it is now a defaulted parameter so other deployments can override it.

    Side effects: populates the module globals ``java_jvm``,
    ``jbean_NotionalTokenizer``, ``jbean_HanLP`` and ``jbean_CRFnewSegment``.
    """
    global jbean_NotionalTokenizer, jbean_HanLP, jbean_CRFnewSegment, java_jvm
    if java_jvm is None:
        print('startJavaGateway.....', address)
        # read_timeout of 3 minutes: CRF model loading on the Java side is slow.
        gpm = GatewayParameters(address=address, read_timeout=60 * 3, auto_field=True)
        gateway = JavaGateway(gateway_parameters=gpm)
        java_jvm = gateway.jvm
        for classname in _HANLP_JAVA_CLASSES:
            java_import(java_jvm, classname)
        jbean_NotionalTokenizer = java_jvm.NotionalTokenizer
        jbean_HanLP = java_jvm.HanLP
        # Instantiate the CRF analyzer once; it is expensive to build.
        jbean_CRFnewSegment = java_jvm.CRFLexicalAnalyzer()

# Connect eagerly at import time so HanlpClient finds live beans.
startJavaGateway()
class HanlpClient(object):
    """Thin client around a remote HanLP instance reached through Py4J.

    Wraps the Java-side tokenizers (Notional / Standard / NLP / Index /
    Speed / CRF), the custom- and stop-word dictionaries, keyword/phrase
    extraction and summarization, plus pure-Python HTML-cleanup helpers
    used by parse_url().
    """

    def __init__(self):
        self.init_java_jvm()

    def init_java_jvm(self):
        """Bind the module-level gateway handles onto this instance.

        On a py4j error (stale/broken gateway connection) the global
        gateway is torn down, rebuilt via startJavaGateway(), and the
        binding is retried recursively.
        """
        global java_jvm
        try:
            self.jvm = java_jvm
            self.NotionalTokenizer = jbean_NotionalTokenizer
            self.HanLP = jbean_HanLP
            self.CRFnewSegment = jbean_CRFnewSegment
            self.enable_config(jbean_NotionalTokenizer.SEGMENT)
            self.enable_config(jbean_CRFnewSegment)
            # Local mirror of stop words added through this client
            # (the Java dictionary cannot easily be enumerated).
            self.stop_words = []
        except Exception as ex:
            exname = type(ex).__name__
            print(exname, ex)
            if 'py4j' in exname.lower():
                # Gateway connection is broken: force a reconnect and retry.
                java_jvm = None
                startJavaGateway()
                self.init_java_jvm()

    def import_java(self, classname):
        """Import *classname* into the remote JVM view of this client."""
        java_import(self.jvm, classname)

    @staticmethod
    def _term_dict(term):
        """Convert a Java ``Term`` into a plain ``{'word','flag','offset'}`` dict."""
        return {'word': term.word, 'flag': str(term.nature), 'offset': term.offset}

    def segment(self, text, length=None):
        """Segment *text* with the NotionalTokenizer (stop words removed).

        text   -- a str, or a list of str (processed element-wise).
        length -- if given, keep only words with ``len(word) >= length``.
        Returns a list of term dicts (or a list of such lists for list
        input); returns None for any other input type, as before.
        """
        if isinstance(text, str):
            items = self.NotionalTokenizer.segment(text)
            if length is None:
                return [self._term_dict(item) for item in items]
            return [self._term_dict(item) for item in items
                    if len(item.word) >= length]
        elif isinstance(text, list):
            return [self.segment(txt, length=length) for txt in text]

    def add_custom_stop_word(self, word):
        """Add one stop word (str) or many (list of str).

        Best-effort: gateway failures are swallowed so callers never crash.
        """
        try:
            CoreStopWordDictionary = self.jvm.CoreStopWordDictionary
            words = word if isinstance(word, list) else [word]
            self.stop_words.extend(words)
            for w in words:
                CoreStopWordDictionary.add(w)
        except Exception:
            # Deliberately best-effort; see docstring.
            pass

    def remove_custom_stop_word(self, word):
        """Remove one stop word (str) or many (list of str).

        Also untracks the words from the local mirror so get_stop_words()
        stays accurate (previously removed words were still reported).
        """
        try:
            CoreStopWordDictionary = self.jvm.CoreStopWordDictionary
            words = word if isinstance(word, list) else [word]
            for w in words:
                CoreStopWordDictionary.remove(w)
                if w in self.stop_words:
                    self.stop_words.remove(w)
        except Exception:
            pass

    def add_custom_word(self, word):
        """Add one custom dictionary word (str) or many (list of str); best-effort."""
        try:
            CustomDictionary = self.jvm.CustomDictionary
            words = word if isinstance(word, list) else [word]
            for w in words:
                CustomDictionary.add(w)
        except Exception:
            pass

    def remove_custom_word(self, word):
        """Remove one custom dictionary word (str) or many (list of str); best-effort."""
        try:
            CustomDictionary = self.jvm.CustomDictionary
            words = word if isinstance(word, list) else [word]
            for w in words:
                CustomDictionary.remove(w)
        except Exception:
            pass

    def get_custom_words(self):
        """Return the keys currently held by the Java CustomDictionary trie."""
        trie = self.jvm.CustomDictionary.trie
        if trie is not None:
            return trie.keySet()
        return []

    def get_stop_words(self):
        """Return the stop words added through this client instance."""
        return self.stop_words

    def get_word_flag(self, word):
        """Return the part-of-speech flag of the first token of *word* ('' if none)."""
        wds = self.segment(word)
        return wds[0]['flag'] if wds else ''

    def speedSegment(self, text, length=2):
        """Fast dictionary-based segmentation; plain words with ``len >= length``."""
        words = self.jvm.SpeedTokenizer.segment(text)
        return [w.word for w in words if len(w.word) >= length]

    def extractPhrase(self, text, topn=None):
        """Extract up to *topn* phrases (default: len(text)) from a str or list of str."""
        if topn is None:
            topn = len(text)
        if isinstance(text, str):
            return self.HanLP.extractPhrase(text, topn)
        elif isinstance(text, list):
            return [self.extractPhrase(txt, topn=topn) for txt in text]

    def extractKeyword(self, text, topn=None):
        """TextRank keywords via HanLP.extractKeyword; *topn* defaults to len(text)."""
        if topn is None:
            topn = len(text)
        return self.HanLP.extractKeyword(text, topn)

    def getKeyword(self, text, topn=None):
        """TextRank keywords using a TextRankKeyword built on the notional segmenter."""
        if topn is None:
            topn = len(text)
        textRankKeyword = self.jvm.TextRankKeyword(self.NotionalTokenizer.SEGMENT)
        return textRankKeyword.getKeywords(text, topn)

    def getSummary(self, content, max_length=None, sentence_separator=None):
        """Summarize *content* down to *max_length* chars (default: full length)."""
        if max_length is None:
            max_length = len(content)
        if sentence_separator is None:
            return self.HanLP.getSummary(content, max_length)
        return self.HanLP.getSummary(content, max_length, sentence_separator)

    def extractSummary(self, content, size, sentence_separator=None):
        """Return the *size* most central sentences of *content*."""
        if sentence_separator is None:
            return self.HanLP.extractSummary(content, size)
        return self.HanLP.extractSummary(content, size, sentence_separator)

    def standardSegment(self, content):
        """Segment with the StandardTokenizer (no stop-word filtering)."""
        words = self.jvm.StandardTokenizer.segment(content)
        return [self._term_dict(item) for item in words]

    def nlpSegment(self, content, length=None):
        """Segment with the NLPTokenizer; optionally keep only words of ``len >= length``."""
        words = self.jvm.NLPTokenizer.segment(content)
        if length is None:
            return [self._term_dict(item) for item in words]
        return [self._term_dict(item) for item in words
                if len(item.word) >= length]

    def indexSegment(self, content):
        """Segment with the IndexTokenizer (emits overlapping index terms)."""
        words = self.jvm.IndexTokenizer.segment(content)
        return [self._term_dict(item) for item in words]

    def crfSegment(self, content, length=None):
        """Segment with the CRF lexical analyzer.

        Returns term dicts shaped like segment()'s, except that the CRF
        word list carries no character position, so 'offset' is always 0.
        Accepts a str or a list of str (processed element-wise).
        """
        if isinstance(content, str):
            words = self.CRFnewSegment.analyze(content).wordList
            result = []
            for item in words:
                word = item.getValue()
                if length is None or len(word) >= length:
                    result.append({'word': word, 'flag': item.label, 'offset': 0})
            return result
        elif isinstance(content, list):
            return [self.crfSegment(txt, length=length) for txt in content]

    def enable_config(self, segment):
        """Turn on every recognizer/feature this client relies on for *segment*."""
        segment.enableNameRecognize(True)
        segment.enablePlaceRecognize(True)
        segment.enableAllNamedEntityRecognize(True)
        segment.enableCustomDictionary(True)
        segment.enableCustomDictionaryForcing(True)
        segment.enableOrganizationRecognize(True)
        segment.enableTranslatedNameRecognize(True)
        segment.enableJapaneseNameRecognize(True)
        segment.enableMultithreading(True)
        segment.enablePartOfSpeechTagging(True)  # HMM part-of-speech tagging still supported

    def replaceCharEntity(self, htmlstr):
        """Replace a small set of HTML character entities in *htmlstr*.

        Known entities are substituted one at a time (nbsp is dropped);
        unknown entities are removed entirely.
        """
        CHAR_ENTITIES = {'nbsp': '', '160': ' ', 'lt': '<', '60': '<',
                         'gt': '>', '62': '>',
                         'amp': '&', '38': '&',
                         'quot': '"', '34': '"'}
        re_charEntity = re.compile(r'&#?(?P<name>\w+);')
        sz = re_charEntity.search(htmlstr)
        while sz:
            key = sz.group('name')
            # Substitute one entity per pass so the search restarts on the new text.
            try:
                htmlstr = re_charEntity.sub(CHAR_ENTITIES[key], htmlstr, 1)
            except KeyError:
                # Unknown entity: drop it.
                htmlstr = re_charEntity.sub('', htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
        # Catch a trailing '&nbsp' written without its closing semicolon.
        return htmlstr.replace('&nbsp', '')

    def filter_tags(self, htmlstr):
        """Strip HTML markup from *htmlstr* and return the remaining text.

        None is tolerated and yields "".  <br> becomes a newline, runs of
        blank lines collapse, and character entities are resolved last.
        """
        if htmlstr is None:
            return ""
        re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)  # CDATA sections
        re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script> blocks
        re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # <style> blocks
        re_br = re.compile(r'<br\s*?/?>')  # line breaks -> '\n'
        re_h = re.compile(r'</?\w+[^>]*>')  # any remaining HTML tag
        re_comment = re.compile(r'<!--[^>]*-->')  # HTML comments
        s = re_cdata.sub('', htmlstr)
        s = re_script.sub('', s)
        s = re_style.sub('', s)
        s = re_br.sub('\n', s)
        s = re_h.sub('', s)
        s = re_comment.sub('', s)
        blank_line = re.compile(r'\n+')
        s = blank_line.sub('\n', s)
        s = self.replaceCharEntity(s)
        return s

    def check_contain_chinese(self, check_str):
        """Return True if *check_str* contains at least one CJK unified ideograph."""
        return any(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str)

    def recognize_content(self, content, extractSummary=False):
        """Pull person names, organizations and places out of *content*.

        Uses CRF segmentation and keeps only Chinese-containing words of
        length >= 2.  Flag mapping: nr/nrf/nrj -> name, nt/nz ->
        organization, ns -> place.  Duplicates are dropped while
        preserving first-seen order.
        """
        names = []
        company = []
        addr = []
        for k in self.crfSegment(content, length=2):
            word = k['word']
            flag = k['flag']
            if not self.check_contain_chinese(word):
                continue
            if flag in ('nr', 'nrf', 'nrj') and word not in names:
                names.append(word)
            if flag in ('nt', 'nz') and word not in company:
                company.append(word)
            if flag == 'ns' and word not in addr:
                addr.append(word)
        result = {'errorCode': 0}
        result['name'] = names
        result['organization'] = company
        result['place'] = addr
        if extractSummary:
            result['summary'] = self.extractSummary(content, 3)
        return result

    def parse_url(self, url):
        """Fetch *url*, strip markup and whitespace, then run entity recognition.

        Returns the recognize_content() dict (including a 3-sentence
        summary) with the source 'url' added.
        """
        req = requests.get(url, timeout=30)
        content = req.content.decode('utf-8')
        content = self.filter_tags(content)
        content = content.replace(' ', '').replace('\n', '').replace('\r', '')
        result = self.recognize_content(content, extractSummary=True)
        result['url'] = url
        return result
def test():
    """Demo driver: exercises dictionaries, segmentation, NER and URL parsing.

    Requires a live Py4J gateway (see startJavaGateway) and, for the last
    section, network access to the sampled government-news URL.
    """

    hanlp = HanlpClient()
    # ss = hanlp.crfSegment('非法采砂')
    words = '扫黑除恶非法采砂三亚新机场三亚总部经济'
    # Register a stop word and a few domain-specific custom words
    # (the 'ns 10' suffix sets HanLP nature 'place name' with frequency 10).
    hanlp.add_custom_stop_word(['举办'])
    hanlp.jvm.CustomDictionary.add('海口日月广场', 'ns 10')
    hanlp.add_custom_word(['南繁科技城', '参保计划', '海口日月广场 ns 10','三亚新机场','南繁科技城','宁夏固原市彭阳县红河镇黑牛沟村'])
    result = hanlp.recognize_content('其实还包含一个问题。怎么看出一个程序员是不是喜欢你？')
    print(result)
    # Long press-release sample (unused: overwritten by the next assignment).
    s = '举办低碳制造业1.陈家海举办海南省首届知识产权五指山论坛，联署发布《知识产权运营支撑海南自由贸易试验区和中国特色自由贸易港建设的海南共识》；组织赴中国证监委、国家市场监管总局、国家知识产权局征求指导，与省地方金融办、海南证监局进行了多次对接，赴上交所、深交所寻求支持；与阿特多多等20几家知识产权服务交易机构和金融公司就知识产权证券化和知识产权交易中心建设进行洽商。2.完成了《海南知识产权证券化方案》《知识产权证券化交易模式基本结构及操作流程》和《知识产权证券化（IPS)工作预算》。3.组建了海南省知识产权证券化专家顾问团队和推进机构；2月22日，组织海南省知识产权证券化工作推进小组第一次会议。4.与上海证券交易所、广东省战略知识产权研究院、浙江阿特多多知识产权交易中心有限公司和北京华智大为科技有限公司签属了战略合作框架协议。5.在我省医药行业等产业开展摸底，征集企业需求，举办说明会，筹备我省知识产权证券化第一单；2月22日，举办知识产权证券化系列活动；4月1日，组织知识产权证券化标的介绍会，并举行首批知识产权证券化标的入库签约仪式；与上海证券交易所沟通对接IPS发单模式。6.2018年12月21日，奇艺世纪知识产权供应链（ABS）在上海证券交易所成功发行；2月14日，在中国（海南）自由贸易试验区制度创新案例新闻发布会上，全国首单知识产权证券化作为海南自贸区第一批制度创新的典型案例正式发布。7.指导海口市成功申报国家知识产权运营服务体系建设重点城市，印发了《海口市知识产权运营服务体系建设实施方案》。目前《海口市知识产权运营服务体系建设专项资金管理办法》正在征求意见。8.向有关单位定向征集知识产权交易中心建设方案，并组织5位领域专家及知识产权证券化推进机构，对交易中心建设方案进行了评审，并将评审结果报送省地方金融监管局；赴国家知识产权局协调中国（海南）国际知识产权交易中心落地事宜；3月6日，参加省地方金融监管局组织召开的知识产权交易中心组建工作会议，研究交易中心的股权设置、发起人确定、业务开展及合规性监管等问题；启动浙江阿特多多知识产权交易中心迁址海南前期工作，省地方金融监督管理局与浙江省地方金融监督管理局联系，征询该交易中心情况及迁址意见。9.引入广东省战略知识产权研究院在海南落地，注册“海南新盛世知识产权运营有限公司”，并组织了揭牌仪式。'

    # Shorter sentence actually used for the segmentation comparison below.
    s = '举办蓝翔给宁夏固原市彭阳县红河镇黑牛沟村捐赠了挖掘机,我在海南新境界软件有限公司工作,偶尔去地中海影城看电影,三亚新机场南繁科技城正在开工建设'
    # s = '我经常在台川喜宴餐厅吃饭'
    # s = '举办万宁市礼纪镇青云村全面实施全民参保计划多规合一,习大大习总书记'
    # Compare HanLP standard (notional) segmentation against CRF segmentation.
    print ('=' * 30, 'Hanlp标准分词和CRF分词对比', "=" * 30)
    print ('停用词：', ' '.join(hanlp.get_stop_words()))
    print ('自定义词：', ' '.join(hanlp.get_custom_words()))
    print("-" * 100)
    print ("原句子：", s)
    print("-" * 100)
    ks = hanlp.segment(s, length=2)
    print ('标准分词：')
    for k in ks:
        print (k['word'] + '/' + k['flag'],)
    print()
    print("-" * 100)
    ks = hanlp.crfSegment(s, length=2)
    print ('CRF分词:')
    for k in ks:
        print (k['word'] + '/' + k['flag'],)
    print()
    print("-" * 50)

    s = "签约仪式前，吴坤等一同会见了参加签约的企业家，在海口宾馆举行,同时在海口日月广场设有分会场。微软公司的比尔盖茨、Facebook的扎克伯格跟桑德伯格、苹果的库克全都不惜湿身入镜，北川景子参演了" \
        "\n参数企业有海南新境界软件有限公司，海南沃思科技公司等知名企业"

    print ('原文：', s)

    # Named-entity extraction via NLP segmentation:
    # nr/nrf/nrj = person names, nt = organizations, ns = places.
    wds = hanlp.nlpSegment(s, length=2)
    # wds = hanlp.crfSegment(s)
    names = ''
    company = ''
    addr = ''
    for k in wds:
        if k['flag'] == 'nr' or k['flag'] == 'nrf' or k['flag'] == 'nrj':
            names += k['word'] + ' '
        if k['flag'] == 'nt':
            company += k['word'] + ' '
        if k['flag'] == 'ns':
            addr += k['word'] + ' '
        print(k['word'],k['flag'])
    print ('-' * 50)
    print ('上文中有提到的相关人员：', names)
    print ('上文中有提到知名企业：', company)
    print ('上文涉及到的地点：', addr)

    # End-to-end: download a news page, strip HTML, extract entities + summary.
    result = hanlp.parse_url('http://www.hainan.gov.cn/hainan/newldhd/201904/eaff783c2031488cb6e39e331bd57d7a.shtml')
    print ('-' * 50)
    print ('分析URL事件')
    print (result['url'])
    print ('涉及到以下相关信息')
    print ('-' * 50)
    print ('提到的相关人员：', ' '.join(result['name']))
    print ('提到组织：', ' '.join(result['organization']))
    print ('涉及到的地点：', ' '.join(result['place']))
    print ('-' * 50)
    print ('主要事件')
    index = 1
    for line in result['summary']:
        print (index, line)
        index += 1

    print (' '.join(hanlp.extractPhrase('加快推进快递业绿色包装应用')))
if __name__ == '__main__':
    test()