from pyhanlp import *
# Perceptron-based segmenter, created once and reused below as the
# "recognition enabled by default" baseline for each demo section.
PerceptronNewSegment = HanLP.newSegment("perceptron")
# Chinese person-name recognition
print("\n========== 中文人名识别 ==========\n")
def demo_chinese_name_recognition(sentences):
    """Segment each sentence with Chinese person-name recognition enabled.

    Args:
        sentences: iterable of str, sentences to segment.

    Prints the segmenter's term list for each sentence.
    """
    # A fresh Viterbi segmenter with the Chinese name recognizer switched on.
    segment = HanLP.newSegment().enableNameRecognize(True)
    for sentence in sentences:
        print(segment.seg(sentence))

# Sentences with tricky person-name boundaries (ambiguous surnames,
# names adjacent to common words, parenthesised aliases).
chinese_name_sentences = [
    "武大靖创世界纪录夺冠，中国代表团平昌首金",
    "区长庄木弟新年致辞",
    "凯瑟琳和露西（庐瑞媛），跟她们的哥哥们有一些不同。",
    "王国强、高峰、汪洋、张朝阳光着头、韩寒、小四",
    "张浩和胡健康复员回家了",
    "王总和小丽结婚了",
    "编剧邵钧林和稽道青说",
    "这里有关天培的有关事迹",
]
demo_chinese_name_recognition(chinese_name_sentences)

print("========== 中文人名 基本默认已开启 ==========")
print(PerceptronNewSegment.seg(chinese_name_sentences[0]))

# Transliterated (foreign) person-name recognition
print("\n========== 音译人名识别 ==========\n")
translated_name_sentences = [
    "一桶冰水当头倒下，微软的比尔盖茨、Facebook的扎克伯格跟桑德博格、亚马逊的贝索斯、苹果的库克全都不惜湿身入镜，这些硅谷的科技人，飞蛾扑火似地牺牲演出，其实全为了慈善。",
    "世界上最长的姓名是简森·乔伊·亚历山大·比基·卡利斯勒·达夫·埃利奥特·福克斯·伊维鲁莫·马尔尼·梅尔斯·帕特森·汤普森·华莱士·普雷斯顿。",
]
# Viterbi segmenter with the transliterated-name recognizer switched on.
translated_segment = HanLP.newSegment().enableTranslatedNameRecognize(True)
for text in translated_name_sentences:
    print(translated_segment.seg(text))

print("========== 音译人名默认已开启 ==========")
print(PerceptronNewSegment.seg(translated_name_sentences[0]))

# Japanese person-name recognition
print("\n========== 日本人名识别 ==========\n")
def demo_japanese_name_recognition(sentences):
    """Segment each sentence with Japanese person-name recognition enabled.

    Args:
        sentences: iterable of str, sentences to segment.

    Prints the segmenter's term list for each sentence.
    """
    # A fresh Viterbi segmenter with the Japanese name recognizer switched on.
    segment = HanLP.newSegment().enableJapaneseNameRecognize(True)
    for sentence in sentences:
        print(segment.seg(sentence))

# Japanese names mixed with Chinese text, including look-alike place names
# ("龟山公园" vs the person "龟山千广").
japanese_name_sentences = [
    "北川景子参演了林诣彬导演的《速度与激情3》",
    "林志玲亮相网友:确定不是波多野结衣？",
    "龟山千广和近藤公园在龟山公园里喝酒赏花",
]
demo_japanese_name_recognition(japanese_name_sentences)
print("========== 日本人名默认已开启 ==========")
print(PerceptronNewSegment.seg(japanese_name_sentences[0]))

# Organization-name recognition
print("\n========== 机构名识别 ==========\n")
org_sentences = [
    "我在上海林原科技有限公司兼职工作，",
    "我经常在台川喜宴餐厅吃饭，",
    "偶尔去开元地中海影城看电影。",
]
# Viterbi segmenter with the organization recognizer switched on.
org_segment = HanLP.newSegment().enableOrganizationRecognize(True)
for text in org_sentences:
    print(org_segment.seg(text))

print("========== 机构名 标准分词器已经全部关闭 ==========")
print(PerceptronNewSegment.seg(org_sentences[1]))

# Place-name recognition
print("\n========== 地名识别 ==========\n")
def demo_place_recognition(sentences):
    """Segment each sentence with place-name recognition enabled.

    Args:
        sentences: iterable of str, sentences to segment.

    Prints the segmenter's term list for each sentence.
    """
    # A fresh Viterbi segmenter with the place-name recognizer switched on.
    segment = HanLP.newSegment().enablePlaceRecognize(True)
    for sentence in sentences:
        print(segment.seg(sentence))

# Deeply nested administrative place names (province/city/county/town/village).
place_sentences = [
    "蓝翔给宁夏固原市彭阳县红河镇黑牛沟村捐赠了挖掘机",
    "中原工学院位于河南省郑州市新郑市龙湖镇柏树刘社区",
    "我的家乡在山东省枣庄市市中区",
]
demo_place_recognition(place_sentences)

print("========== 地名 默认已开启 ==========")
print(PerceptronNewSegment.seg(place_sentences[0]))

# URL recognition
print("\n========== URL识别 ==========\n")
url_sentences = [
    "HanLP的项目地址是https://github.com/hankcs/HanLP，",
    "发布地址是https://github.com/hankcs/HanLP/releases，",
    "我有时候会在www.hankcs.com上面发布一些消息，",
    "我的微博是http://weibo.com/hankcs/，会同步推送hankcs.com的新闻。",
]
# The perceptron segmenter handles URLs without any extra switch.
for text in url_sentences:
    print(PerceptronNewSegment.seg(text))
print("========== URL默认已开启 ==========")
print(PerceptronNewSegment.seg(url_sentences[0]))



