# 1. 字符串处理
# 2. 正则语法
# 3. 常用函数
# 4. nltk工具包
# 5. 停用词过滤
# 7. 词性标注
# 8. 数据清洗
# 9. Spacy工具包
import spacy
# Load spaCy's small English pipeline (tokenizer, tagger, parser, NER).
nlp = spacy.load('en_core_web_sm')
# Sample paragraph; note the curly quotes (“ ” ’) — kept deliberately to
# exercise Unicode-aware tokenization.
text = ("When Sebastian Thrun started working on self-driving cars at "
        "Google in 2007, few people outside of the company took Jack "
        "seriously. “I can tell you very senior CEOs of major American "
        "car companies would shake my hand and turn away because I wasn’t "
        "worth talking to,” said Thrun, in an interview with Recode earlier "
        "this week.")
# Parse once; the resulting Doc is shared by all the demo functions below.
doc = nlp(text)
# 分词
def In1():
    """Tokenization demo: print every token of the shared doc, one per line."""
    for tok in doc:
        print(tok)
# 分句
def In2():
    """Sentence-segmentation demo: print each sentence of the shared doc."""
    for sentence in doc.sents:
        print(sentence)
# 词性
def In3():
    """Part-of-speech demo: print each token together with its coarse POS tag."""
    for tok in doc:
        line = '{}-{}'.format(tok, tok.pos_)
        print(line)
# 命名实体识别
def In4():
    """Named-entity demo: print each entity with its label (e.g. Google-ORG)."""
    for ent in doc.ents:
        # Fixed: original format string was '{}-{}]' with a stray ']'
        # that garbled every output line.
        print('{}-{}'.format(ent, ent.label_))
# 展示
def In5():
    """Render the shared doc's named entities with displaCy."""
    from spacy import displacy
    # Fixed: dropped manual=True. That flag tells displaCy the input is a
    # list of pre-parsed dicts, but we pass a Doc object — the combination
    # raises / renders nothing. A plain Doc must use the automatic mode.
    displacy.render(doc, style='ent')
# 查找小说中人物出现次数
def In6():
    """Count character mentions in book1.txt and print the top-10 PERSON entities.

    Also prints the number of sentences detected in the book.
    """
    from collections import Counter
    # Explicit encoding: the default is platform-dependent (e.g. cp936 on
    # Chinese Windows), which would crash on a UTF-8 novel.
    with open('book1.txt', 'r', encoding='utf-8') as f:
        text = f.read()
    # Parse outside the with-block; the file is no longer needed.
    processed_text = nlp(text)
    sentences = list(processed_text.sents)
    print(len(sentences))

    def find_person(doc):
        # Count by lemma so inflected forms of a name collapse together.
        c = Counter(ent.lemma_ for ent in doc.ents
                    if ent.label_ == 'PERSON')
        return c.most_common(10)

    print(find_person(processed_text))
# In6()


# 10. 结巴分词器(中文分词)
# import jieba