#!/usr/bin/env python
# -*- coding: utf-8 -*-

import spacy


def cleanup(token, lower=True):
    """Normalize a token string.

    Optionally lowercases *token*, then strips surrounding whitespace.

    Bug fix: the original ``return`` was nested inside ``if lower:``, so
    calling with ``lower=False`` fell off the end and returned ``None``.
    The stripped token is now returned on both paths.
    """
    if lower:
        token = token.lower()
    return token.strip()


class spacy_info():
    """Thin wrapper around a spaCy pipeline exposing tokenize/POS/NER info."""

    def __init__(self):
        # 'en' is a spaCy 2.x shortcut that was removed in spaCy 3; fall back
        # to the modern package name so the wrapper works on either version.
        try:
            self.nlp = spacy.load('en')
        except (OSError, IOError):
            self.nlp = spacy.load('en_core_web_sm')

    def info(self, value):
        """Analyze the text *value* with the loaded pipeline.

        Returns a dict with keys:
          'tokenize': list of spaCy Token objects,
          'pos': dict mapping integer POS id -> [POS tag string, Token],
          'ner': dict mapping cleaned entity text -> entity label.
        """
        doc = self.nlp(value)
        tokenize = [token for token in doc]
        # NOTE(review): keyed by the integer POS id, so tokens that share a
        # POS overwrite each other and only the last survives — preserved
        # as-is to keep the original output shape; verify this is intended.
        tags = {token.pos: [token.pos_, token] for token in doc}

        # Bug fix: the original did list(set(entities))[0], keeping only ONE
        # arbitrary entity per label and dropping the rest. Every recognized
        # entity is now recorded. Also uses Span.text instead of the removed
        # spaCy-2-only Span.string attribute.
        ner_dict = dict()
        for ent in doc.ents:
            ner_dict[cleanup(ent.text, lower=True)] = ent.label_

        ret_dict = {'tokenize': tokenize, 'pos': tags, 'ner': ner_dict}
        return ret_dict


if __name__ == '__main__':
    # Demo run: analyze a sample sentence and print the resulting dict.
    sample = 'Rami Eid is studying at Stony Brook University in New York'
    analyzer = spacy_info()
    print(analyzer.info(sample))
