#!/usr/bin/python

import stanza

# One-time model download — uncomment on the first run:
# stanza.download('zh', processors='tokenize,ner')

# Build a Chinese (zh) neural pipeline with tokenization + named-entity recognition.
# NOTE(review): the original passed processors={'ner': 'tokenize'}, which the dict
# form interprets as "use the package named 'tokenize' for the 'ner' processor" —
# not a valid NER package. The comma-separated string below is the documented way
# to request both processors with their default models.
nlp = stanza.Pipeline('zh', processors='tokenize,ner', use_gpu=False, download_method=None)

# Run the pipeline on a sample Chinese sentence; `doc` holds tokens + entities.
doc = nlp("欢迎来到的中国，你要去哪个城市？")
# doc.sentences[0].print_dependencies()
# Collect the surface text of each recognized named entity.
# The original stringified each entity and stripped braces/quotes/field labels
# with chained str.replace calls — fragile (breaks if the repr format changes,
# and replace(" ", "") would corrupt any entity text containing spaces).
# Stanza entity spans expose the surface string directly via their .text attribute.
res = []
for ent in doc.entities:
    print(ent)  # keep the per-entity debug print from the original
    res.append(ent.text)
print(res)
# print(doc.entities)