
from stanfordcorenlp import StanfordCoreNLP
import jieba
import nltk
# nltk.download("punkt")  # uncomment on first run: word_tokenize needs the Punkt models

# A parallel example pair: a Chinese sentence and its English counterpart.
sentence1 = "大家想想，海洋占了地球面积的75％。"
sentence2 = "When you think about it, the oceans are 75 percent of the planet."
# Point the wrapper at a local unpacked CoreNLP distribution; lang='zh'
# additionally requires the Chinese models jar in the same directory.
nlp = StanfordCoreNLP('D:\\CoreNLp\\stanford-corenlp-full-2024-12-25', lang='zh')
nlp2 = StanfordCoreNLP('D:\\CoreNLp\\stanford-corenlp-full-2024-12-25', lang='en')


# jieba: segment the Chinese sentence in precise mode (lcut returns a list directly).
seg_list = jieba.lcut(sentence1, cut_all=False)
# NLTK: Punkt-based word tokenization of the English sentence.
tokens = nltk.word_tokenize(sentence2)
print(seg_list)
print(tokens)
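
# The two CoreNLP handles above are otherwise unused; a minimal sketch of the
# comparison they suggest, using the wrapper's word_tokenize method.
print(nlp.word_tokenize(sentence1))
print(nlp2.word_tokenize(sentence2))

# Shut down the background CoreNLP servers when done.
nlp.close()
nlp2.close()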
