#encoding=utf-8
'''
注：nltk是python的独立nlp平台模块，可下载相应的工具模块以纯python环境使用,如下：
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
!!!!!!!!!!!!!!!!!!!!!!nltk.download('wordnet')!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  Searched in:
    - 'C:\\Users\\Administrator/nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\share\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\lib\\nltk_data'
    - 'C:\\Users\\Administrator\\AppData\\Roaming\\nltk_data'
    - 'C:\\nltk_data'
    - 'D:\\nltk_data'
    - 'E:\\nltk_data'

此demo只为初步体验语法解析结构图
Tree.fromstring(nlp_parse).draw()
本代码实现：
（1）处理txt文件中的内容

1.安装python模块：
    pip install stanfordcorenlp
    pip install nltk
2.下载java包 stanford-corenlp-x.x.x ，供StanfordCoreNLP调用
  nlp = StanfordCoreNLP(r'C:/D/tools/java/jdk1.8.0_121/lib/stanford-corenlp-4.2.0',lang='en')
'''

# Stemming: reduce an English word to its Porter stem.
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()

def psstem(word):
  """Return the Porter stem of ``word`` (e.g. 'running' -> 'run')."""
  stemmed = ps.stem(word)
  return stemmed
# Lemmatization: map an English word to its WordNet lemma.
from nltk.stem.wordnet import WordNetLemmatizer
wnl = WordNetLemmatizer()

def wnllem(word):
  """Return the WordNet lemma of ``word`` (noun POS by default)."""
  lemma = wnl.lemmatize(word)
  return lemma
# English stopword list, read from the NLTK corpus once at import time.
from nltk.corpus import stopwords
stopwordList = stopwords.words('english')

def getstopwords():
  """Return the English stopword list.

  Fix: the original re-read the stopwords corpus from disk on every
  call; reuse the module-level cache instead. A copy is returned so
  callers mutating the result cannot corrupt the shared cache.
  """
  return list(stopwordList)
# Sentence segmentation via NLTK's Punkt tokenizer.
from nltk.tokenize import sent_tokenize

def getsentens(mysentences):
  """Split ``mysentences`` (a text) into a list of sentence strings."""
  sentences = sent_tokenize(mysentences)
  return sentences
# Word tokenization via NLTK's recommended tokenizer.
from nltk.tokenize import word_tokenize

def getwords(mysentence):
  """Split one sentence string into a list of token strings."""
  tokens = word_tokenize(mysentence)
  return tokens


#————————————————
#版权声明：本文为CSDN博主「Mr番茄蛋」的原创文章，遵循CC 4.0 BY-SA版权协议，转载请附上原文出处链接及本声明。
#原文链接：https://blog.csdn.net/qq_35203425/article/details/80451243
if __name__ == '__main__':
  # Demo driver: read content.txt, split into sentences and tokens,
  # drop stopwords, and print each surviving token with its Porter stem.
  sentence = ''
  try:
    # Fixes vs. original: context manager closes the file (it was never
    # closed), and the encoding is pinned instead of relying on the
    # platform default (original ran on Windows, where that differs).
    with open('content.txt', 'r', encoding='utf-8') as input_file:
      sentence = input_file.read()
  except (OSError, UnicodeDecodeError) as es:
    # Narrowed from a blanket `except Exception`; on failure the demo
    # continues with an empty text, matching the original behavior.
    print('exception:::::: %s' % str(es))
  targetworddict = {}
  # set() gives O(1) stopword membership instead of an O(n) list scan
  # per token; hoisted out of the loops.
  stopset = set(stopwordList)
  for sen in getsentens(sentence):  # sentence segmentation
    for word in getwords(sen):  # tokenization
      if word not in stopset:  # skip stopwords
        # NOTE: duplicate tokens overwrite each other (dict keyed by
        # surface form) — same as the original demo.
        targetworddict[word] = psstem(word)
  for k, v in targetworddict.items():
    print("%s---> %s " % (k, v))