#encoding=utf-8
'''
NLTK data ('nltk_data') lookup locations on this machine — kept for reference
when installing the corpora this script needs (e.g. punkt, wordnet):
    - 'C:\\Users\\Administrator/nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\share\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\lib\\nltk_data'
    - 'C:\\Users\\Administrator\\AppData\\Roaming\\nltk_data'
    - 'C:\\nltk_data'
    - 'D:\\nltk_data'
    - 'E:\\nltk_data'
'''
# Stemming: reduce a word to its stem
from nltk.stem.porter import PorterStemmer

# One shared Porter stemmer, reused by every psstem() call.
ps = PorterStemmer()

def psstem(word):
  """Return the Porter stem of *word*."""
  stemmed = ps.stem(word)
  return stemmed
# Lemmatization: reduce a word to its dictionary form
from nltk.stem.wordnet import WordNetLemmatizer

# One shared WordNet lemmatizer, reused by every wnllem() call.
wnl = WordNetLemmatizer()

def wnllem(word):
  """Return the WordNet lemma of *word*."""
  lemma = wnl.lemmatize(word)
  return lemma
# Stop words (project-local dictionary)
from common import stopwordDict
# NLTK sentence splitting
from nltk.tokenize import sent_tokenize

def getsentens(mysentences):
  """Split the text *mysentences* into sentences using NLTK."""
  sentences = sent_tokenize(mysentences)
  return sentences
# NLTK word tokenization
from nltk.tokenize import word_tokenize

def getwords(mysentence):
  """Tokenize the sentence *mysentence* into word tokens using NLTK."""
  tokens = word_tokenize(mysentence)
  return tokens
from stanfordcorenlp import StanfordCoreNLP
# Start the Stanford CoreNLP backend (English) at import time.
# NOTE(review): the model path is machine-specific and hard-coded — consider
# making it configurable (env var or argument) before reuse on another machine.
nlp = StanfordCoreNLP(r'C:/D/tools/java/jdk1.8.0_121/lib/stanford-corenlp-4.2.0',lang='en')
if __name__ == '__main__':
  # Demo: tokenize one sentence, report stem/lemma per non-stopword token,
  # write the constituency parse to a file, and print the parse tree's PNG repr.
  text = ''
  sentence = '''Archegos’ forced liquidation of several of its positions triggered a fire sale of a number of U.S. media stocks last week.'''
  text = '%s\n%s' % (text, sentence)
  try:
    # Tokenize via the CoreNLP backend. Other analyses available on `nlp`:
    # pos_tag, ner, parse, dependency_parse.
    tokens = nlp.word_tokenize(sentence)
    text = '%s\n%s' % (text, '*' * 10 + 'words' + '*' * 10)
    # One report line per non-stopword token: token  ---> Porter stem | WordNet lemma.
    for token in tokens:
      if token not in stopwordDict:  # idiomatic membership test, not __contains__()
        text = '%s\n%s' % (text, '{0:<20}{1:>6}--->{2:<20}|{3}'.format(
            token, '', psstem(token), wnllem(token)))
    # Parse once and reuse the result (previously the sentence was parsed twice,
    # doubling the round-trips to the CoreNLP server).
    nlp_parse = nlp.parse(sentence)
    try:
      # Context manager closes the file even if write() raises; explicit UTF-8
      # so the curly apostrophe in the sentence cannot trip a locale codec.
      with open('result-sentence.txt', 'w', encoding='utf-8') as text_file:
        text_file.write(text + '\n' + nlp_parse + '\n')
    except Exception as ex:
      print('exception:::------' + str(ex))
    from nltk.tree import Tree
    #Tree.fromstring(nlp_parse).draw()  # interactive window alternative
    print(Tree.fromstring(nlp_parse)._repr_png_())
  finally:
    # Always shut the server down, even on error — the backend consumes a lot of memory.
    nlp.close()
