#-*- coding: utf-8 -*-
#import jieba,re
#str_text = 'Taylor Swift Just Slammed This Netflix Show for a "Deeply Sexist" Joke'
#str_jing2=jieba.cut(str_text,cut_all=False)
#print('add_word前:'+"/".join(str_jing2))
#添加自定义词
#jieba.add_word('Deeply Sexist')
#str_jing3=jieba.cut(str_text,cut_all=False)
#print('add_word后:'+"/".join(str_jing3))
# #修正词频
# jieba.suggest_freq('野生动物园',tune=True)
# str_jing4=jieba.cut(str_text,cut_all=False)
# print('suggest_freq后:'+"/".join(str_jing4))
#skey = re.compile('Deeply Sexist')
#sprint(key.findall(str_text))

#from nltk.corpus import treebank
#t = treebank.parsed_sents('E:\work_store\program\python\news.txt')[0]
#t = treebank.parsed_sents('wsj_0001.mrg')[0]
#t.draw()
import sys
'''Sentence segmentation'''
from nltk.tokenize import sent_tokenize
def getsentencelist(context):
  """Split a block of text into a list of sentences (NLTK Punkt tokenizer)."""
  sentences = sent_tokenize(context)
  return sentences

'''Word tokenization'''
from nltk.tokenize import word_tokenize
def getwordlist(sentences):
  """Split one sentence into a list of word/punctuation tokens."""
  tokens = word_tokenize(sentences)
  return tokens

'''停止词'''
from myconstantnlp import stopwordDict

'''Stemming based on the Porter algorithm'''
from nltk.stem.porter import PorterStemmer
ps= PorterStemmer()
def psstem(word):
    """Return the Porter stem of *word* (e.g. 'multiply' -> 'multipli')."""
    stemmed = ps.stem(word)
    return stemmed

'''Lemmatization'''
from nltk.stem import WordNetLemmatizer
wn = WordNetLemmatizer()
def wnlem(word):
    """Return the WordNet lemma of *word*.

    NOTE(review): functionally duplicates wnllem later in this file — both
    wrap a WordNetLemmatizer with default (noun) POS.
    """
    lemma = wn.lemmatize(word)
    return lemma

'''Stemming based on the Lancaster algorithm'''
from nltk.stem.lancaster import LancasterStemmer
ls = LancasterStemmer()
def lsstem(word):
    """Return the Lancaster stem of *word*."""
    result = ls.stem(word)
    return result
'''Look up the base form of a word via WordNetLemmatizer'''
from nltk.stem.wordnet import WordNetLemmatizer
wnl = WordNetLemmatizer()
def wnllem(word):
  """Return the WordNet lemma of *word* (default noun POS)."""
  base_form = wnl.lemmatize(word)
  return base_form

'''Stemming based on the Snowball algorithm'''
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer('english')
# Example usage — kept as a comment: the original executed this at import
# time and discarded the result (a pure no-op that only cost startup time).
#snowball_stemmer.stem('multiply')   #  u'multipli'

#from myHaiciFanyi import getWordMean
from myYoudaoFanyi import getWordMean,getSentenceMean

'''Break one sentence into words and look up each new word's meaning'''
# List of per-sentence dicts (sentence, CN translation, phonetic, paraphrase)
# accumulated across calls; intended for later excel generation.
senList = []
divide_mark = '-'
# Occurrence counter for every token ever seen; a word is only
# dictionary-looked-up the first time it appears in the document.
word_record = {}
def processSentence(sentence):
  """Build a markdown fragment for *sentence* and record it in senList.

  For each first-seen, non-stopword token the fragment gets one line with
  the token, its Porter stem and its WordNet lemma, followed by the
  dictionary meaning from getWordMean. The whole-sentence Chinese
  translation (getSentenceMean) is appended under a '#### CN_means'
  heading.

  Side effects: mutates the module-level word_record counter and appends
  one dict to senList.

  :param sentence: one English sentence (str)
  :return: the markdown fragment (str)
  """
  text = '### '+divide_mark*25+'sentence'+divide_mark*25
  text = '%s\n***%s***' % (text,sentence)
  tokens = getwordlist(sentence)
  text = '%s\n#### %s' % (text,'*'*10+'words'+'*'*10)
  # Paraphrase text assembled in parallel, later used to generate excel.
  excel_sentence = ''
  for token in tokens:
    # Idiomatic membership test (original used .get(token) != None).
    if token in word_record:
      word_record[token] += 1
      continue
    # First sighting: remember it so it is never looked up again.
    word_record[token] = 1
    # Skip stopwords (original called stopwordDict.__contains__ directly).
    if token.lower() in stopwordDict:
      continue
    _word = wnllem(token)
    ps_word = psstem(token)
    entry = '{0:<20}--->{1:<20}|{2}'.format(token,ps_word,_word)
    text = '%s\n##### %s' % (text,entry)
    excel_sentence = '%s\n%s' % (excel_sentence,entry)
    wordMean = getWordMean(_word)
    # A very short answer means the lemma lookup found nothing useful;
    # retry with the stem.  (wordMean-->[,],\n,[,],\n)
    if len(wordMean) < 7:
      wordMean = getWordMean(ps_word)
    text = '%s\n%s' % (text,wordMean)
    excel_sentence = '%s\n%s' % (excel_sentence,wordMean)
  # English -> Chinese translation of the full sentence.
  cn_sentence = getSentenceMean(sentence)
  senDict = {'sen':sentence,'cn_sen':cn_sentence,'phonetic':'','paraphrase':excel_sentence}
  senList.append(senDict)
  # Bug fix: the original fused the heading onto the previous line;
  # a markdown heading needs to start on its own line.
  return text+'\n#### CN_means\n>'+cn_sentence+'\n'

if __name__ == '__main__':
  infile_path = r'content.txt'
  outputFile = 'result.md'
  # Optional single CLI argument overrides the output base name.
  if len(sys.argv) == 2:
    outputFile = sys.argv[1] + '.md'
  input_text = ''
  try:
    # 'with' closes the handle (the original leaked it).  Decoding happens
    # inside the try so a failed read leaves input_text as a valid empty
    # str — the original then crashed calling .decode() on a str.
    with open(infile_path,'rb') as input_file:
      input_text = input_file.read().decode('utf-8')
  except Exception as es:
    print('read file exception:::-------------------: %s' % str(es))
  sentenceList = getsentencelist(input_text)
  docx_name = 'my-english'
  output_text = ''
  sentenceNo = 1
  for sen in sentenceList:
    # Number each 'sentence---…' heading produced by processSentence.
    output_text = output_text + processSentence(sen).replace('sentence'+divide_mark,'sentence '+str(sentenceNo)+divide_mark)
    if sentenceNo == 1:
      docx_name = sen[:-1]
    sentenceNo += 1
  try:
    # 'with' replaces the original try/finally, which raised NameError in
    # the finally clause when open() itself failed (text_file never bound).
    with open(outputFile,'wb') as text_file:
      text_file.write(output_text.encode('utf-8'))
    # Generate a .docx document
    #from mydocx import generateDocx
    #generateDocx(docx_name.replace(': ','-').replace(',','').replace('.','').replace('\n','').replace('\r','') + '.docx',output_text)
    # Generate an excel sheet
    #from myxlsx import genxlsx
    #genxlsx('sentences',senList)
  except Exception as ex:
    print('create file exception:::------'+str(ex))