# -*- coding: utf-8 -*-
# import jieba,re
# str_text = 'Taylor Swift Just Slammed This Netflix Show for a "Deeply Sexist" Joke'
# str_jing2=jieba.cut(str_text,cut_all=False)
# print('add_word前:'+"/".join(str_jing2))
# 添加自定义词
# jieba.add_word('Deeply Sexist')
# str_jing3=jieba.cut(str_text,cut_all=False)
# print('add_word后:'+"/".join(str_jing3))
# #修正词频
# jieba.suggest_freq('野生动物园',tune=True)
# str_jing4=jieba.cut(str_text,cut_all=False)
# print('suggest_freq后:'+"/".join(str_jing4))
# skey = re.compile('Deeply Sexist')
# sprint(key.findall(str_text))

# from nltk.corpus import treebank
# t = treebank.parsed_sents('E:\work_store\program\python\news.txt')[0]
# t = treebank.parsed_sents('wsj_0001.mrg')[0]
# t.draw()
from datetime import datetime
from myYoudaoFanyi import getWordMean, getSentenceMean, getYoudaoJsonBySentence,getYoudaoJsonPhoneticByWord,getYoudaoJsonMeanByWord
from nltk.stem import SnowballStemmer
#from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from myconstantnlp import stopwordDict
#from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
import nltk
import sys,re

'''分句'''
from nltk.tokenize import sent_tokenize

def getsentencelist(context):
    """Split raw *context* text into a list of sentences (NLTK sent_tokenize)."""
    sentence_list = sent_tokenize(context)
    return sentence_list

'''分词'''
def getwordlist(sentence:str):
    """Tokenize *sentence* and POS-tag it; returns a list of (token, tag) pairs."""
    word_tokens = nltk.word_tokenize(sentence)
    return nltk.pos_tag(word_tokens)

'''基于Porter词干提取算法'''
# Shared Porter stemmer instance (e.g. stem('running') per the original note)
ps = PorterStemmer()

def psstem(word):
    """Stem *word* with the module-level Porter stemmer."""
    return ps.stem(word)


'''基于Lancaster 词干提取算法'''
# Shared Lancaster stemmer instance
ls = LancasterStemmer()

def lsstem(word):
    """Stem *word* with the module-level Lancaster stemmer."""
    return ls.stem(word)

# ------ nltk 查单词原形 begin ------#
def get_wordnet_pos(tag:str):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Returns None for tags with no WordNet equivalent (anything that is not
    an adjective/noun/verb/adverb tag).
    """
    prefix_to_pos = {
        'J': wordnet.ADJ,
        'N': wordnet.NOUN,
        'V': wordnet.VERB,
        'R': wordnet.ADV,
    }
    # Treebank tags encode the word class in their first letter.
    return prefix_to_pos.get(tag[:1])

'''WordNetLemmatizer获取词汇'''
# Shared WordNet lemmatizer instance
wnl = WordNetLemmatizer()

def wnllem(word:str,pos:str):
    """Return the WordNet lemma (base form) of *word* for Treebank tag *pos*.

    Words whose tag has no WordNet equivalent are returned unchanged.
    """
    wordnet_pos = get_wordnet_pos(pos)
    if wordnet_pos:
        return wnl.lemmatize(word, wordnet_pos)
    return word
# ------ nltk 查找单词原形 end ------#

# Snowball-based stemmer (kept for ad-hoc use; e.g. stem('multiply') -> u'multipli')
snowball_stemmer = SnowballStemmer('english')
# BUGFIX: removed a dead module-level call `snowball_stemmer.stem('multiply')`
# whose result was silently discarded on import.

# from myHaiciFanyi import getWordMean

'''Shared per-run state for sentence processing'''
# Accumulated {sen, cn_sen, phonetic, paraphrase} dicts — consumed by the
# (currently commented-out) excel generation in __main__.
senList = []
# Separator character used when building section headers.
divide_mark = '-'
# Tokens already looked up -> occurrence count, so each word is translated once.
word_record = {}

#组装markdown表格table内容str字符串
def packMarkdownContentStr(*args):
    txt = '|'
    for arg in args:
        txt = f'{txt}{arg}|'
    return txt+'\n'

#组装markdown表格table表头head str字符串
def packMarkdownHeadStr(*args):
    txt = '|'
    for arg in args:
        txt = f'{txt}{arg}|'
    txt += '\n'
    txt += '|'+len(args)*'---|'+'\n'
    return txt

def processSentence(sentence):
    """Render one sentence as a markdown section: the sentence, a per-word
    stem/lemma + dictionary-meaning list, and its Chinese translation.

    Side effects: counts tokens in the module-level word_record (so repeated
    words are only looked up once across the whole run) and appends a
    {sen, cn_sen, phonetic, paraphrase} dict to senList for the excel export.
    """
    text = '### '+divide_mark*25+'sentence'+divide_mark*25
    text = '%s\n***%s***' % (text, sentence)
    tokens = getwordlist(sentence)
    text = '%s\n\n#### %s' % (text, '*'*10+'words'+'*'*10)
    # Accumulates the per-word lines reused for the excel export
    excel_sentence = ''
    for token,pos in tokens:
        if word_record.get(token) != None:
            word_record[token] += 1
            continue
        # First sighting: record the token so it is not translated again
        else:
            word_record[token] = 1
        if not stopwordDict.__contains__(token.lower()):
            _word = wnllem(token,pos)
            ps_word = psstem(token)
            text = '%s\n##### %s' % (
                text, '{0:<20}--->{1:<20}|{2}'.format(token, ps_word, _word))
            # Same word line again, for the excel export
            excel_sentence = '%s\n%s' % (
                excel_sentence, '{0:<20}--->{1:<20}|{2}'.format(token, ps_word, _word))
            wordMean = getWordMean(_word)
            if len(wordMean) < 7:  # empty-ish result (wordMean-->[,],\n,[,],\n); retry with the Porter stem
                wordMean = getWordMean(ps_word)
            text = '%s\n%s' % (text, wordMean)
            # Append the meaning for the excel export as well
            excel_sentence = '%s\n%s' % (excel_sentence, wordMean)
    # English -> Chinese translation of the whole sentence
    cn_sentence = getYoudaoJsonBySentence(sentence)  # getSentenceMean(sentence)
    # Collect the sentence + translation for the excel export
    senDict = {'sen': sentence, 'cn_sen': cn_sentence,
               'phonetic': '', 'paraphrase': excel_sentence}
    senList.append(senDict)
    return text+'\n#### CN_means\n>'+cn_sentence+'\n'

# Combined "uk: /…/<br>us: /…/" phonetics string; group 1 = UK, group 2 = US.
phonetic_pattern = re.compile(r'uk: /(.*?)/<br>us: /(.*?)/')
# Single "uk: /…/" or "us: /…/" phonetics string.
# BUGFIX: the original pattern '[uk|us]: /(.*?)/' used a character class,
# which matches any ONE of the characters u/k/|/s — not the intended
# alternatives "uk" / "us". '(?:uk|us)' matches the real prefixes.
phonetic_pattern_ukorus = re.compile(r'(?:uk|us): /(.*?)/')

def addPhoneticsForSentence(sentence:str,word:str,phonetics:str):
    """Return *sentence* with *word* annotated as 'word[phonetic]'.

    Prefers the US phonetic from a combined uk/us *phonetics* string, falling
    back to a lone uk- or us-only phonetic. If *phonetics* contains neither
    form, the sentence is returned unchanged.
    """
    ph = None
    phonetics_ukandus = phonetic_pattern.findall(phonetics)
    if len(phonetics_ukandus) == 1:
        # [0][0] would be the UK phonetic; the US one is used on purpose.
        ph = phonetics_ukandus[0][1]
    phonetics_ukorus = phonetic_pattern_ukorus.findall(phonetics)
    if len(phonetics_ukorus) == 1:
        ph = phonetics_ukorus[0]
    if ph:
        return sentence.replace(word,f'{word}[{ph}]')
        # return sentence.replace(word,f'<ruby>{word}<rt>{ph}</rt></ruby>') # ruby-annotation mode
    else:
        return sentence

def processSentenceSimple(sentence:str):
    """Render one sentence as a markdown section: the sentence annotated with
    phonetics, a markdown table of new words (no/token/meaning/phonetic/lemma),
    and its Chinese translation.

    Numbers, stopwords and tokens already present in word_record are skipped.
    Side effect: counts token occurrences in the module-level word_record.
    """
    text = '### '+divide_mark*25+'sentence'+divide_mark*25
    text = '%s\n***%s***' % (text, sentence)
    # The ##...## / $$...$$ placeholders are replaced after the token loop below
    text = '%s\n\n#### Words\n> ##sentence_with_phonetics##\n\n##tokenphoneticsmeans##\n' % text
    # markdown_line = packMarkdownHeadStr('token','phonetics','ori','means')
    markdown_line = packMarkdownHeadStr('no','token','means','phonetics','ori') # reordered table columns
    text = text+'#### CN_means\n>$$cn_sentence$$\n'
    tokens = getwordlist(sentence)
    sentence_with_phonetics = sentence
    # Accumulates the per-word lines used for the excel export
    excel_sentence = ''
    no = 1
    for token,pos in tokens:
        # Skip pure numeric tokens (including 1,234 / 3.14 style)
        if len(re.findall('\d+([.,]{0,1}\d+)*',token))>0:
            continue
        if stopwordDict.__contains__(token.lower()):
            continue
        if word_record.get(token) != None:
            word_record[token] += 1
            continue
        else: # first sighting: record the token so it is not looked up again
            word_record[token] = 1
        _word = wnllem(token,pos) # look up the lemma (base form)
        ps_word = psstem(token)
        # Word line for the excel export
        excel_sentence = '%s\n%s' % (excel_sentence, '{0:<20}--->{1:<20}|{2}'.format(token, ps_word, _word))
        # Phonetics: try the surface form, then the lemma, then the Porter stem
        wordPhonetic = getYoudaoJsonPhoneticByWord(token)
        if wordPhonetic == '':
            wordPhonetic = getYoudaoJsonPhoneticByWord(_word)
        if wordPhonetic == '':
            wordPhonetic = getYoudaoJsonPhoneticByWord(ps_word)
        # markdown_single_line = packMarkdownContentStr(token,wordPhonetic,_word,getYoudaoJsonMeanByWord(token))
        markdown_single_line = packMarkdownContentStr(str(no),token,getYoudaoJsonMeanByWord(token),wordPhonetic,_word) # reordered table columns
        no += 1
        # packAlreadySearchWords(token,markdown_single_line) # TODO: not finished yet
        markdown_line += markdown_single_line
        # Annotate the word inside the sentence with its phonetic
        sentence_with_phonetics = addPhoneticsForSentence(sentence_with_phonetics,token,wordPhonetic)
    text = text.replace('##sentence_with_phonetics##',sentence_with_phonetics)
    text = text.replace('##tokenphoneticsmeans##',markdown_line)
    # English -> Chinese translation of the whole sentence
    cn_sentence = getSentenceMean(sentence) #getYoudaoJsonBySentence(sentence)  # alternative backend
    text = text.replace('$$cn_sentence$$',cn_sentence)
    return text

def writeFileForContent(outputFullName:str,content:str,model='w',encoding='utf-8'):
    """Write *content* to *outputFullName* using open-mode *model* (default: overwrite)."""
    with open(outputFullName, model, encoding=encoding) as handle:
        handle.write(content)

# Cache of tokens that already have a rendered markdown line: {token: markdownline}
ALREADY_WORDS_DICT = {}

def packAlreadySearchWords(token:str,markdownline:str):
    """Record *markdownline* for *token* unless a truthy entry already exists."""
    if ALREADY_WORDS_DICT.get(token):
        return
    ALREADY_WORDS_DICT[token] = markdownline

if __name__ == '__main__':
    # Reads sentences from content.txt, renders each through
    # processSentenceSimple, and writes the combined markdown to the output
    # file (CLI arg + '.md' if given, otherwise a timestamped default name).
    infile_path = r'content.txt'
    input_text = ''
    output_text = ''
    outputFile = ''
    if len(sys.argv) == 2:
        outputFile = sys.argv[1] + '.md'
    else:
        timestamp = datetime.now()
        formatted_dt = timestamp.strftime('%Y%m%d%H%M%S')
        outputFile = f'entcn_result_{formatted_dt}.md'
    try:
        # BUGFIX: context manager guarantees the input file is closed
        # (the original opened it and never closed it).
        with open(infile_path, 'rb') as input_file:
            input_text = input_file.read()
    except Exception as es:
        print('read file exception:::-------------------: %s' % str(es))
    sentenceList = getsentencelist(input_text.decode('utf-8'))
    docx_name = 'my-english'
    sentenceNo = 1
    for sen in sentenceList:
        # Inject the running sentence number into the section header
        output_text = output_text + processSentenceSimple(sen).replace(
            'sentence'+divide_mark, 'sentence '+str(sentenceNo)+divide_mark)
        if sentenceNo == 1:
            docx_name = sen[:-1]
        sentenceNo += 1
    try:
        # BUGFIX: context manager replaces the manual close; the original's
        # `finally: text_file.close()` raised NameError when open() failed.
        with open(outputFile, 'wb') as text_file:
            text_file.write(output_text.encode('utf-8'))
        # Generate docx document (disabled)
        # from mydocx import generateDocx
        # generateDocx(docx_name.replace(': ','-').replace(',','').replace('.','').replace('\n','').replace('\r','') + '.docx',output_text)
        # Generate excel (disabled)
        # from myxlsx import genxlsx
        # genxlsx('sentences',senList)
    except Exception as ex:
        print('create file exception:::------'+str(ex))
