# -*- coding=utf-8 -*-
import os
import re,chardet
import pykakasi
import jaconv as jc
from bs4 import BeautifulSoup

#------ text-encoding detection begin ------#
def detect_encoding(text):
    """Guess the character encoding of a byte string via chardet.

    Returns a (encoding, confidence) pair; encoding may be None when
    chardet cannot make a guess.
    """
    guess = chardet.detect(text)
    return guess['encoding'], guess['confidence']
def getTxtCharset(filename:str):
    """Return the detected encoding name of the file at *filename*.

    May return None when detection fails.
    """
    with open(filename, 'rb') as handle:
        raw_bytes = handle.read()
    charset, _confidence = detect_encoding(raw_bytes)
    return charset
#------ text-encoding detection end ------#
# Encoding used for generated output files.
OUTPUT_CHARSET = 'utf-8'

# Shared pykakasi converter instance (kanji/kana -> readings), reused below.
kakasi = pykakasi.kakasi()


# Convert the Japanese kanji in lyric files to hiragana/romaji readings
def translate_japanese_kanji_to_hiragana_from_files(input_folder, output_folder):
    """Convert every .lrc file under *input_folder* into a markdown table.

    For each lyric line of the form "[timemark]text", one table row
    |timemark|ori|fenci|hira|roma| is appended and the table is written to
    "<output_folder>/<file>.md". The fenci/hira columns are left blank;
    roma comes from jaconv.kata2alphabet applied to the raw line text.
    """
    # Create the output folder when it does not exist yet.
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # Walk every file below the input folder.
    for root, dirs, files in os.walk(input_folder):
        for file in files:
            # Only .lrc files are processed (.txt handling is disabled).
            if file.endswith('.lrc') :#or file.endswith('.txt'):
                input_file_path = os.path.join(root, file)
                current_file_charset = getTxtCharset(input_file_path)
                print('current file : ',file,current_file_charset)
                # Skip files whose encoding could not be detected.
                if current_file_charset == None :
                    continue
                output_file_path = os.path.join(output_folder, file+'.md')
                with open(input_file_path, 'r', encoding=current_file_charset) as input_file:
                    lines = input_file.readlines()
                # NOTE(review): new_lines is filled below but never written out —
                # only the markdown table is saved; confirm whether that is intended.
                new_lines = []
                new_lines_lrc_markdown = ['|timemark|ori|fenci|hira|roma|\n','|---|---|---|---|---|\n']
                for line in lines:
                    #print(kakasi.convert(line)) # kakasi.convert returns a list of dicts
                    # Position of the closing "]" of the [timemark] prefix.
                    index = line.find(']')
                    if index > -1:
                        # Romanize the text after the timemark, clip by clip.
                        line_clips = kakasi.convert(line[index+1:].replace('\n',''))
                        hira_line = []
                        for l in line_clips:
                            hira_line.append(l['hepburn'])
                        #print(' '.join(hira_line))
                        new_lines.append(line+line[:index+1]+' '.join(hira_line)+'\n')
                        new_lines_lrc_markdown.append('|%s|%s|%s|%s|%s|\n' % (line[:index+1],line[index+1:].replace('\n',''),' ',' ',jc.kata2alphabet(line[index+1:].replace('\n',''))))
                with open(output_file_path, 'w', encoding='utf-8') as output_file:
                    output_file.writelines(new_lines_lrc_markdown)
#import html2markdown # supports too few HTML tags — unreliable, do not use
#import markdown # one-way only: converts markdown -> html
#import html2text # converts html -> markdown — works well, recommended
#import imgkit # requires wkhtmltox installed and its bin folder on PATH

# ------ nltk word lemma lookup begin ------#
import nltk
from nltk.corpus import wordnet
#from nltk.stem import WordNetLemmatizer
from nltk.stem.wordnet import WordNetLemmatizer
def get_wordnet_pos(tag:str):
    """Map a Penn Treebank POS tag to the matching wordnet POS constant.

    Returns None for tags outside the adjective/noun/verb/adverb groups.
    """
    prefix_to_pos = {
        'J': wordnet.ADJ,
        'N': wordnet.NOUN,
        'V': wordnet.VERB,
        'R': wordnet.ADV,
    }
    return prefix_to_pos.get(tag[:1])
# ------ nltk word lemma lookup end ------#
# ------ POST request example begin ------#
import requests

# Request headers (the URL itself is defined inside each request function)
# HTTP headers for www.ezlang.net's kana-conversion AJAX endpoint.
HEADERS_FOR_EZ = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        #"Cookie":"",
        "Host":"www.ezlang.net",
        "X-Requested-With": "XMLHttpRequest", # key parameter: marks the request as AJAX
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin":'https://www.ezlang.net',
        "Referer":"https://www.ezlang.net/zh-Hans/tool/kana"}
def request_post(txt: str = 'あんなに愛した君がいない', sn: str = 'kana'):
    """POST text to the ezlang.net tool endpoint and print the result.

    Generalized: the previously hard-coded payload is now parameterized;
    calling request_post() with no arguments behaves exactly as before.

    Args:
        txt: text to convert (defaults to the original demo sentence).
        sn: ezlang tool name, e.g. 'kana'.

    Network errors and non-200 responses are reported on stdout; nothing
    is returned.
    """
    url = 'https://www.ezlang.net/ajax/tool_data.php'
    # Form payload expected by tool_data.php.
    data = {
        'txt': txt,
        'sn': sn
    }

    try:
        # Send the POST request.
        response = requests.post(url, data=data, headers=HEADERS_FOR_EZ)

        # Check the response status code.
        if response.status_code == 200:
            # Print the converted text from the JSON body.
            r_info = response.json()
            print(r_info[1])
        else:
            print(f"请求失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求发生错误: {e}")

def request_post_test():
    """Offline demo: parse a canned ezlang.net morpheme response, substitute
    each kanji's reading into the sentence, then run kakasi on the result."""
    sentence = 'あんなに愛した君がいない'
    res = '<div class="line"><span class="morpheme">あんな</span><span class="morpheme">に</span><span class="morpheme"><ruby>愛<rp>[</rp><rt>あい</rt><rp>]</rp></ruby></span><span class="morpheme">し</span><span class="morpheme">た</span><span class="morpheme"><ruby>君<rp>[</rp><rt>きみ</rt><rp>]</rp></ruby></span><span class="morpheme">が</span><span class="morpheme">い</span><span class="morpheme">ない</span></div>'
    span_pattern = re.compile('<span class="morpheme">(.*?)</span>')
    kanji_hira_pattern = re.compile('<ruby>(.*?)<rp>\[</rp><rt>(.*?)</rt><rp>\]</rp></ruby>')
    htmls = re.findall(span_pattern, res)
    # (kanji, reading) pairs extracted from the <ruby> markup.
    for kanji, reading in re.findall(kanji_hira_pattern, res):
        sentence = sentence.replace(kanji, reading)
    print(sentence)
    print(kakasi.convert(sentence))

# HTTP headers for dict.hjenglish.com's translation endpoint.
# NOTE(review): 'Content-Length' is hard-coded to '107' and 'Cookie' holds a
# captured session value — both are fragile. requests normally computes
# Content-Length itself, and the cookie will eventually expire; confirm
# whether the endpoint really requires them before reuse.
HEADER_COMMON = {
    'Accept':'*/*'
    ,'Accept-Encoding':'gzip, deflate, br, zstd'
    ,'Accept-Language':'zh-CN,zh;q=0.9'
    ,'Connection':'keep-alive'
    ,'Content-Length':'107'
    ,'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
    ,'Cookie':'acw_tc=1a0c39d417423159329286704e008939cded90bfa96037587b4f4fda3e8706; HJ_UID=96482059-5994-86cf-4d9b-e8b1004f6203; HJ_CST=1; HJ_CSST_3=1; TRACKSITEMAP=3; _REF=https%3A%2F%2Fcn.bing.com%2F; _SREF_3=https%3A%2F%2Fcn.bing.com%2F; HJ_SID=fph8r4-6572-4d62-a930-e87d39db1704; HJ_SSID_3=fph8r4-2cf7-4794-8254-e74a77d02999; _SREG_3=cn.bing.com%7C%7Csearch%7Cdomain; _REG=cn.bing.com%7C%7Csearch%7Cdomain'
    ,'Host':'dict.hjenglish.com'
    ,'Origin':'https://dict.hjenglish.com'
    ,'Referer':'https://dict.hjenglish.com/app/trans'
    ,'Sec-Fetch-Dest':'empty'
    ,'Sec-Fetch-Mode':'cors'
    ,'Sec-Fetch-Site':'same-origin'
    ,'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
    ,'X-Requested-With':'XMLHttpRequest'
    ,'sec-ch-ua':'"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"'
    ,'sec-ch-ua-mobile':'?0'
    ,'sec-ch-ua-platform':'"Windows"'

}
def request_post_common(url:str,data:dict,header:dict):
    """POST *data* to *url* with the given *header* dict.

    Returns the requests.Response on HTTP 200; on any other status or a
    requests error, prints a message and returns None.
    """
    try:
        resp = requests.post(url, data=data, headers=header)
        # Only a 200 response is considered a success.
        if resp.status_code == 200:
            return resp
        print(f"请求失败，状态码: {resp.status_code}")
    except requests.RequestException as e:
        print(f"请求发生错误: {e}")

# ------ POST request example end ------#
import time,json
# ------ ezlang.net: Japanese kanji to hiragana begin ------#

'''get_hira_via_request_post_by_sentence()函数返回数据样式：
res = '<div class="line"><span class="morpheme">あんな</span><span class="morpheme">に</span><span class="morpheme"><ruby>愛<rp>[</rp><rt>あい</rt><rp>]</rp></ruby></span><span class="morpheme">し</span><span class="morpheme">た</span><span class="morpheme"><ruby>君<rp>[</rp><rt>きみ</rt><rp>]</rp></ruby></span><span class="morpheme">が</span><span class="morpheme">い</span><span class="morpheme">ない</span></div>'
'''
# NOTE(review): appears unused in this file — the cookie cache actually lives
# in HEADERS_FOR_EZ['Cookie']; confirm before removing.
headers_cookie_ezlang = ''
def get_jp_kanji_hira_request_post_by_sentence(sentence:str):
    """Ask ezlang.net to annotate *sentence* and substitute kanji readings.

    Returns the sentence with each kanji replaced by its hiragana reading,
    or '' on any HTTP/network failure. As a side effect, the first
    'set-cookie' value seen is cached into HEADERS_FOR_EZ for later calls.
    """
    url = 'https://www.ezlang.net/ajax/tool_data.php'
    # Form payload expected by the kana tool.
    payload = {
        'txt': sentence,
        'sn': 'kana'
    }
    try:
        response = requests.post(url, data=payload, headers=HEADERS_FOR_EZ)
        if response.status_code == 200:
            # Remember the session cookie the first time the server sends one.
            if not HEADERS_FOR_EZ.get('Cookie'):
                HEADERS_FOR_EZ['Cookie'] = response.headers.get('set-cookie')
            body = response.json()
            return getJaKanjiHiraByPostResponse(body[1], sentence)
        return ''
    except requests.RequestException:
        return ''

def request_get(url:str,word:str):
    """GET *url*, or a jisho.org search page for *word* when url is empty.

    Bug fix: the original unconditionally overwrote *url* with a hard-coded
    jisho.org search URL and ignored both parameters. The caller's url is
    now honored; *word* builds a jisho.org search URL only as a fallback.

    Returns the response body on HTTP 200, otherwise an error-message string
    (never raises).
    """
    if not url:
        url = f'https://jisho.org/search/{word}'
    try:
        # Send the GET request.
        response = requests.get(url)
        # Check the response status code.
        if response.status_code == 200:
            return response.text
        else:
            return f"请求失败，状态码: {response.status_code}"
    except requests.RequestException as e:
        return f"请求发生错误: {e}"

# Regex patterns for ezlang.net morpheme markup.
# Fix: use raw strings — '\[' and '\]' in a plain string literal are invalid
# escape sequences (SyntaxWarning on modern Python); r'...' spells the same
# pattern safely.
span_pattern = re.compile(r'<span class="morpheme">(.*?)</span>')
# Captures (kanji, reading) pairs from <ruby>K<rp>[</rp><rt>R</rt><rp>]</rp></ruby>.
kanji_hira_pattern = re.compile(r'<ruby>(.*?)<rp>\[</rp><rt>(.*?)</rt><rp>\]</rp></ruby>')
def getJaKanjiHiraByPostResponse(resp:str,origin:str):
    """Replace each kanji in *origin* with the reading extracted from *resp*.

    *resp* is ezlang.net morpheme HTML; every
    <ruby>K<rp>[</rp><rt>R</rt><rp>]</rp></ruby> fragment yields a K -> R
    substitution (applied to all occurrences of K in *origin*).

    Fix: the pattern is a local raw string, removing the dependency on the
    module-level global that was compiled from invalid escape sequences
    ('\\[' in a non-raw literal); re caches compiled patterns, so this is
    not re-compiled from scratch on every call.
    """
    ruby_pattern = re.compile(r'<ruby>(.*?)<rp>\[</rp><rt>(.*?)</rt><rp>\]</rp></ruby>')
    for kanji, reading in ruby_pattern.findall(resp):
        origin = origin.replace(kanji, reading)
    return origin
# ------ ezlang.net: Japanese kanji to hiragana end ------#

# ------ jisho.org: look up kun/on readings of Japanese kanji begin ------ #
# Module-level caches shared with processJpKanjiKunAndOnReading.
kanji_dict = {} # {kanji: occurrence count}
kanji_kun_on_reading_dict = {} # {kanji: ['token,kun,on', ...]} CSV-formatted rows
def processJpKanjiKunAndOnReading(_file:str):
    """Read a comma-separated kanji list from *_file*, look up each distinct
    kanji's kun/on readings on jisho.org, and write them to '<_file>.csv'.

    Side effects: mutates the module-level kanji_dict and
    kanji_kun_on_reading_dict caches, and performs one HTTP request per
    distinct kanji.
    """
    print('start ...')
    with open(_file,'r',encoding='utf-8') as kan:
        kanjiTxt = kan.read()
    # Strip newlines and spaces so only the comma-separated tokens remain.
    kanjiTxt = kanjiTxt.replace('\n','').replace('\r','').replace(' ','')
    kanjiList = kanjiTxt.split(',')
    token_sum = len(kanjiList)
    for kanji in kanjiList:
        if kanji == '':
            continue
        if kanji_dict.get(kanji):
            kanji_dict[kanji] += 1
        else:
            kanji_dict[kanji] = 1
            # In-place countdown progress line (rewritten via '\r').
            # NOTE(review): token_sum is only decremented for *new* kanji, so
            # the displayed remainder drifts when duplicates exist — confirm intended.
            print('remain tokens:%(sum) -3d' % {'sum':token_sum},end='\r',flush=True)
            kanji_kun_on_reading_dict[kanji] = getJpKanjiKunAndOnReadingFromJishoorg(kanji)
            token_sum -= 1
    content_lines = ['token,kun,on\n']
    for k,v in kanji_kun_on_reading_dict.items():
        content_lines.append('\n'.join(v)+'\n')
    with open(f'{_file}.csv','w',encoding='utf-8') as c:
        c.writelines(content_lines)
    # Padded with spaces to overwrite the leftover countdown text.
    print('All done.%(sum) -20s' % {'sum':' '},flush=True)
def get_html_from_jishoorg_by_word(word:str):
    """Fetch the jisho.org search-results page for *word*.

    Returns the page HTML on HTTP 200; otherwise returns an error-message
    string (never raises).
    """
    search_url = f'https://jisho.org/search/{word}'
    try:
        page = requests.get(search_url)
    except requests.RequestException as e:
        return f"请求发生错误: {e}"
    if page.status_code == 200:
        return page.text
    return f"请求失败，状态码: {page.status_code}"
def getJpKanjiKunAndOnReadingFromJishoorg(word:str):
    """Scrape kun/on readings for *word* from the jisho.org search page.

    Returns a list of 'token,kun,on' CSV lines (one per kanji entry found),
    and also overwrites './jpkanjikunandon.csv' with them as a side effect.
    """
    html = get_html_from_jishoorg_by_word(word)
    # with open('./jishoorg.html','r',encoding='utf-8') as f:
    #     html = f.read()
    bsp = BeautifulSoup(html,'html.parser')
    # Each 'kanji_light_content' div holds one kanji entry on the results page.
    target_content = bsp.find_all('div',class_='kanji_light_content')
    csv_lines = [] # 'token,kun,on'
    for t in target_content:
        # print(10*'-')
        token_word,token_kun,token_on = ('','','')
        # 'literal_block' holds the kanji itself; the readings divs hold its
        # kun/on readings as <a> links.
        token_word_element = getListTargetElement(t.find_all('div',class_='literal_block'))
        if '' != token_word_element:
            token_word = getListTargetElement(token_word_element.find_all('a'))
        token_kun_element = getListTargetElement(t.find_all('div',class_='kun readings'))
        if '' != token_kun_element:
            token_kun = getListTargetElement(token_kun_element.find_all('a'))
        token_on_element = getListTargetElement(t.find_all('div',class_='on readings'))
        if '' != token_on_element:
            token_on = getListTargetElement(token_on_element.find_all('a'))
        # print(f'{token_word}\nkun:{token_kun}\non:{token_on}')
        # print(10*'+')
        csv_lines.append(f'{token_word.strip()},{token_kun.strip()},{token_on.strip()}')
    # NOTE(review): this overwrites the same CSV on every call; the caller
    # processJpKanjiKunAndOnReading also writes its own aggregate CSV.
    with open('./jpkanjikunandon.csv','w',encoding='utf-8') as k:
        k.write('\n'.join(csv_lines))
    return csv_lines
# ------ jisho.org: look up kun/on readings of Japanese kanji end ------ #

def getListTargetElement(elementList:list):
    """Collapse a BeautifulSoup result list into text or a single element.

    If every element renders as an '<a ...>' tag, returns their .string
    values concatenated with two-space separators; otherwise returns the
    first non-anchor element unchanged. None or empty input yields ''.

    Fixes: identity comparison ('is not None' instead of '!= None'), and a
    guard so an anchor whose .string is None no longer raises TypeError.
    """
    txt = ''
    if elementList is not None and len(elementList) > 0:
        for e in elementList:
            if str(e).startswith('<a '):
                # e.string can be None for anchors with nested markup.
                txt += (e.string or '') + '  '
            else:
                return e
    return txt
if __name__ == '__main__' :
    pass
    # NOTE(review): this performs a live POST to dict.hjenglish.com on every run.
    resp = request_post_common('https://dict.hjenglish.com/v10/dict/translation/jp/cn',{'content':'僕と踊ってくれないか光と影'},HEADER_COMMON)
    print(resp.json()['data']['content'])
    #request_post()
    #------ process Japanese kanji begin ------#
    # processJpKanjiKunAndOnReading('./kanji.txt')
    #------ process Japanese kanji end ------#
    #------ fetch Jisho.org on/kun readings begin  ------#
    # getJpKanjiKunAndOnReadingFromJishoorg('暗闇')
    #------ fetch Jisho.org on/kun readings end  ------#
    #------ countdown-display test begin ------#
    #for m in ['-','\\','|','/']:
    #    print(m,end='',flush=True)
    #    print('\b',end='')
    #    time.sleep(0.2)
    #sentences = ['あんなに愛した君がいない','夢のような思い出も']
    #sum = 15 #len(sentences)
    #for sen in range(sum):
    #    n = len(str(sum))
    #    print('%(file)s remain lines:%(sum) -3d' % {'file':'filename','sum':sum},end='\r',flush=True)
    #    time.sleep(0.2) # get_jp_kanji_hira_request_post_by_sentence(sen)
    #    sum -=1
    #print('%(file)s done.%(sum) -20s' % {'file':'filename','sum':' '},flush=True) # prints "filename done."
    #------ countdown-display test end ------#

    # usage examples
    # ------ nltk word lemma lookup begin ------#
    #wnl = WordNetLemmatizer()
    #sentence = 'the results came faster than anyone could have imagined.'
    #tokens = nltk.word_tokenize(sentence)
    #tagg = nltk.pos_tag(tokens)
    #print(tagg)
    #for k,v in tagg:
    #    if v in ('NNS','VBS','JJR','NN','VB','VBN'):
    #        print(wnl.lemmatize(k,get_wordnet_pos(v)))
    # ------ nltk word lemma lookup end ------#
    # ------ nltk word pronunciation lookup begin ------#
    # pron = nltk.corpus.cmudict.dict()
    # word = 'wonder'
    # if word in pron:
    #     print(pron[word.lower()])
    # else:
    #     print(word+' none')
    # print(nltk.help.upenn_tagset())
    # ------ nltk word pronunciation lookup end ------#
    #input_folder = 'C:/E/codes/python/mygithub/hugo-roar/'
    #output_folder = 'C:/E/codes/python/nlp/'
    #translate_japanese_kanji_to_hiragana_from_files(input_folder, output_folder)
    #import jaconv as jc
    #sent = 'あんなに愛した君がいない'
    #print(jc.kata2alphabet(sent))
    #print(kakasi.convert('あんなに愛した君がいない'))
    #temp_files = ['result.md','result1.md','result5.md']
    '''
    config = imgkit.config()
    option = {
        'encoding':'utf-8'
    }
    temp_files = ['result1.html','result5.html','result.html']
    for f in temp_files:
        imgkit.from_file(os.path.join(input_folder,f),os.path.join(input_folder,f.replace('.html','.jpg')),options=option)
        #output_file_text = ''
        #with open(os.path.join(input_folder,f),'r',encoding='utf-8') as m:
        #    output_file_text = m.read()
        #with open(os.path.join(input_folder,f.replace('.md','.html')),'w',encoding='utf-8') as o:
        #    o.write(markdown.markdown(output_file_text, extensions=['markdown.extensions.tables']))
    '''

