import os
import re
import requests
import chardet
import cutlet,romkan
from langdetect import detect
from myJapanFenci import getJapanWordList,splitSentence

#------ text-encoding detection begin ------#
def detect_encoding(text):
    """Run chardet over raw bytes and return an (encoding, confidence) pair."""
    info = chardet.detect(text)
    return info['encoding'], info['confidence']
def getTxtCharset(filename:str):
    """Detect and return the text encoding of *filename*.

    Reads the whole file as bytes and runs chardet over it.

    BUGFIX: chardet reports encoding=None when it cannot decide (e.g. an
    empty file); previously that None was passed to open(encoding=...),
    which silently fell back to the locale-preferred encoding. Fall back
    to 'utf-8' explicitly instead.
    """
    with open(filename, 'rb') as f:
        raw = f.read()
    encoding, confidence = detect_encoding(raw)
    return encoding or 'utf-8'
#------ text-encoding detection end ------#
# All output files are written as UTF-8.
OUTPUT_CHARSET = 'utf-8'

# Matches any character that is NOT a word char, whitespace, CJK ideograph,
# hiragana, or katakana. BUGFIX: patterns were plain strings, so '\w' etc.
# were invalid escape sequences (SyntaxWarning on modern Python); raw strings
# keep the same matched set without the warning.
non_text_pattern = re.compile(r'[^\w\s\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff]')
# Matches ASCII letters (used to strip English text from lyric lines).
del_english_pattern = re.compile(r'[a-zA-Z]')

# Check whether a string contains a Japanese kanji-like character.
# Compiled once at import time instead of on every call; the original
# pattern listed U+4E00-U+9FAF twice (a subset of U+4E00-U+9FFF) — the
# duplicate is removed, the matched set is unchanged.
_JA_KANJI_PATTERN = re.compile(
    r'[\u3005-\u3007\u3021-\u3029\u3400-\u4dbf\u4e00-\u9fff]'
)

def has_japanese_characters(text):
    """Return True if *text* contains a kanji-like character.

    Covers CJK Unified Ideographs (U+4E00-U+9FFF), Extension A
    (U+3400-U+4DBF), iteration/closing marks and ideographic zero
    (U+3005-U+3007), and ideographic numerals (U+3021-U+3029).
    Kana are NOT matched despite the function's name.
    """
    return bool(_JA_KANJI_PATTERN.search(text))

def has_japanese_kanji(text):
    """Return True if *text* contains at least one CJK (kanji) character."""
    # Unicode code-point ranges for CJK Unified Ideographs and extensions A-H.
    kanji_ranges = (
        (0x4E00, 0x9FFF),    # CJK Unified Ideographs
        (0x3400, 0x4DBF),    # Extension A
        (0x20000, 0x2A6DF),  # Extension B
        (0x2A700, 0x2B73F),  # Extension C
        (0x2B740, 0x2B81F),  # Extension D
        (0x2B820, 0x2CEAF),  # Extension E
        (0x2CEB0, 0x2EBEF),  # Extension F
        (0x30000, 0x3134F),  # Extension G
        (0x31350, 0x323AF),  # Extension H
    )
    # True as soon as any character's code point falls inside any range.
    return any(
        lo <= ord(ch) <= hi
        for ch in text
        for lo, hi in kanji_ranges
    )

# Module-level romanizer shared by the processing functions below.
katsu = cutlet.Cutlet()
# Romanize loanwords from their kana reading instead of the dictionary's
# original foreign spelling — assumed per cutlet's documented flag; confirm
# against the installed cutlet version.
katsu.use_foreign_spelling = False
#print(katsu.romaji('あんなに愛した君がいない'))
#print(katsu.romaji('こころひかれてく'))
#fenci = getJapanWordList('あんなに愛した君がいない').replace('\n','')
#print(fenci)
#exit()

# Assemble one markdown table content row from the given cell values.
def packMarkdownContentStr(*args):
    """Return a markdown table row '|a|b|...|\\n' built from *args*.

    Each argument is formatted with str(); no arguments yields '|\\n'.
    """
    cells = ''.join(f'{cell}|' for cell in args)
    return f'|{cells}\n'

# Assemble a markdown table header row plus its '---' separator row.
def packMarkdownHeadStr(*args):
    """Return a markdown table header '|a|b|\\n|---|---|\\n' for *args*.

    The separator row carries one '---' cell per argument.
    """
    header = '|' + ''.join(f'{cell}|' for cell in args) + '\n'
    separator = '|' + '---|' * len(args) + '\n'
    return header + separator

# Convert kanji in .lrc files to hiragana and romaji, and emit one markdown
# file per lyric file containing a table of original / segmented / hiragana /
# romaji lines.
def process_lrc_jp_hira_roma_to_md(folder_path,new_folder_path):
    """For each *.lrc directly under folder_path, write <name>.lrc.md into
    new_folder_path: a markdown table with columns timestamp / original /
    word-segmented / hiragana / romaji. Japanese lines (per langdetect) get
    all columns filled; other lines keep only timestamp and original text.
    Output is UTF-8 (OUTPUT_CHARSET)."""
    if not os.path.exists(new_folder_path):
        os.mkdir(new_folder_path)
    # Walk the folder's files.
    for root, dirs, files in os.walk(folder_path):
        dirs[:] = [] # clear dirs so os.walk only visits the top-level folder
        for file in files:
            # Only process .lrc files.
            if file.endswith(('.lrc')):
                file_path = os.path.join(root, file)
                input_charset = getTxtCharset(file_path)
                new_file_path = os.path.join(new_folder_path,file+'.md')
                new_lines_markdown = [f"# {file.replace('.lrc','')}\n",packMarkdownHeadStr('timestamp','ori','fenci','hira','roma')]
                # Read the file's lines.
                with open(file_path, 'r', encoding=input_charset) as f:
                    lines = f.readlines()
                lines_sum = len(lines)
                for line in lines:
                    #print(line)
                    line = line.replace('\n','').replace('\r','')
                    index = line.find(']')
                    if index > -1 :
                        no_timestamps = line[index+1:]
                    else:
                        # Skip lines without a ']' timestamp marker.
                        continue
                    no_timestamps_no_non_text_no_en = get_no_en_no_nontext(no_timestamps)
                    # Detect the line's language; langdetect raises on
                    # empty/undetectable text, treated as "not Japanese".
                    try:
                        lang = detect(no_timestamps_no_non_text_no_en)
                    except Exception:
                        lang = ''
                    if lang in ['ja']:
                        print('%(file)s remain lines:%(sum) -3d' % {'file':file,'sum':lines_sum},end='\r',flush=True) # progress: "<file> remain lines:<n>"
                        ja_split = getJapanWordList(no_timestamps_no_non_text_no_en) # original sentence, word-segmented
                        romaji = katsu.romaji(ja_split) # romaji
                        hir = romkan.to_hiragana(romaji) # hiragana (round-tripped from romaji)
                        # Each derived column is the original text with the cleaned
                        # sentence swapped for its segmented/hiragana/romaji form.
                        new_lines_markdown.append(packMarkdownContentStr(line[:index+1],no_timestamps,no_timestamps.replace(no_timestamps_no_non_text_no_en,ja_split),no_timestamps.replace(no_timestamps_no_non_text_no_en,hir),no_timestamps.replace(no_timestamps_no_non_text_no_en,romaji)))
                    else :
                        #new_lines.append(line+'\n')
                        new_lines_markdown.append(packMarkdownContentStr(line[:index+1],no_timestamps,'','',''))
                    lines_sum -= 1 # NOTE(review): lines skipped via `continue` never reach this, so the progress count can stay above 0
                with open(new_file_path,'w',encoding=OUTPUT_CHARSET) as nf:
                    nf.writelines(new_lines_markdown)
                print('%(file)s done.%(sum) -20s' % {'file':file,'sum':' '},flush=True) # final line: "<file> done."

# Convert kanji in .lrc files to hiragana; write new .lrc files containing
# the segmented original line followed by a hiragana reading line.
def process_lrc_Jp_Hira_Roma(folder_path,new_folder_path):
    """Insert hiragana reading lines into .lrc lyric files.

    For every *.lrc directly inside folder_path, each Japanese line (per
    langdetect) is written twice: once word-segmented and once with kanji
    replaced by hiragana readings, both keeping the timestamp prefix.
    Other lines are copied through unchanged. Output is UTF-8 in
    new_folder_path; when source and target folders are identical a
    'newlrc' subfolder is used so the inputs are not overwritten.
    """
    # Identical paths would clobber the input — redirect to a subfolder.
    if os.path.join(folder_path,'a') == os.path.join(new_folder_path,'a'):
        new_folder_path = os.path.join(new_folder_path,'newlrc')
    if not os.path.exists(new_folder_path):
        os.mkdir(new_folder_path)
    for root, dirs, files in os.walk(folder_path):
        dirs[:] = []  # clear subdirs: only the top-level folder is scanned
        for file in files:
            if not file.endswith('.lrc'):
                continue
            file_path = os.path.join(root, file)
            input_charset = getTxtCharset(file_path)
            print(file)
            new_file_path = os.path.join(new_folder_path,file)
            new_lines = []
            with open(file_path, 'r', encoding=input_charset) as f:
                lines = f.readlines()
            lines_sum = len(lines)
            for line in lines:
                line = line.replace('\n','').replace('\r','')
                # BUGFIX: the counter was never decremented, so the progress
                # display always showed the file's total line count.
                lines_sum -= 1
                index = line.find(']')
                if index > -1:
                    no_timestamps = line[index+1:]
                else:
                    # BUGFIX: lines without ']' previously fell through and
                    # reused the stale no_timestamps from the prior iteration
                    # (UnboundLocalError on the first line); copy such lines
                    # through unchanged instead.
                    new_lines.append(line+'\n')
                    continue
                no_timestamps_no_non_text_no_en = get_no_en_no_nontext(no_timestamps)
                try:
                    lang = detect(no_timestamps_no_non_text_no_en)
                except Exception:
                    lang = ''  # langdetect raises on empty/undetectable text
                if lang in ['ja']:
                    # progress display: "<file> remain lines:<n>"
                    print('%(file)s remain lines:%(sum) -3d' % {'file':file,'sum':lines_sum},end='\r',flush=True)
                    no_timestamps_no_non_text_no_en_split = getJapanWordList(no_timestamps_no_non_text_no_en)
                    romaji = katsu.romaji(no_timestamps_no_non_text_no_en_split)
                    hir = romkan.to_hiragana(romaji)
                    new_lines.append(line.replace(no_timestamps_no_non_text_no_en,no_timestamps_no_non_text_no_en_split)+'\n')
                    new_lines.append(line.replace(no_timestamps_no_non_text_no_en,hir)+'\n')
                    # new_lines.append(line.replace(no_timestamps_no_non_text_no_en,romaji)+'\n')
                else:
                    new_lines.append(line+'\n')
            with open(new_file_path,'w',encoding=OUTPUT_CHARSET) as nf:
                nf.writelines(new_lines)
            print('%(file)s done.%(sum) -20s' % {'file':file,'sum':' '},flush=True)

def process_lrc_jp_hira_roma_by_web(folder_path:str,new_folder_path:str):
    """Annotate Japanese lines in .lrc files using the ezlang.net web service.

    For every *.lrc directly inside folder_path, each Japanese lyric line
    (per langdetect) is followed by a hiragana line and a romaji line,
    all keeping the timestamp prefix; other lines are copied unchanged.
    Output is UTF-8 in new_folder_path (a 'newlrc' subfolder when input
    and output folders are the same, to avoid overwriting the sources).
    """
    # Identical paths would clobber the input — redirect to a subfolder.
    if os.path.join(folder_path,'a') == os.path.join(new_folder_path,'a'):
        new_folder_path = os.path.join(new_folder_path,'newlrc')
    if not os.path.exists(new_folder_path):
        os.mkdir(new_folder_path)
    for root, dirs, files in os.walk(folder_path):
        dirs[:] = []  # clear subdirs: only the top-level folder is scanned
        for file in files:
            if not file.endswith('.lrc'):
                continue
            file_path = os.path.join(root, file)
            input_charset = getTxtCharset(file_path)
            new_lines = []
            with open(file_path, 'r', encoding=input_charset) as f:
                lines = f.readlines()
            lines_sum = len(lines)
            for line in lines:
                line = line.replace('\n','').replace('\r','')
                # BUGFIX: decrement for every line; previously only Japanese
                # lines decremented, so the remaining-lines display was wrong
                # and never reached zero.
                lines_sum -= 1
                index = line.find(']')
                if index > -1:
                    no_timestamps = line[index+1:]
                else:
                    continue  # skip lines without a ']' timestamp marker
                no_timestamps_no_non_text_no_en = get_no_en_no_nontext(no_timestamps)
                try:
                    lang = detect(no_timestamps_no_non_text_no_en)
                except Exception:
                    lang = ''  # langdetect raises on empty/undetectable text
                if lang in ['ja']:
                    # progress display: "<file> remain lines:<n>"
                    print('%(file)s remain lines:%(sum) -3d' % {'file':file,'sum':lines_sum},end='\r',flush=True)
                    d = packDictOfJpByWeb(no_timestamps_no_non_text_no_en)
                    hira = d['hira']
                    roma = d['roma']
                    new_lines.append(line+'\n')
                    new_lines.append(line.replace(no_timestamps_no_non_text_no_en,hira)+'\n')
                    new_lines.append(line.replace(no_timestamps_no_non_text_no_en,roma)+'\n')
                else:
                    new_lines.append(line+'\n')
            with open(os.path.join(new_folder_path,file),'w',encoding=OUTPUT_CHARSET) as w:
                w.writelines(new_lines)
            print('%(file)s done.%(sum) -20s' % {'file':file,'sum':' '},flush=True)


# Strip English letters and non-text characters from a lyric line.
def get_no_en_no_nontext(txt:str):
    """Return *txt* with non-text symbols and ASCII letters removed.

    First removes anything outside word chars / whitespace / CJK /
    hiragana / katakana (non_text_pattern), then removes a-z/A-Z
    (del_english_pattern), stripping surrounding spaces at each step.
    Returns '' when nothing textual remains.
    """
    cleaned = non_text_pattern.sub('', txt).strip(' ')
    # BUGFIX/idiom: the old `== None` branch was dead (re.sub always
    # returns a str, never None); a falsy check covers the empty case.
    if not cleaned:
        return ''
    return del_english_pattern.sub('', cleaned).strip(' ')

# Convert Japanese kanji to kana/romaji using locally installed modules
# (nltk, mecab, cutlet, romkan).
def getJaKanjiHiraByModel(sentence:str):
    """Unimplemented placeholder for a local (offline) kanji-to-kana
    conversion; the web-based path below is used instead. TODO: implement."""
    pass

# ------ ezlang.net web service: Japanese kanji -> hiragana  begin ------#
# Request headers for the ezlang.net AJAX endpoint.
HEADERS_FOR_EZ = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        #"Cookie":"",
        "Host":"www.ezlang.net",
        "X-Requested-With": "XMLHttpRequest", # marks the call as AJAX — required by the endpoint
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin":'https://www.ezlang.net',
        "Referer":"https://www.ezlang.net/zh-Hans/tool/kana"
        }

# Sample of the HTML fragment the service returns (the input that
# getJaKanjiHiraByPostResponse parses); kept verbatim as reference:
'''get_hira_via_request_post_by_sentence()函数返回数据样式：
res = '<div class="line"><span class="morpheme">あんな</span><span class="morpheme">に</span><span class="morpheme"><ruby>愛<rp>[</rp><rt>あい</rt><rp>]</rp></ruby></span><span class="morpheme">し</span><span class="morpheme">た</span><span class="morpheme"><ruby>君<rp>[</rp><rt>きみ</rt><rp>]</rp></ruby></span><span class="morpheme">が</span><span class="morpheme">い</span><span class="morpheme">ない</span></div>'
'''
# NOTE(review): appears unused in this file — the session cookie is cached
# directly in HEADERS_FOR_EZ['Cookie'] instead; confirm before removing.
headers_cookie_ezlang = ''
# Convert Japanese kanji to kana via the ezlang.net web service.
def get_jp_kanji_hira_request_post_by_sentence(sentence:str):
    """Convert kanji in *sentence* to hiragana via ezlang.net.

    POSTs the sentence to the site's AJAX endpoint and feeds the returned
    HTML fragment to getJaKanjiHiraByPostResponse(). Best-effort: returns
    the converted sentence on success, '' on any network/HTTP/parse failure.
    """
    url = 'https://www.ezlang.net/ajax/tool_data.php'
    # Form payload expected by the 'kana' tool.
    data = {
        'txt': sentence,
        'sn': 'kana'
    }
    try:
        # BUGFIX: added a timeout — without one a stalled server would hang
        # the whole batch run indefinitely.
        response = requests.post(url, data=data, headers=HEADERS_FOR_EZ, timeout=15)
        if response.status_code == 200:
            # Cache the session cookie so later calls reuse it.
            if not HEADERS_FOR_EZ.get('Cookie'):
                HEADERS_FOR_EZ['Cookie'] = response.headers.get('set-cookie')
            r_info = response.json()
            return getJaKanjiHiraByPostResponse(r_info[1], sentence)
        return ''
    except (requests.RequestException, ValueError, IndexError):
        # BUGFIX: a non-JSON body (ValueError) or an unexpectedly short
        # payload (IndexError) previously escaped this best-effort handler.
        return ''

# Regex patterns for parsing the service's HTML response.
# BUGFIX: both were plain strings, so '\[' / '\]' were invalid escape
# sequences (SyntaxWarning on modern Python); raw strings match identically.
span_pattern = re.compile(r'<span class="morpheme">(.*?)</span>')
# Captures (kanji, reading) pairs from <ruby>K<rp>[</rp><rt>R</rt><rp>]</rp></ruby>.
kanji_hira_pattern = re.compile(r'<ruby>(.*?)<rp>\[</rp><rt>(.*?)</rt><rp>\]</rp></ruby>')

def getJaKanjiHiraByPostResponse(resp:str,origin:str):
    """Replace each kanji run in *origin* with the reading parsed from *resp*.

    *resp* is the HTML fragment returned by ezlang.net (see the sample
    above); every <ruby> kanji/reading pair found is substituted into
    *origin* via plain string replacement. Returns the converted sentence.
    """
    for kanji, reading in kanji_hira_pattern.findall(resp):
        origin = origin.replace(kanji, reading)
    return origin

# Pack a Japanese sentence's original text, hiragana, and romaji into a dict
# (kanji-to-hiragana conversion done through the ezlang.net web service).
def packDictOfJpByWeb(sentence:str):
    """Return {'orig': sentence, 'hira': ..., 'roma': ...} for *sentence*.

    'hira' is the web-converted hiragana (word-segmented); 'roma' is
    derived locally from it with romkan.to_roma.
    """
    dictJp = {'orig':sentence}
    hira = getJapanWordList(get_jp_kanji_hira_request_post_by_sentence(sentence))
    # NOTE(review): getJapanWordList is applied twice (above and here), and
    # 'roma' is built from the singly-segmented text while 'hira' is
    # double-segmented — looks unintentional; confirm with the author.
    dictJp['hira'] = getJapanWordList(hira)
    dictJp['roma'] = romkan.to_roma(hira)
    return dictJp
# ------ ezlang.net web service: Japanese kanji -> hiragana  end ------#

def process_jp_txt():
    """Read ./content.txt and print, per sentence, its word segmentation,
    hiragana reading, and romaji transliteration."""
    file_path = './content.txt'
    input_charset = getTxtCharset(file_path)
    # Flatten the text and normalize question marks into sentence-ending
    # periods before splitting.
    with open(file_path, 'r', encoding=input_charset) as f:
        content = f.read().replace('\n','').replace('？','。').replace('?','。')
    for sentence in splitSentence(content):
        segmented = getJapanWordList(sentence)
        romaji = katsu.romaji(segmented)
        hiragana = romkan.to_hiragana(romaji)
        print(segmented)
        print(hiragana)
        print(romaji+'\n')

if __name__ == "__main__":
    pass
    #print(packDictOfJpByWeb('心魅かれてく'))
    #getJaKanjiHiraByPostResponse(res,'あんなに愛した君がいない')
    # Folder whose .lrc files will be processed
    folder_path = 'C:/E/work_myself/python_study/nlp/'  # source folder
    new_folder_path = 'C:/E/work_myself/python_study/nlp/' # destination folder
    # process_lrc_jp_hira_roma_to_md(folder_path,new_folder_path)
    process_lrc_Jp_Hira_Roma(folder_path,new_folder_path)
    # process_lrc_jp_hira_roma_by_web(folder_path,new_folder_path)
    '''katsu = cutlet.Cutlet()
    katsu.use_foreign_spelling = False
    print(katsu.romaji('あんなに愛した君がいない')) #Anna ni aishita kimi ga inai
    kunrei = cutlet.Cutlet('kunrei')
    print(kunrei.romaji('あんなに愛した君がいない')) #Anna ni aisita kimi ga inai
    print(romkan.to_roma(katsu.romaji('あんなに愛した君がいない'))) # Anna ni aishita kimi ga inai
    print(romkan.to_hiragana(katsu.romaji('あんなに愛した君がいない'))) # あんあ に あいした きみ が いない
    print(romkan.to_hiragana('Anna ni aisita kimi ga inai'))'''
    #process_jp_txt()
