#-*- coding: utf-8 -*-

import requests
import json

def get_phonetic(word):
    """Fetch UK/US phonetic notation for an English word from the Youdao dict API.

    Args:
        word: the English word to look up.

    Returns:
        A formatted string with both phonetics on success, otherwise a
        fallback "not found" message (also printed diagnostics on error).
    """
    url = "https://dict.youdao.com/jsonapi"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"}
    try:
        # params= lets requests URL-encode the query word safely (the old
        # f-string URL sent it raw); timeout prevents hanging on a dead link.
        response = requests.get(url, params={"q": word}, headers=headers, timeout=10)
        response.raise_for_status()
        data = response.json()
        # Extract phonetic info from the 'simple' section of the payload.
        entries = data.get('simple', {}).get('word') or []
        if entries:
            word_data = entries[0]
            if 'ukphone' in word_data and 'usphone' in word_data:
                uk_phonetic = word_data['ukphone']
                us_phonetic = word_data['usphone']
                return f"英式音标: [{uk_phonetic}], 美式音标: [{us_phonetic}]"
    except requests.RequestException as e:
        print(f"请求出错: {e}")
    except (KeyError, IndexError, json.JSONDecodeError, AttributeError):
        # AttributeError covers a 'simple' value that is not a dict.
        print("解析数据出错，可能是接口返回格式有变化。")
    return "未找到该单词的音标信息。"


def packMarkdownContentStr(*args):
    """Render *args as a single markdown table content row, e.g. '|a|b|\\n'.

    With no arguments the result is just '|\\n'.
    """
    cells = ''.join(f'{cell}|' for cell in args)
    return f'|{cells}\n'

def packMarkdownHeadStr(*args):
    """Render *args as a markdown table header row followed by the
    '|---|...' separator row required by markdown tables.
    """
    header = '|' + ''.join(f'{cell}|' for cell in args) + '\n'
    separator = '|' + '---|' * len(args) + '\n'
    return header + separator

# ------ nltk: find word lemma (base form) begin ------#
import nltk
#nltk.download('averaged_perceptron_tagger_eng')
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
#?from nltk.stem.wordnet import WordNetLemmatizer
def get_wordnet_pos(tag: str):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Only the first letter of the tag matters (J=adjective, N=noun,
    V=verb, R=adverb). Returns None for any other tag, matching the
    original implicit fall-through.
    """
    prefix_to_pos = {
        'J': wordnet.ADJ,
        'N': wordnet.NOUN,
        'V': wordnet.VERB,
        'R': wordnet.ADV,
    }
    return prefix_to_pos.get(tag[:1])
# ------ nltk: find word lemma (base form) end ------#

# ------- Japanese-language experiments begin ------#
import requests

def get_japanese_pronunciation(kanji):
    """Look up the kana reading of a Japanese word/phrase via the jisho.org API.

    Args:
        kanji: Japanese text to search for.

    Returns:
        The first 'reading' found in the response, or None when nothing
        matched or the request failed (failure is also printed).
    """
    url = f"https://jisho.org/api/v1/search/words?keyword={kanji}"
    try:
        # timeout keeps a dead connection from blocking forever
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        data = response.json()
        # .get avoids an uncaught KeyError when 'data' is absent from the
        # payload; return the first entry that carries a reading.
        for item in data.get('data', []):
            for j in item.get('japanese', []):
                reading = j.get('reading')
                if reading:
                    return reading
        return None
    except requests.RequestException as e:
        print(f"请求出错: {e}")
        return None


import pykakasi

import MeCab

def get_pronunciation_mecab(text,
                            tagger_args=r'C:\D\tools\Python\Python310\Lib\site-packages\unidic_lite'):
    """Return the concatenated reading of *text* via MeCab morphological analysis.

    Args:
        text: Japanese text to analyze.
        tagger_args: argument string passed to MeCab.Tagger. Defaults to the
            original machine-specific unidic-lite path so existing callers
            behave unchanged; pass e.g. '-d /usr/local/lib/mecab/dic/unidic'
            on other hosts.

    Returns:
        All per-token readings joined into one string; tokens without a
        reading ('*' or missing field) are skipped.
    """
    tagger = MeCab.Tagger(tagger_args)
    node = tagger.parseToNode(text)
    pronunciation = []
    while node:
        feature = node.feature.split(',')
        # field 7 presumably holds the reading in this dictionary's CSV
        # feature layout — TODO confirm against the unidic feature spec
        if len(feature) > 7:
            reading = feature[7]
            if reading != '*':
                pronunciation.append(reading)
        node = node.next
    return ''.join(pronunciation)


# ------- Japanese-language experiments end ------#

if __name__ == '__main__':
    pass
    # Test example
    #word = "wonder"
    #phonetic_info = get_phonetic(word)
    #print(f"{word}: {phonetic_info}")
    #print(packMarkdownHeadStr('a','b','b','b','b','b','b')+packMarkdownContentStr(1,2,3,4,5,6,7))
    #print(packMarkdownHeadStr('h','t','j')+packMarkdownContentStr(1,2,7))
    # Usage example
    # ------ nltk: find word lemma (base form) begin ------#
    #wnl = WordNetLemmatizer()
    #sentence = 'the results came faster than anyone could have imagined.'
    #tokens = nltk.word_tokenize(sentence)
    #tagg = nltk.pos_tag(tokens)
    #print(tagg)
    #for k,v in tagg:
    #    if v in ('NNS','VBS','JJR','NN','VB','VBN'):
    #        print(wnl.lemmatize(k,get_wordnet_pos(v)))
    # ------ nltk: find word lemma (base form) end ------#
    # Test pykakasi
    #kk = pykakasi.Kakasi()
    #ph = kk.convert('果てない暗闇から飛び出そう')
    #hira_ph = []
    #for p in ph:
    #    #print(p)
    #    for k,v in p.items():
    #        if k=='hira':
    #            hira_ph.append(v)
    #print(''.join(hira_ph))

    # Test mecab
    #text = "何が起きたって"
    #pronunciation = get_pronunciation_mecab(text)
    #print(f"{text} 的发音是: {pronunciation}")

    # Test access to the jisho.org API
    #kanji = "何が起きたって"
    #pronunciation = get_japanese_pronunciation(kanji)
    #if pronunciation:
    #    print(f"{kanji} 的发音是: {pronunciation}")
    #else:
    #    print(f"未找到 {kanji} 的发音信息。")

    # Test: locate where English text starts inside Japanese lyric lines
    lrc1 = 'DAN DAN 心魅かれてく'
    lrc2 = 'とびだそう Hold my hand'
    print(lrc1.find('DAN DAN'))  # plain substring search
    print(lrc2.index('Hold'))
    import re
    # Position of the first ASCII letter found by regex.
    # NOTE(review): re.search returns None when a line has no ASCII letter,
    # and .start() would then raise AttributeError — both test lines here
    # do contain letters, so this never fires in this script.
    en_pattern = re.compile('[a-zA-Z]')
    match = re.search(en_pattern,lrc2)
    print(match.start())
    match = re.search(en_pattern,lrc1)
    print(match.start())