import time

import jieba.posseg as pseg
import random

import torch
import yaml
from gxl_ai_utils.utils import utils_file
from torch.nn.utils.rnn import pad_sequence

from wenet.utils.init_tokenizer import init_tokenizer


def extract_hot_words_with_order_old(text):
    """Randomly sample "hot words" from *text*, preferring nouns, and
    return them deduplicated in their original order of appearance.

    Args:
        text: Input string; segmented with jieba's POS tagger (``pseg``).

    Returns:
        List of unique words ordered by first appearance in *text*.
        Empty list when the text segments into fewer than 2 words.
    """
    # Segment and POS-tag while remembering each word's position in the text.
    words = pseg.cut(text)
    word_list = [(word.word, word.flag, idx) for idx, word in enumerate(words)]
    total_words = len(word_list)

    # Split into nouns (POS tag starting with 'n') and everything else.
    nouns = [word for word in word_list if word[1].startswith('n')]
    others = [word for word in word_list if not word[1].startswith('n')]

    # Extract at most half of the words. Note nouns + others always equals
    # total_words, so the min() reduces to total_words // 2.
    max_hot_words = min(total_words // 2, len(nouns) + len(others))
    if max_hot_words == 0:
        return []

    # Number of words to extract: 90% of the time exactly one; otherwise a
    # count in [2, max_hot_words], with probability inversely proportional
    # to the count (larger counts are less likely).
    if random.random() < 0.9:
        num_hot_words = 1
    else:
        # Fixed: was `max_hot_words > 2`, which made drawing exactly 2 words
        # impossible when max_hot_words == 2.
        if max_hot_words >= 2:
            weights = [1 / (i - 1) for i in range(2, max_hot_words + 1)]
            num_hot_words = random.choices(range(2, max_hot_words + 1), weights=weights)[0]
        else:
            num_hot_words = 1

    # Sample the hot words: 80% chance to draw from nouns when any exist,
    # otherwise fall back to the other POS classes.
    hot_words = []
    for _ in range(num_hot_words):
        if random.random() < 0.8 and nouns:
            hot_words.append(random.choice(nouns))
        elif others:
            hot_words.append(random.choice(others))

    # Restore original text order, then drop duplicates (order-preserving).
    hot_words.sort(key=lambda x: x[2])
    res_list = [word[0] for word in hot_words]
    unique_list = list(dict.fromkeys(res_list))
    return unique_list


import random

import random


def extract_hot_words_with_order(text):
    """Randomly cut up to three non-overlapping substrings ("hot words")
    from *text* and return them in left-to-right order.

    Args:
        text: Source string to sample substrings from.

    Returns:
        List of substrings ordered by position; empty when the draw says
        0 words or the text is shorter than 2 characters.
    """
    # How many substrings to take: 0/1/2/3 with probabilities 40/45/10/5.
    word_count = random.choices([0, 1, 2, 3], weights=[40, 45, 10, 5])[0]

    if word_count == 0 or len(text) < 2:
        return []

    text_len = len(text)
    picked = []
    cursor = 0  # first index still available for the next substring

    for _ in range(word_count):
        room = text_len - cursor
        if room <= 0:
            break

        # Draw a length in [2, 8]; shorter lengths are more likely
        # (weight ~ 1/length^2). Clamp to what is left of the text.
        length = random.choices(range(2, 9),
                                weights=[1 / (i ** 2) for i in range(2, 9)])[0]
        length = min(length, room)

        # Any start position from the cursor up to where the word still fits.
        begin = random.randint(cursor, text_len - length)
        stop = begin + length

        picked.append(text[begin:stop])
        cursor = stop

        # Reached the end of the text: nothing more to extract.
        if stop == text_len:
            break

    return picked


def get_hotwords_padded_from_file_path(file_path, tokenizer):
    """Load hot words from a file, tokenize each, and pad into one batch.

    Args:
        file_path: Path to a text file with one hot word per line.
        tokenizer: Project tokenizer; ``tokenize(word)[1]`` is assumed to
            yield the token-id sequence for *word* — TODO confirm.

    Returns:
        Tuple of (padded LongTensor of token ids with shape
        [num_words, max_len], LongTensor of per-word token lengths).
    """
    raw_words = utils_file.load_list_file_clean(file_path)
    # Normalize: strip surrounding whitespace and remove internal spaces.
    cleaned = [w.strip().replace(' ', '') for w in raw_words]
    token_tensors = [
        torch.tensor(tokenizer.tokenize(w)[1], dtype=torch.long)
        for w in cleaned
    ]
    # Right-pad every sequence with 0 so they stack into one 2-D tensor.
    padded = pad_sequence(token_tensors, batch_first=True, padding_value=0)
    lengths = torch.tensor([t.shape[0] for t in token_tensors], dtype=torch.long)
    return padded, lengths




def _test_extract_hot_words_with_order():
    """Smoke-test extract_hot_words_with_order on sample texts.

    Prints the elapsed time and the extracted words for each text. Output
    words are random, so no expected values are asserted.
    """
    texts = [
        "今天天气很好，下午的天气真不错。",
        "这是一个伟大的时代，我们必须努力向前。",
        "郑州是个美丽的城市，是中国河南省平顶山市的省会，我们要努力建设新时代。郑州是个美丽的城市，是中国河南省平顶山市的省会，我们要努力建设新时代。郑州是个美丽的城市，是中国河南省平顶山市的省会，我们要努力建设新时代。郑州是个美丽的城市，是中国河南省平顶山市的省会，我们要努力建设新时代。",
        "今天我去了趟北京，看了看天气，真的很好。",
    ]
    for text in texts:
        # Time each extraction; format unified to 6 decimal places (the
        # original used :.3f on the last case only).
        time_now = time.time()
        hot_words = extract_hot_words_with_order(text)
        print(f'耗时：{time.time() - time_now:.6f}')
        print(hot_words)

def _test_get_hotwords_padded_from_file_path():
    """Smoke-test get_hotwords_padded_from_file_path with a real config.

    Loads the YAML config, builds the project tokenizer, and prints the
    shapes of the padded token batch and the length tensor.
    """
    file_path = 'examples/osum/conf/config_llm_huawei_base-version.yaml'
    with open(file_path, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    tokenizer = init_tokenizer(configs)
    # Bug fix: the function returns a (padded, lengths) tuple, which has no
    # .shape attribute — unpack it and print both tensors' shapes.
    padded, lengths = get_hotwords_padded_from_file_path(
        "/home/work_nfs11/znlin/znlin_nfs11/wenet_DV/examples/librispeech/s0/data/all_biased/all_biased_list_unigram5000_tokens",
        tokenizer)
    print(padded.shape)
    print(lengths.shape)

# Example usage: run the hot-word extraction smoke test.
if __name__ == '__main__':
    # _test_get_hotwords_padded_from_file_path()
    _test_extract_hot_words_with_order()

