import time
import random
import copy
import torch
import pickle
import os
import json
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from loguru import logger
import re
# Functions provided: save_hyperparams, normalize_word, get_pos_embeded, str2list, list_find
# Save command-line arguments and configuration
def save_hyperparams(args, config_dict):
    """
    Persist the training hyperparameters as JSON files under
    ``args.tensorboard_logdir``.

    :param args: object holding command-line arguments (typically an
        ``argparse.Namespace``); must expose ``tensorboard_logdir`` and be
        convertible with ``vars()``
    :param config_dict: additional configuration to store in ``config.json``
    """
    # Ensure the log directory exists — the original code raised
    # FileNotFoundError when it had not been created yet.
    os.makedirs(args.tensorboard_logdir, exist_ok=True)
    # Save the args object (usually the hyperparameters) to args.json.
    # Explicit UTF-8 because ensure_ascii=False may emit non-ASCII text.
    args_file = os.path.join(args.tensorboard_logdir, "args.json")
    with open(args_file, "w", encoding="utf-8") as f:
        json.dump(vars(args), f, indent=2, ensure_ascii=False)
    # Save the extra configuration dict to config.json.
    config_file = os.path.join(args.tensorboard_logdir, "config.json")
    with open(config_file, "w", encoding="utf-8") as f:
        json.dump(config_dict, f, indent=2, ensure_ascii=False)


# normalize number to 0
def normalize_word(word):
    """
    Replace every digit character in *word* with '0'.

    :param word: input string
    :return: new string with all digits replaced by '0'
    """
    # "".join over a generator avoids the quadratic cost of repeated
    # string concatenation in the original loop.
    return "".join("0" if char.isdigit() else char for char in word)


# Generate position embeddings
# 生成位置嵌入
def get_pos_embeded(i, lf1, rg1, lf2, rg2, maxlen=80):
    """
    根据给定的区间和位置索引生成位置嵌入。
    :param i: 当前索引位置
    :param lf1, rg1: 第一个区间的左边界和右边界
    :param lf2, rg2: 第二个区间的左边界和右边界
    :param maxlen: 最大长度，用于缩放位置
    :return: 返回两个区间的嵌入结果
    """

    # scale to [0,2*max-length+2]
    # 位置嵌入函数，将位置值映射到 [0, 2*maxlen + 2] 范围
    def pos_embed(x):
        if x < -1 * maxlen:  # 如果位置小于 -maxlen，则返回 0
            return 0
        if -1 * maxlen <= x <= maxlen: # 如果位置在 [-maxlen, maxlen] 范围内
            return x + maxlen + 1  # 映射到 [0, 2*maxlen+1]
        if x > maxlen: # 如果位置大于 maxlen
            return 2 * maxlen + 2 # 返回最大值

    # corresponding to Eq. 1 in paper
    # 根据公式计算两个区间的位置信息
    def pos_embed2(i, l, r):
        if i >= l and i <= r:# 如果位置在区间 [l, r] 内，返回 0
            x = 0
        elif i < l: # 如果位置在区间左侧
            x = i - l # 计算距离左边界的偏移量
        else: # 如果位置在区间右侧
            x = i - r # 计算距离右边界的偏移量
        return pos_embed(x) # 返回计算后的嵌入

    # 返回两个区间的嵌入
    return pos_embed2(i, lf1, rg1), pos_embed2(i, lf2, rg2)


# transform string to list
# '第<N>天' -> ['第','<N>','天']
def str2list(str, spec=None):
    """
    Split a string into a list of single characters, keeping special markers
    (e.g. '<N>') intact as whole tokens.

    :param str: input string (all whitespace is removed first); the name
        shadows the builtin ``str`` but is kept for backward compatibility
    :param spec: optional list of multi-character markers to preserve
    :return: list of characters and markers
    """
    # Raw string: "\s" in a plain literal is an invalid escape sequence and
    # raises a SyntaxWarning on modern Python.
    str = re.sub(r"\s+", "", str)  # strip all whitespace
    res = []
    i = 0
    while i < len(str):
        match = False
        if spec:
            for sp in spec:
                # startswith(sp, i) is equivalent to the original slice
                # compare and never matches past the end of the string.
                if str.startswith(sp, i):
                    res.append(sp)   # keep the marker as one token
                    i += len(sp)     # skip past it so it is never split
                    match = True
                    break
        if not match:
            res.append(str[i])       # ordinary character
            i += 1
    return res


# whether ls2 in ls1 (list), return index, similar to function find()
def list_find(ls1, ls2):
    """
    Find the first occurrence of sublist *ls2* inside list *ls1*,
    analogous to ``str.find``.

    :param ls1: list to search in
    :param ls2: sublist to search for
    :return: starting index of the first match, or -1 if not found
    """
    sub_len = len(ls2)
    # Compare a slice at every candidate start position; an empty ls2
    # matches at index 0, and a too-long ls2 yields no candidates.
    for start in range(len(ls1) - sub_len + 1):
        if ls1[start : start + sub_len] == ls2:
            return start
    return -1


# def get_text_input(self, caption):
#     caption_tokens = self.tokenizer.tokenize(caption)
#     caption_tokens = ["[CLS]"] + caption_tokens + ["[SEP]"]
#     caption_ids = self.tokenizer.convert_tokens_to_ids(caption_tokens)
#     if len(caption_ids) >= self.max_seq_len:
#         caption_ids = caption_ids[: self.max_seq_len]
#     else:
#         caption_ids = caption_ids + [0] * (self.max_seq_len - len(caption_ids))
#     caption = torch.tensor(caption_ids)
#     return caption
#
#
# def load_model_decode(model_dir, data, name, gpu, seg=True):
#     data.HP_gpu = gpu
#     print("Load Model from file: ", model_dir)
#     model = SeqModel(data)
#
#     model.load_state_dict(torch.load(model_dir))
#
#     print(("Decode %s data ..." % (name)))
#     start_time = time.time()
#     speed, acc, p, r, f, pred_results, gazs = evaluate(data, model, name)
#     end_time = time.time()
#     time_cost = end_time - start_time
#     if seg:
#         print(
#             (
#                 "%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"
#                 % (name, time_cost, speed, acc, p, r, f)
#             )
#         )
#     else:
#         print(
#             (
#                 "%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"
#                 % (name, time_cost, speed, acc)
#             )
#         )
#
#     return pred_results
#
#
# def print_results(pred, modelname=""):
#     toprint = []
#     for sen in pred:
#         sen = " ".join(sen) + "\n"
#         toprint.append(sen)
#     with open(modelname + "_labels.txt", "w") as f:
#         f.writelines(toprint)
#
#
# def predict_check(pred_variable, gold_variable, mask_variable):
#     """
#     input:
#         pred_variable (batch_size, sent_len): pred tag result, in numpy format
#         gold_variable (batch_size, sent_len): gold result variable
#         mask_variable (batch_size, sent_len): mask variable
#     """
#
#     pred = pred_variable.cpu().data.numpy()
#     gold = gold_variable.cpu().data.numpy()
#     mask = mask_variable.cpu().data.numpy()
#     overlaped = pred == gold
#     right_token = np.sum(overlaped * mask)  ## ！！ token-level
#     total_token = mask.sum()
#
#     return right_token, total_token
#
#
# def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet):
#     """
#     input:
#         pred_variable (batch_size, sent_len): pred tag result
#         gold_variable (batch_size, sent_len): gold result variable
#         mask_variable (batch_size, sent_len): mask variable
#     """
#     batch_size = gold_variable.size(0)
#     seq_len = gold_variable.size(1)
#     mask = mask_variable.cpu().data.numpy()
#     pred_tag = pred_variable.cpu().data.numpy()
#     gold_tag = gold_variable.cpu().data.numpy()
#     batch_size = mask.shape[0]
#     pred_label = []
#     gold_label = []
#     for idx in range(batch_size):
#         pred = [
#             label_alphabet.get_instance(int(pred_tag[idx][idy]))
#             for idy in range(seq_len)
#             if mask[idx][idy] != 0
#         ]
#         gold = [
#             label_alphabet.get_instance(gold_tag[idx][idy])
#             for idy in range(seq_len)
#             if mask[idx][idy] != 0
#         ]
#
#         assert len(pred) == len(gold)
#         pred_label.append(pred)
#         gold_label.append(gold)
#
#     return pred_label, gold_label
#
#
# def print_batchword(data, batch_word, n):
#     with open("labels/batchwords.txt", "a") as fp:
#         for i in range(len(batch_word)):
#             words = []
#             for id in batch_word[i]:
#                 words.append(data.word_alphabet.get_instance(id))
#             fp.write(str(words))
#
#
# def save_data_setting(data, save_file):
#     new_data = copy.deepcopy(data)
#     ## remove input instances
#     new_data.train_texts = []
#     new_data.dev_texts = []
#     new_data.test_texts = []
#     new_data.raw_texts = []
#
#     new_data.train_Ids = []
#     new_data.dev_Ids = []
#     new_data.test_Ids = []
#     new_data.raw_Ids = []
#     ## save data settings
#
#     # with open(save_file, 'wb') as fp:
#     #     pickle.dump(new_data, fp)
#     # print( "Data setting saved to file: ", save_file)
#
#
# def load_data_setting(save_file):
#     with open(save_file, "rb") as fp:
#         data = pickle.load(fp)
#     print("Data setting loaded from file: ", save_file)
#     data.show_data_summary()
#     return data
#
#
# def lr_decay(optimizer, epoch, decay_rate, init_lr):
#     lr = init_lr * ((1 - decay_rate) ** epoch)
#     print(" Learning rate is setted as:", lr)
#     for param_group in optimizer.param_groups:
#         param_group["lr"] = lr
#     return optimizer
#
#
# def set_seed(seed_num=1023):
#     random.seed(seed_num)
#     torch.manual_seed(seed_num)
#     np.random.seed(seed_num)
