import codecs
import json
import os
from datetime import datetime

import colorama
import jsonlines
from colorama import Fore, Style
from tqdm import tqdm

# pip install colorama jsonlines openai
def logging_print(*args, print_mode=True):
    """Print *args* joined by single spaces, prefixed with a timestamp.

    :param args: values to print; each is converted with ``str()``.
    :param print_mode: when False, suppress output entirely.
    :return: None
    """
    message = " ".join(map(str, args))
    if print_mode:
        stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S GXL-INFO ')
        print(stamp + ' ' + message, flush=True)


def logging_info(*args, print_mode=True):
    """Print *args* in green with a timestamped GXL-INFO prefix.

    :param args: values to print; each is converted with ``str()``.
    :param print_mode: when False, suppress output entirely.
    """
    string_temp = " ".join([str(arg) for arg in args])
    if print_mode:
        # Fixed: the format string had a pointless f-prefix (no placeholders).
        time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S GXL-INFO ')
        string_temp = time_str + ' ' + string_temp
        print(Fore.GREEN + string_temp + Style.RESET_ALL, flush=True)

def logging_error(*args, print_mode=True):
    """Print *args* in red with a timestamped GXL-ERROR prefix.

    :param args: values to print; each is converted with ``str()``.
    :param print_mode: when False, suppress output entirely.
    """
    string_temp = " ".join([str(arg) for arg in args])
    if print_mode:
        # Fixed: the format string had a pointless f-prefix (no placeholders).
        time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S GXL-ERROR ')
        string_temp = time_str + ' ' + string_temp
        print(Fore.RED + string_temp + Style.RESET_ALL, flush=True)


def logging_warning(*args, print_mode=True):
    """Print *args* in yellow with a timestamped GXL-WARNING prefix.

    :param args: values to print; each is converted with ``str()``.
    :param print_mode: when False, suppress output entirely.
    """
    string_temp = " ".join([str(arg) for arg in args])
    if print_mode:
        # Fixed: the format string had a pointless f-prefix (no placeholders).
        time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S GXL-WARNING ')
        string_temp = time_str + ' ' + string_temp
        print(Fore.YELLOW + string_temp + Style.RESET_ALL, flush=True)



class LimitPrinter:
    """Printer that emits at most ``max`` messages, then goes silent."""

    def __init__(self):
        # Cap on how many messages will ever be printed (default 300).
        self.max = 300
        # Count of messages printed so far.
        self.now = 0

    def print(self, *args):
        """Print *args* via logging_print unless the cap has been reached."""
        message = ' '.join(str(item) for item in args)
        if self.now >= self.max:
            return
        logging_print("LIMIT_PRINT: ", message)
        self.now += 1

    def set_max(self, max_in):
        """Replace the message cap with *max_in*."""
        self.max = max_in

    def reset(self):
        """Start counting from zero again, re-enabling output."""
        self.now = 0


global_limit_printer = LimitPrinter()


def logging_limit_print(*text):
    """Print *text* through the shared LimitPrinter (capped output).

    :param text: values forwarded to ``global_limit_printer.print``.
    """
    # Fixed: the `global` declaration was unnecessary — it is only needed
    # when assigning to the name, not for read-only access.
    global_limit_printer.print(*text)


def write_dict_list_to_jsonl(dict_list, jsonl_file_path, is_append: bool = False):
    """Write a list of dicts to a jsonl file, one JSON object per line.

    :param dict_list: list of JSON-serializable dicts.
    :param jsonl_file_path: destination path; parent directories are created.
    :param is_append: when True, append to an existing file instead of
        overwriting it.
    """
    logging_print("write_dict_list_to_jsonl()_数据总条数为:", len(dict_list))
    parent_dir = os.path.dirname(jsonl_file_path)
    # Guard: os.makedirs('') raises for a bare filename with no directory part.
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    # BUG FIX: the original always opened with mode='w', which truncates the
    # file and made is_append=True behave exactly like is_append=False.
    # (The remove-before-write is now redundant: mode='w' truncates anyway.)
    mode = 'a' if is_append else 'w'
    with jsonlines.open(jsonl_file_path, mode=mode) as f:
        f.write_all(dict_list)


def load_dict_from_scp(label_scp_file: str, silence: bool = False) -> dict:
    """Load a Kaldi-style scp file into a dict.

    Each line is split on whitespace; the first token is the key and the
    remaining tokens (re-joined with single spaces) form the value.

    :param label_scp_file: path to the scp file (utf-8).
    :param silence: when True, suppress warnings about malformed lines.
    :return: mapping of key -> value; {} if the file cannot be read.
    """
    res = {}
    with codecs.open(label_scp_file, 'r', encoding='utf-8') as f:
        try:
            lines = f.readlines()
        except Exception as e:
            # Best-effort: an unreadable/undecodable file yields an empty dict
            # instead of raising (kept intentionally broad).
            print(e)
            return {}
        for line in lines:
            line = line.strip()
            items = line.split()
            if len(items) < 2:
                if not silence:
                    logging_print(
                        'load_dict_from_scp;warning_gxl:, this row not conform to the regulation of scp(key content) and skip it:',
                        line)
                continue
            # Fixed: the separate len(items)==2 branch was redundant — joining
            # items[1:] produces the identical result for two-token lines.
            res[items[0].strip()] = (' '.join(items[1:])).strip()
    total_len = len(res)
    logging_print("load_dict_from_scp()_数据总条数为:", total_len)
    return res


def do_split_list(source_list, num_subsets):
    """Split *source_list* into ``num_subsets`` contiguous chunks.

    Every chunk holds ``len(source_list) // num_subsets`` items; the final
    chunk additionally absorbs any remainder.

    :param source_list: the list to split.
    :param num_subsets: number of chunks to produce.
    :return: list of ``num_subsets`` sublists.
    """
    chunk_size = len(source_list) // num_subsets
    # Build (start, stop) bounds for each chunk; the last chunk's stop is
    # None so the slice runs to the end of the list and picks up the rest.
    bounds = [(idx * chunk_size,
               None if idx == num_subsets - 1 else (idx + 1) * chunk_size)
              for idx in range(num_subsets)]
    return [source_list[lo:hi] for lo, hi in bounds]

def do_split_dict(original_dict, num_subsets):
    """Split a dict into ``num_subsets`` smaller dicts by insertion order.

    Each subset receives ``len(original_dict) // num_subsets`` keys; the
    final subset also receives the remainder, so it is the largest.

    :param original_dict: the dict to split.
    :param num_subsets: number of subset dicts to produce.
    :return: list of ``num_subsets`` dicts.
    """
    per_subset = len(original_dict) // num_subsets
    all_keys = list(original_dict)
    subsets = []
    for idx in range(num_subsets):
        lo = idx * per_subset
        # The last subset's slice runs to the end to absorb the remainder.
        hi = None if idx == num_subsets - 1 else lo + per_subset
        subsets.append({key: original_dict[key] for key in all_keys[lo:hi]})
    return subsets

def load_dict_list_from_jsonl(jsonl_file_path) -> list:
    """Load a jsonl file into a list of parsed JSON objects.

    Lines that fail to parse are reported and skipped (best-effort).

    :param jsonl_file_path: path to the jsonl file (utf-8).
    :return: list of parsed objects, one per valid line.
    """
    logging_print("开始执行: load_dict_list_from_jsonl")
    lines_res = []
    with codecs.open(jsonl_file_path, 'r', encoding='utf-8') as f:
        # Improved: stream line by line instead of readlines(), so the whole
        # file is never held in memory at once.
        for line in f:
            try:
                lines_res.append(json.loads(line))
            except json.JSONDecodeError as e:
                # Narrowed from bare Exception: only parse failures are
                # expected here; report the bad line and keep going.
                print(e)
    return lines_res

