import jsonlines
import warnings
import os, json
import os.path as osp
import re  
import random
from typing import Any, Optional
from collections.abc import Iterator
import openpyxl
import math
from datetime import date
import hashlib


# Fix the global RNG seed so the shuffle-based splits below are reproducible.
random.seed(1024)


def read_json(path: str):
    """Load and return the JSON document stored at ``path`` (UTF-8)."""
    with open(path, 'r', encoding='utf-8') as fp:
        return json.load(fp)
        
def save_json(path: str,
              jsonf: list[dict]) -> None:
    """Serialize ``jsonf`` to ``path`` as pretty-printed UTF-8 JSON."""
    serialized = json.dumps(jsonf, ensure_ascii=False, indent=2)
    with open(path, 'w', encoding='utf-8') as fp:
        fp.write(serialized)

def read_jsonl(path: str, warn: bool=True) -> list[dict]:
    """Read every object from a jsonl file; optionally warn when it is empty."""
    with jsonlines.open(path) as reader:
        records = list(reader)
    if warn and not records:
        warnings.warn(f'{path} have no data.')
    return records

def count_doc_lines(path: str) -> int:
    """Count the number of lines in the file at ``path``.

    Returns 0 when the file does not exist or is empty.

    Fixes the original implementation, which shelled out to ``sed -n '$='``:
    that prints nothing for an empty file, so ``stdout.split()[0]`` raised
    IndexError; it was also non-portable (required ``sed`` on PATH).
    """
    if not os.path.exists(path):
        return 0
    # Binary mode: count raw newline-delimited lines without decoding,
    # matching sed's behavior (a final line without '\n' still counts).
    with open(path, 'rb') as f:
        return sum(1 for _ in f)
                
def iter_read_jsonl(path: str) -> Iterator:
    """Lazily yield objects from a jsonl file, one record at a time."""
    with jsonlines.open(path) as reader:
        yield from reader
    
                
def save_jsonl(path: str,
               jsons: list[dict],
               mode: str = 'w') -> None:
    """Write ``jsons`` to ``path`` in jsonl format (one object per line).

    Parent directories are created as needed. ``mode`` may be 'a' to append.

    Fixes the original, which called ``os.makedirs(osp.dirname(path))``
    unconditionally: for a bare filename ``dirname`` is '' and makedirs('')
    raises FileNotFoundError.
    """
    parent = osp.dirname(path)
    if parent:  # '' means current directory — nothing to create
        os.makedirs(parent, exist_ok=True)
    print('save_path:',path)
    with open(path, mode=mode, encoding='utf-8') as f:
        for json_object in jsons:
            json.dump(json_object, f, ensure_ascii=False)
            f.write('\n')


def get_dir_file(dirname: str) -> tuple[list[str], list[str]]:
    """Return (full paths, bare names) of every entry directly under ``dirname``.

    The original listed the directory twice; if the directory changed between
    the two calls the path list and name list could disagree. List once and
    derive both from the same snapshot.
    """
    names = os.listdir(dirname)
    dirpaths = [os.path.join(dirname, name) for name in names]
    return dirpaths, names

def fuzzy_json_parser(line: str) -> str:
    """Strip markdown code-fence markers (```json / ```) and outer whitespace."""
    without_fences = re.sub(r'(```json|```)', '', line)
    return without_fences.strip()


def split_train_test_set(train_test: list[dict],
                         train_percentage: float=0.9,
                         shuffle: bool = True) -> tuple[list[dict], list[dict]]:
    """Split records into a train set and a test set.

    The first ``train_percentage`` of records becomes train (with the
    'prompt' key dropped); the remainder becomes test (with the 'text'
    key dropped).

    NOTE: when ``shuffle`` is True the input list is shuffled IN PLACE
    (uses the module-level seeded RNG).

    The original annotation ``tuple[list[Any]]`` was wrong on both counts:
    ``random.shuffle`` requires a mutable list, and elements must be dicts
    (``.items()`` is called on each) — annotations fixed accordingly.
    """
    if shuffle: random.shuffle(train_test)
    train_nums = int(len(train_test)*train_percentage)
    train = [{k:v for k,v in d.items() if k not in ['prompt']} for d in train_test[:train_nums]]
    test = [{k:v for k,v in d.items() if k not in ['text']} for d in train_test[train_nums:]]
    return train, test
    
    
    
def split_train_test_pos_neg_set(pos_train_test:list[tuple[Any, Any]],
                                 neg_train_test:list[tuple[Any, Any]],
                                 train_percentage: float = 0.9,
                                 pos_percentage: float = 0.5,
                                 shuffle: bool = True,
                                 report: bool = True) -> tuple[Any, Any]:
    """Build train/test splits from positive and negative (train_item, test_item) pairs.

    The first ``train_percentage`` of positives goes to train; enough negative
    train items are added so positives make up roughly ``pos_percentage`` of
    train. The remaining positives form the test positives, padded with up to
    twice as many leftover negative test items. Shuffling is in place.
    """
    if shuffle:
        random.shuffle(pos_train_test)
        random.shuffle(neg_train_test)

    n_pos_train = int(len(pos_train_test) * train_percentage)
    train = [pair[0] for pair in pos_train_test[:n_pos_train]]
    test = [pair[1] for pair in pos_train_test[n_pos_train:]]

    # Negatives needed so that positives are pos_percentage of the train set.
    n_neg_train = int(n_pos_train / pos_percentage * (1-pos_percentage))
    train += [pair[0] for pair in neg_train_test[:n_neg_train]]

    leftover_neg = neg_train_test[n_neg_train:] if n_neg_train < len(neg_train_test) else []
    n_neg_test = min(int(len(test) * 2), len(leftover_neg))
    test += [pair[1] for pair in leftover_neg[:n_neg_test]]

    if report:
        print(f'pos_train:{n_pos_train} | neg_train:{n_neg_train} |'
              f'pos_test:{len(pos_train_test)-n_pos_train} | neg_test:{n_neg_test}')

    return (train, test)

def split_data_to_subset(datas: list[Any], 
                         n: int=1) -> list[list[Any]]:
    """Chunk ``datas`` into consecutive groups of ``n`` items (last may be shorter)."""
    if n > len(datas):
        return [datas]
    num_groups = math.ceil(len(datas) / n)
    return [datas[start:start + n] for start in range(0, num_groups * n, n)]


    
        
def greedy_matcher(string: str, 
                   greedy_str: list[str], 
                   default_return: Any=None, 
                   raise_error: bool = False) -> Optional[str]:
    """Return the first candidate substring found in ``string``.

    string:
        the string to search in
    greedy_str:
        candidate substrings, tried in order (greedy priority)
    default_return:
        returned when nothing matches and raise_error is False
    raise_error:
        when True, raise ValueError if nothing matches

    The original used a bare ``raise`` here; with no active exception that
    produces ``RuntimeError: No active exception to re-raise`` — replaced
    with an informative ValueError.
    """
    for gs in greedy_str:
        if gs in string:
            return gs 
    if raise_error:
        raise ValueError(f'no candidate in {greedy_str!r} matched the given string')
    return default_return
    
def text_replace(strings: list[str], 
                 replacer: dict) -> list[str]:
    """Apply every replacement rule in ``replacer`` (old -> new, in dict
    order) to each string in ``strings`` and return the results.
    """
    results = []
    for text in strings:
        for old, new in replacer.items():
            text = text.replace(old, new)
        results.append(text)
    return results

def _text_replace(string: str, replacer: dict) -> str:
    for k,v in replacer.items():
        string = string.replace(k, v)
    return string

def get_sub_list_index(sub_list: list,
                       main_list: list,
                       left: int = 0,
                       right: Optional[int] = None) -> list[tuple[int, int]]:
    """Find all non-overlapping occurrences of ``sub_list`` inside ``main_list``.

    left / right:
        inclusive search bounds in main_list; ``right`` defaults to the last
        index and is clamped to it.
    Returns:
        a list of (start, end) inclusive index pairs, left to right.

    Fixes vs original: ``if not right`` silently discarded an explicit
    ``right=0`` (0 is falsy) — now only ``None`` triggers the default; the
    return annotation said ``tuple[int, int]`` but a list of pairs is
    returned. The matching loop itself is unchanged.
    """
    if right is None:
        right = len(main_list)-1
    right = min(len(main_list)-1, right)
        
    ret = []
    skip = 0  # positions still covered by the previous match (enforces non-overlap)
    for idx in range(left, len(main_list)):
        if skip:
            skip -= 1
            continue
        for sidx, s in enumerate(sub_list):
            # remaining window [idx, right] is too short to contain sub_list
            if len(sub_list) > len(main_list[idx:right+1]):
                break
            if s == main_list[idx + sidx]:
                pass
            else:
                break
            if sidx == len(sub_list) - 1:
                ret.append((idx, idx+sidx))
                skip += sidx  # jump past the matched span
    return ret

def ppl(sequence_logprob: list[float],
        default_return: float=2**40) -> float:
    """Perplexity of a sequence: 2 ** (-mean of ``sequence_logprob``).

    Assumes the log-probabilities are base-2 (the code exponentiates with 2)
    — TODO confirm against the producer of these logprobs.
    Returns ``default_return`` (with a warning) for an empty or None input.

    The original condition ``not sequence_logprob or sequence_logprob == []``
    was redundant: ``not x`` already covers the empty list.
    """
    if not sequence_logprob:
        warnings.warn(f"输入为空list或None, 返回默认值{default_return}")
        return default_return
    mean_neg_logprob = - sum(sequence_logprob) / len(sequence_logprob)
    return 2 ** mean_neg_logprob

def acc(a: list, b:list) -> float:
    """Element-wise accuracy: fraction of positions where a and b agree.

    Raises ValueError when lengths differ or when both inputs are empty
    (the original hit an uninformative ZeroDivisionError on empty input).
    """
    if len(a) != len(b):
        raise ValueError(f'长度不一致,分别为{len(a)}和{len(b)}.')
    if not a:
        raise ValueError('inputs are empty; accuracy is undefined.')
    return sum(1 for _a, _b in zip(a, b) if _a == _b) / len(a)
    
    

# def convert_jsonl_to_excel(input_file_name, output_file_name):
#     f = read_file(input_file_name)
#     f = standard_func(f)
#     wd = openpyxl.Workbook()
#     ws = wd.active
#     keys = f[0].keys()
#     ws.append([str(i) for i in keys])
#     for i in f:
#         ws.append([str(i[k]) for k in keys])
#     wd.save(output_file_name)

def pprint(*args, **kwargs):
    """Debug print, deliberately disabled: accepts anything, does nothing.

    Re-enable by delegating to ``print(*args, **kwargs)``.
    """
    return None


def get_file(path: str, 
             endswith: str='.json') -> list[str]:
    """Recursively collect all files under ``path`` whose name ends with
    ``endswith`` (default '.json'), returning their joined paths.
    """
    matched = []
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            matched.extend(get_file(child, endswith=endswith))
        elif child.endswith(endswith):
            matched.append(child)
    return matched


class DuplicateVanisher:
    """
        Helps manage data and deduplicate it efficiently.

        NOTE(review): placeholder only — no implementation yet.
    """
    ...
    
    
def is_equal_len(datas_list: list[list],
                 std_len: Optional[int] = None) -> bool:
    """Return True if every list in ``datas_list`` has length ``std_len``.

    When ``std_len`` is None, the first list's length is used as the standard.

    Fixes vs original: ``if not std_len`` treated an explicit ``std_len=0``
    as "use the default", so checking for all-empty lists was impossible;
    the ``if ... return True else return False`` shape is collapsed.
    """
    if std_len is None:
        std_len = len(datas_list[0])
    return all(len(datas) == std_len for datas in datas_list)
    
def sep_datas_list_equally(datas_list: list[list],
                      part: int) -> list[list[list]]:
    """
        Given n lists of m items each, split every list into ``part`` chunks,
        yielding an n * part * k structure.

        (Cross-list length validation was deliberately disabled upstream.)
    """
    result = []
    for datas in datas_list:
        chunk = math.ceil(len(datas) / part)
        result.append([datas[idx * chunk: (idx + 1) * chunk]
                       for idx in range(part)])
    return result
    
def sep_datas_equally(datas: list,
                      part: int) -> list[list]:
    """Divide ``datas`` into ``part`` chunks of ceil(len/part) items each;
    trailing chunks may be shorter or empty.
    """
    chunk_size = math.ceil(len(datas) / part)
    return [datas[p * chunk_size: (p + 1) * chunk_size]
            for p in range(part)]


def sep_datas(datas: list,
              block_size: int = 2
) -> list[list]:
    """Chunk ``datas`` into consecutive blocks of ``block_size`` items
    (the final block may be smaller).
    """
    total_blocks = math.ceil(len(datas) / block_size)
    blocks = []
    for b in range(total_blocks):
        blocks.append(datas[b * block_size: (b + 1) * block_size])
    return blocks


def add_time_suffix(string: str) -> str:
    """Append a '-time-<today>' suffix (ISO date) to ``string``."""
    return f"{string}-time-{date.today()}"

def remove_time_suffix(string: str) -> str:
    """Return the part of ``string`` before the first '-time-' marker
    (the whole string when no marker is present).
    """
    return string.partition('-time-')[0]
    
def sentence_md5(sentences):
    """MD5 hex digest of ``sentences`` (a str, or a list of str joined by '|')."""
    if isinstance(sentences, str):
        sentences = [sentences]
    joined = "|".join(sentences)
    return hashlib.md5(joined.encode("utf-8")).hexdigest()

def get_time() -> date:
    """Return today's date as a ``datetime.date`` (renders as 'YYYY-MM-DD')."""
    return date.today()


    

if __name__ == '__main__':
    # Ad-hoc smoke test: loads a machine-specific dataset; the result is unused.
    datas = read_jsonl('/mnt/user/linzhixin/heatmap/data/edu-data-20250109/one-shot-filted/20250109-valid/1-time-2025-02-18-pos-train.jsonl')