import json

import httpx
from utils.logger import get_logger

logger = get_logger()

def notify(data, callback_url, task_id, status: str):
    """POST a task-status callback to *callback_url*.

    Best-effort: any failure is logged and swallowed so callers are never
    interrupted by an unreachable callback endpoint.

    :param data: payload sent under the "metaData" key
    :param callback_url: URL that receives the JSON callback body
    :param task_id: task identifier echoed back in the body
    :param status: task status string
    """
    try:
        response = httpx.post(callback_url,
                              json={"task_id": task_id,
                                    "status": status,
                                    "metaData": data},
                              headers={"Content-Type": "application/json"})
        # NOTE(review): only 201 counts as success here — confirm the callback
        # service never replies 200/204 for a successful notification.
        if response.status_code != 201:
            logger.error(f"error to notify {response.text}")
    except Exception:
        # Deliberately best-effort: log the full traceback, do not propagate.
        import traceback
        logger.error(f"post callback url failed: {traceback.format_exc()}")

def parse_s3url(url: str):
    """Split an ``s3://bucket/key`` URL into ``(bucket_name, object_name)``.

    :param url: S3 URL of the form ``s3://<bucket>/<key>``
    :return: tuple ``(bucket_name, object_name)``
    :raises ValueError: if *url* does not start with ``s3://`` or has no
        object key after the bucket (the original code also raised
        ValueError in the no-key case, via a failed tuple unpack)
    """
    prefix = "s3://"
    if not url.startswith(prefix):
        raise ValueError(f"not an s3 url: {url}")
    bucket_name, _, object_name = url[len(prefix):].partition('/')
    if not object_name:
        raise ValueError(f"s3 url has no object key: {url}")
    return bucket_name, object_name

def read_metadata(storage, bucket_name, object_name):
    """Fetch an object from *storage* and parse its contents as JSON.

    :param storage: client exposing ``get_object(bucket, key)`` that returns
        a file-like object with ``.read()``
    :param bucket_name: bucket containing the metadata object
    :param object_name: key of the metadata object
    :return: parsed JSON metadata
    :raises FileNotFoundError: if the object cannot be fetched or is not
        valid JSON (kept as FileNotFoundError so existing callers still work)
    """
    try:
        data = storage.get_object(bucket_name, object_name)
        metadata = json.loads(data.read())
    except Exception as exc:
        # Bug fix: the message previously printed object_name twice instead
        # of bucket_name/object_name. Chain the cause for debuggability.
        raise FileNotFoundError(
            f"metadata not found {bucket_name}/{object_name}"
        ) from exc
    return metadata

def get_consecutive_ids(lst, index=0):
    """Group the indices of consecutive equal values in *lst*.

    (Docstring fix: the previous docstring claimed a generator of
    ``((start_index, end_index), value)`` tuples, which this function has
    never produced.)

    :param lst: sequence of comparable elements
    :param index: starting offset for the generated indices (default 0)
    :return: list of index lists, one per run of equal adjacent values,
        e.g. ``get_consecutive_ids([1, 1, 2]) == [[0, 1], [2]]``
    """
    from itertools import groupby

    runs = []
    for _, group in groupby(lst):
        length = sum(1 for _ in group)
        runs.append(list(range(index, index + length)))
        index += length
    return runs

def cut_list(indices, lst):
    """Split *lst* into sublists at the given cut positions.

    Boundaries are ``[0] + indices + [len(lst) - 1]``; each adjacent pair
    ``(start, stop)`` yields ``lst[start:stop]``. Equal adjacent boundaries
    are skipped, and when a segment's stop boundary points at the final
    element it is extended by one so the last element is included.

    :param indices: cut positions inside the list
    :param lst: list to split
    :return: list of non-empty sublists
    """
    boundaries = [0, *indices, len(lst) - 1]
    last = len(lst) - 1
    pieces = []
    for start, stop in zip(boundaries, boundaries[1:]):
        if start == stop:
            continue
        # A stop boundary at the last element means "through the end".
        if stop == last:
            stop += 1
        pieces.append(lst[start:stop])
    return pieces

def has_word(word, content):
    """Return True if the regex pattern *word* matches anywhere in *content*.

    Note: *word* is used as a regular expression, not a literal string, so
    regex metacharacters in it are interpreted.
    """
    import re
    return re.search(word, content) is not None