import json
from typing import Dict, Any, Tuple, List, Optional

from tqdm import tqdm

from config import CUSTOM_SEP
from preprocess.file_spliter import get_custom_features, get_batche_index


def split_json(data, max_length):
    """Greedily split a list or dict into serialized JSON string chunks.

    Items (list elements, or single-key ``{key: value}`` objects for a
    dict) are accumulated into a buffer; the buffer is flushed to a chunk
    just before the sum of per-item serialized lengths would exceed
    ``max_length``. Note the separators/brackets added on join are not
    counted, so a chunk's final string may slightly exceed ``max_length``.

    Args:
        data: A list or a dict. Any other type yields an empty result.
        max_length: Target maximum serialized length per chunk.

    Returns:
        List[str]: For list input, each chunk is a valid JSON array string
        (``"[...]"``). For dict input, each chunk is a comma-joined run of
        single-key JSON objects WITHOUT wrapping braces/brackets — not
        itself valid JSON, but kept as-is for backward compatibility.
    """
    if isinstance(data, list):
        items: List[Any] = list(data)
        wrap = True
    elif isinstance(data, dict):
        # Treat each key/value pair as an independent splittable item.
        items = [{k: v} for k, v in data.items()]
        wrap = False
    else:
        # Non-container input cannot be chunked item-by-item.
        return []

    def serialize(batch: List[Any]) -> str:
        # Dump each buffered item and join; list input gets brackets so
        # the chunk itself is a valid JSON array.
        body = ",".join(json.dumps(i, ensure_ascii=False) for i in batch)
        return f'[{body}]' if wrap else body

    chunks: List[str] = []
    buffer: List[Any] = []
    buffer_len = 0

    for item in items:
        item_len = len(json.dumps(item, ensure_ascii=False))
        # Flush before the buffer would overflow. The `buffer` guard fixes
        # a bug in the original: when the very first item alone exceeded
        # max_length, an empty buffer was flushed as a spurious '[]' chunk.
        if buffer and buffer_len + item_len > max_length:
            chunks.append(serialize(buffer))
            buffer, buffer_len = [], 0
        buffer.append(item)
        buffer_len += item_len

    # Flush whatever remains.
    if buffer:
        chunks.append(serialize(buffer))

    # Defensive: drop any empty strings (possible only for degenerate input).
    return [c for c in chunks if c]


def get_stc_blocks(
        chunk_size: int,
        row: Dict[str, Any]

):
    """Split one row's structured data into text chunks and build the
    parallel lists needed for a Milvus insert.

    Args:
        chunk_size: Maximum serialized length per chunk (see split_json).
        row: Input record. Must contain 'dataSrc' and 'dataId'; may
            contain 'structuredData' (a list of JSON-serializable items).

    Returns:
        Tuple of (vector ids, chunk strings, custom features, per-chunk
        source-id list, "start,end" index string).
    """
    src_id = row['dataSrc'] + CUSTOM_SEP + row['dataId']

    # NOTE(review): the original also built a list prepending
    # row['descData'] to row['structuredData'] but never used it, so
    # descData was never chunked. That dead code is removed here —
    # confirm whether descData was meant to be included in the chunks.
    chunks: List[str] = []
    for item in row.get('structuredData', []):
        chunks.extend(split_json(item, chunk_size))

    expr = get_custom_features(chunks)
    vec_id = [f'{src_id}-{i}' for i in range(len(chunks))]
    stc_id_ls = [src_id] * len(chunks)
    index_str = f'0,{len(chunks)}'
    return vec_id, chunks, expr, stc_id_ls, index_str


def insert_stc_to_milvus(
        rows,
        chunk_size: int,
        milvus_op: Any,
        embedder: Any,
        batch_size: int = 500
):
    """
    Process rows into text chunks, compute embeddings, and insert into Milvus in batches.

    Args:
        rows: List of input data rows.
        chunk_size: Size to split each row's text into blocks.
        milvus_op: Milvus operation wrapper with a `collection` attribute.
        embedder: Embedder with an `encode` method for batch embeddings.
        batch_size: Number of records to insert per batch.

    Returns:
        List of per-row "start,end" index strings produced by get_stc_blocks.
    """
    # Flatten every row into aligned parallel lists.
    index_ls: List[str] = []
    vec_ids: List[str] = []
    chunks: List[str] = []
    added_values: List[Any] = []
    stc_ids: List[str] = []

    for row in rows:
        row_vec_ids, row_chunks, row_added, row_src_ids, row_index = get_stc_blocks(chunk_size, row)
        index_ls.append(row_index)
        vec_ids += row_vec_ids
        chunks += row_chunks
        added_values += row_added
        stc_ids += row_src_ids

    # Nothing to insert — return the (possibly non-empty) index list as-is.
    if not stc_ids:
        return index_ls

    # Compute (start, end) slices for each storage batch.
    batches = get_batche_index(len(stc_ids), batch_size)

    progress = tqdm(batches, desc='Inserting stc vectors')
    for lo, hi in progress:
        batch_texts = chunks[lo:hi]
        embeddings = embedder.encode(batch_texts, show_progress_bar=False)

        # Assemble one record per chunk from the aligned slices.
        records = []
        for vec_id, block, added, src, embedding in zip(
                vec_ids[lo:hi], batch_texts, added_values[lo:hi],
                stc_ids[lo:hi], embeddings):
            records.append({
                'vecId': vec_id,
                'block': block,
                'addedValue': added,
                'srcId': src,
                'blockDenseEmbeddings': embedding
            })

        # Bulk insert the batch into Milvus.
        milvus_op.collection.insert(records)

    return index_ls
