from zkl_aiutils_datasets import load_dataset
from zkl_pyutils_fsspec import FsLike, resolve_fs

from .write_embeddings import EmbeddingWriter

def process_str_embeddings(
    fs: FsLike,
    output_dir: str,
    model_name: str,
    type_name: str,
    tensor_parallel_size: int,
    batch_size: int = 1024,
    check_existing_keys: bool = False
):
    """
    Collect, deduplicate, and sort every string in one dataset split, then
    write embeddings for the unique strings in batches.

    Args:
        fs: Filesystem-like object or spec; resolved via ``resolve_fs`` and
            used to load the dataset.
        output_dir: Directory the ``EmbeddingWriter`` writes embeddings into.
        model_name: Embedding model name forwarded to ``EmbeddingWriter``.
        type_name: Key selecting the child dataset from
            ``dataset.named_children``.
        tensor_parallel_size: Tensor-parallelism degree for the writer.
        batch_size: Number of unique strings embedded per writer call.
        check_existing_keys: Forwarded to ``EmbeddingWriter``. The keys
            produced here are already unique, so this is normally False.
    """
    print("Starting string deduplication process...")

    # Load the dataset and select the requested split.
    fs = resolve_fs(fs)
    dataset = load_dataset(fs)
    type_dataset = dataset.named_children[type_name]
    print(f"Dataset loaded from: {fs}")

    # Collect and deduplicate strings. len() may be unsupported (e.g. a
    # streaming dataset), in which case progress is reported without a total.
    unique_strings = set()
    print("Collecting and deduplicating strings...")
    try:
        num_samples = len(type_dataset)
    except TypeError:
        num_samples = None

    for i, sample in enumerate(type_dataset):
        if (i + 1) % 10000 == 0:
            if num_samples is not None:
                print(f"Processing sample {i + 1}/{num_samples}")
            else:
                print(f"Processing sample {i + 1}")

        # Each sample is iterable; skip missing entries, coerce the rest.
        for text in sample:
            if text is not None:
                unique_strings.add(str(text))

    print("Finished collecting and deduplicating strings.")

    # Sorting gives a deterministic write order across runs.
    unique_sorted_strings = sorted(unique_strings)
    total = len(unique_sorted_strings)
    print(f"Total unique strings found: {total}")

    if total == 0:
        # Nothing to embed — don't open the writer for zero batches.
        print("No strings to embed; skipping write phase.")
        print("Finished processing all samples.")
        return

    # Write embeddings
    print("Writing embeddings for unique sorted strings...")
    with EmbeddingWriter(
        output_dir,
        model_name,
        tensor_parallel_size=tensor_parallel_size,
        check_existing_keys=check_existing_keys  # Keys are already unique
    ) as embedding_writer:
        for start in range(0, total, batch_size):
            batch = unique_sorted_strings[start:start + batch_size]
            # Progress = fraction of strings written after this batch; using
            # len(batch) (not batch_size) keeps the final partial batch from
            # reporting more than 100%.
            progress = (start + len(batch)) / total
            print(f"Progress: {progress:.2%}")

            embedding_writer.embedding_and_write_batch(batch)

    print("Finished processing all samples.")