import json
import random

import numpy as np


def greedy_sampling(sentences, labels, G, entity_set):
    """
    Greedy Sampling Algorithm for CL-NER.

    Each sentence is assigned to the slice associated with its rarest
    entity type (low-frequency types get priority), subject to a per-slice
    quota; sentences containing no quota-eligible entity are assigned to a
    random slice that still has room.

    Args:
    - sentences: List of sentences
    - labels: List of label sequences corresponding to the sentences
    - G: Number of slices
    - entity_set: List of unique entity labels in the dataset
      (assumed to have exactly G entries, one per slice — TODO confirm)

    Returns:
    - A list of G slices, each slice is a tuple containing sentences and their labels
    """
    # Initialize datasets for G slices
    datasets = [([], []) for _ in range(G)]

    # Count of sentences currently assigned to each slice
    cnt = [0] * G

    # Per-slice quota.
    # NOTE(review): len(entity) here is the length of the label *string*
    # (e.g. len("address") == 7), so quotas can greatly exceed
    # len(sentences). This looks like it was meant to be an equal-share or
    # frequency-based quota — confirm against the sampling paper.
    n = [len(sentences) * len(entity) // len(entity_set) for entity in entity_set]

    # Sort entities ascending by corpus frequency so rare types win ties.
    sorted_entities = sorted(entity_set, key=lambda e: sum(label.count(e) for label in labels))

    for s, t in zip(sentences, labels):
        is_select = False

        # Allocate low frequency entity types in priority
        for e in sorted_entities:
            if e not in t:
                continue

            # Find the slice index which this entity should belong to
            j = entity_set.index(e)

            if cnt[j] < n[j]:
                datasets[j][0].append(s)
                datasets[j][1].append(t)
                cnt[j] += 1
                is_select = True
                break

        # Otherwise, the sentence is randomly assigned to an incomplete slice.
        if not is_select:
            incomplete_slices = [i for i in range(G) if cnt[i] < n[i]]
            # Bug fix: random.choice raises IndexError on an empty sequence,
            # so when every quota is already met fall back to any slice.
            if not incomplete_slices:
                incomplete_slices = list(range(G))
            selected_slice = random.choice(incomplete_slices)
            datasets[selected_slice][0].append(s)
            datasets[selected_slice][1].append(t)
            cnt[selected_slice] += 1

    return datasets

with open(file="train.json", mode="r", encoding="utf-8") as f:
    sentences = []
    labels = []
    for line in f:
        data = json.loads(line)
        sentences.append(data["text"])
        label = ["O"] * len(data["text"])
        entity = data["label"]
        for type, type_ents in entity.items():
            for ent_name, ent_pos in type_ents.items():
                ent_pos = ent_pos[0]
                label[ent_pos[0]: ent_pos[1] + 1] = [type] * (ent_pos[1] - ent_pos[0] + 1)
        labels.append(label)

    entity_set = ["address", "book", "company", "game", "goverment", "movie", "name", "organization", "position", "scene"]
    G = 10

    train_dataset = greedy_sampling(sentences, labels, G, entity_set)
    for datas in train_dataset:
        print(len(datas[0]))
        for i in range(len(datas[0])):
            print(datas[0][i])
            print(datas[1][i])
            print(len(datas[0][i]), len(datas[1][i]))
            if i == 10:
                break


    # for i in range(len(sentences)):
    #     print(sentences[i])
    #     print(labels[i])
    #     print(len(sentences[i]), len(labels[i]))
    #     if i == 10:
    #         break
    # exit(0)
    # print(sentences)
    # print(labels)

