# profner_ner_master / generation_script.py
# This file was generated from the train_spacy.txt and valid_spacy.txt files.
# All labels have been replaced by B-PROFESION or I-PROFESION.
# The training and validation sets keep half of the sentences with labels
# and half without them.
# Each non-empty input line has 5 whitespace-separated columns:
# token, sentence id, two unused columns, and the label.
from collections import defaultdict
import random
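# NOTE: the shuffles below are unseeded, so the generated splits vary between
# runs; call random.seed(<value>) here if reproducible splits are needed.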
def contiene_b(frase):
    # True if the sentence contains at least one B- (entity-begin) label
    return any(label.startswith("B-") for _, label in frase)
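# For example (hypothetical sentences):
#   contiene_b([("el", "O"), ("médico", "B-PROFESION")]) -> True
#   contiene_b([("hola", "O"), ("mundo", "O")]) -> False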
def procesar_training_set_balanceado(archivo_entrada, archivo_salida, archivo_ids):
frases = defaultdict(list)
with open(archivo_entrada, encoding="utf-8") as f:
for linea in f:
if linea.strip():
partes = linea.strip().split()
if len(partes) == 5:
token, doc_id, _, _, label = partes
frases[doc_id].append((token, label))
con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]
    # We want roughly half of the selected sentences to contain a B- label
n = min(len(con_b_ids), len(sin_b_ids))
selected_ids = con_b_ids[:n] + sin_b_ids[:n]
random.shuffle(selected_ids)
    # Save the selected IDs
    with open(archivo_ids, "w", encoding="utf-8") as f_ids:
for id_ in selected_ids:
f_ids.write(f"{id_}\n")
    # Save the file in CoNLL format
with open(archivo_salida, "w", encoding="utf-8") as out:
for id_ in selected_ids:
for token, label in frases[id_]:
out.write(f"{token} {label}\n")
out.write("\n")
def procesar_dev_test_balanceado(archivo_entrada, archivo_salida_dev, archivo_salida_test, archivo_ids_dev, archivo_ids_test):
frases = defaultdict(list)
with open(archivo_entrada, encoding="utf-8") as f:
for linea in f:
if linea.strip():
partes = linea.strip().split()
if len(partes) == 5:
token, doc_id, _, _, label = partes
frases[doc_id].append((token, label))
    # Split documents into those with and without a B- label
con_b_ids = [id_ for id_, frase in frases.items() if contiene_b(frase)]
sin_b_ids = [id_ for id_, frase in frases.items() if not contiene_b(frase)]
    # Balance the dev/test split
random.shuffle(con_b_ids)
mitad_b = len(con_b_ids) // 2
dev_ids_b = con_b_ids[:mitad_b]
test_ids_b = con_b_ids[mitad_b:]
random.shuffle(sin_b_ids)
mitad_sin = len(sin_b_ids) // 2
dev_ids_sin = sin_b_ids[:mitad_sin]
test_ids_sin = sin_b_ids[mitad_sin:]
dev_ids = dev_ids_b + dev_ids_sin
test_ids = test_ids_b + test_ids_sin
random.shuffle(dev_ids)
random.shuffle(test_ids)
    # Save the IDs
    with open(archivo_ids_dev, "w", encoding="utf-8") as f_dev, open(archivo_ids_test, "w", encoding="utf-8") as f_test:
for id_ in dev_ids:
f_dev.write(f"{id_}\n")
for id_ in test_ids:
f_test.write(f"{id_}\n")
    # Write the output files
def escribir(ids, archivo_salida):
with open(archivo_salida, "w", encoding="utf-8") as out:
for id_ in ids:
for token, label in frases[id_]:
out.write(f"{token} {label}\n")
out.write("\n")
escribir(dev_ids, archivo_salida_dev)
escribir(test_ids, archivo_salida_test)
# 🛠️ Usage
procesar_training_set_balanceado("train_spacy.txt", "train_conll.txt","train_ids.txt")
procesar_dev_test_balanceado(
"valid_spacy.txt",
"dev_conll.txt",
"test_conll.txt",
"dev_ids.txt",
"test_ids.txt"
)
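# Optional sanity check (a minimal sketch, not part of the original pipeline):
# count labelled vs. unlabelled sentences in a generated CoNLL file to verify
# the roughly 50/50 balance. `_contar_balance` is a hypothetical helper name.
def _contar_balance(path):
    con_b = sin_b = 0
    n_tokens, tiene_b = 0, False
    with open(path, encoding="utf-8") as f:
        for linea in f:
            if not linea.strip():
                if n_tokens:  # blank line closes a sentence block
                    if tiene_b:
                        con_b += 1
                    else:
                        sin_b += 1
                n_tokens, tiene_b = 0, False
            else:
                n_tokens += 1
                if linea.split()[1].startswith("B-"):  # lines are "token label"
                    tiene_b = True
    print(f"{path}: {con_b} sentences with B, {sin_b} without")

for _p in ("train_conll.txt", "dev_conll.txt", "test_conll.txt"):
    _contar_balance(_p)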
# Load the generated CoNLL files into a Hugging Face DatasetDict.
from datasets import load_dataset, Dataset, DatasetDict
def normalizar_etiqueta(label):
if label.startswith("B-"):
return "B-PROFESION"
elif label.startswith("I-"):
return "I-PROFESION"
return label
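# For example, a hypothetical original tag "B-SITUACION_LABORAL" becomes
# "B-PROFESION", "I-SITUACION_LABORAL" becomes "I-PROFESION", and "O" stays "O".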
def cargar_y_preparar_conll(paths):
def parse_conll_dataset(file_path):
raw = load_dataset("text", data_files=file_path)["train"]
tokens = []
ner_tags = []
current_tokens = []
current_tags = []
for example in raw:
line = example["text"]
if not line.strip():
if current_tokens:
tokens.append(current_tokens)
ner_tags.append(current_tags)
current_tokens = []
current_tags = []
else:
token, tag = line.strip().split()
current_tokens.append(token)
current_tags.append(normalizar_etiqueta(tag))
        # Flush the last sentence in case the file does not end with a blank line
        if current_tokens:
            tokens.append(current_tokens)
            ner_tags.append(current_tags)
        return {"tokens": tokens, "ner_tags": ner_tags}
    # Load and parse each split
parsed = {split: parse_conll_dataset(path) for split, path in paths.items()}
    # Build the label2id mapping
all_labels = set(tag for split_data in parsed.values() for seq in split_data["ner_tags"] for tag in seq)
label_list = sorted(all_labels)
label2id = {label: i for i, label in enumerate(label_list)}
id2label = {i: label for label, i in label2id.items()}
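    # Assuming only the normalized tags occur, this yields
    # label2id == {"B-PROFESION": 0, "I-PROFESION": 1, "O": 2}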
def tag_ids(ner_tags):
return [[label2id[tag] for tag in seq] for seq in ner_tags]
dataset = DatasetDict({
split: Dataset.from_dict({
"tokens": parsed_data["tokens"],
"ner_tags": tag_ids(parsed_data["ner_tags"])
})
for split, parsed_data in parsed.items()
})
return dataset, label2id, id2label
paths = {
"train": "train_conll.txt",
"validation": "dev_conll.txt",
"test": "test_conll.txt"
}
dataset, label2id, id2label = cargar_y_preparar_conll(paths)
from datasets import Features, Sequence, ClassLabel, Value
# label2id was returned above; the ClassLabel names must follow the same order
# as the ids (label2id keys are already in id order, e.g. ['B-PROFESION', 'I-PROFESION', 'O'])
ner_feature = Sequence(ClassLabel(names=list(label2id.keys())))
features = Features({
"tokens": Sequence(Value("string")),
"ner_tags": ner_feature
})
# Apply to each split of the dataset
for split in dataset:
dataset[split] = dataset[split].cast(features)
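# Optional check (a minimal sketch, not in the original script): after the cast,
# ner_tags is a Sequence(ClassLabel) feature, so ids decode back to label strings.
ejemplo = dataset["train"][0]
etiquetas = dataset["train"].features["ner_tags"].feature.int2str(ejemplo["ner_tags"])
print(list(zip(ejemplo["tokens"], etiquetas)))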