# Standard library
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import ClassVar, List, Optional

# Third-party
import numpy as np
import datasets
from datasets import DatasetDict, load_dataset
from transformers import (
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    EarlyStoppingCallback,
)

# Project-local
from instruction_re.metrics import calculate_metrics
from instruction_re.utils.evaluate_utils import (
    show_classification_report,
    parse_relation_triple_with_svo_format,
    parse_relation_triple_with_template_format,
    parse_relation_triple_with_triple_format,
)
from instruction_re.core.datatypes import (
    TaskType,
    AnswerType,
    string_to_enum,
    string_list_to_task_type,
)
from instruction_re.utils.utils import (
    load_json,
    set_global_seed,
    pick_nearest_sample_from_dataset,
)
from instruction_re.formatters import *
from instruction_re.utils.data_utils import *

# Module-level logger; handlers/levels are configured inside main().
logger = logging.getLogger(__name__)

# Silence the fork warning emitted by HF fast tokenizers when num_proc > 1.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
#!/usr/bin/env python
# coding=utf-8
# NOTE(review): the shebang/encoding lines above sit mid-file (leftover from
# merging the HuggingFace example-script header) and have no effect here.
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.

import logging
import os
from dataclasses import dataclass, field
from typing import Optional


import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    HfArgumentParser,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

from typing import Optional


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # The only required argument; config/tokenizer fall back to it when their
    # dedicated fields below are left as None.
    model_name_or_path: str = field(
        metadata={
            "help": "Path to pretrained model or model identifier from huggingface.co/models"
        }
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained config name or path if not the same as model_name"
        },
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained tokenizer name or path if not the same as model_name"
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "Where to store the pretrained models downloaded from huggingface.co"
        },
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={
            "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
        },
    )
    model_revision: str = field(
        default="main",
        metadata={
            "help": "The specific model version to use (can be a branch name, tag name or commit id)."
        },
    )

    # Security-sensitive: enables execution of remote modeling code from the Hub.
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                "execute code present on the Hub on your local machine."
            )
        },
    )
    resize_position_embeddings: Optional[bool] = field(
        default=None,
        metadata={
            "help": (
                "Whether to automatically resize the position embeddings if `max_source_length` exceeds "
                "the model's position embeddings."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    root_data_dir: str = field(
        default="/home/wangxiaoli/datasets/IE_INSTRUCTIONS/RE",
        metadata={"help": "root data directory"},
    )

    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)."},
    )

    path_to_instructions: Optional[str] = field(
        default="instructions.json",
        metadata={"help": "file with instruction prompts"},
    )

    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The configuration name of the dataset to use (via the datasets library)."
        },
    )

    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data file (a jsonlines or csv file)."},
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
            )
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to model maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=1,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                "which is used during ``evaluate`` and ``predict``."
            )
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )

    forced_bos_token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to force as the first generated token after the decoder_start_token_id. "
                "Useful for multilingual models like mBART where the first generated token"
                "needs to be the target language token (Usually it is the target language token)"
            )
        },
    )

    def __post_init__(self):
        """Validate data-source arguments and fill in dependent defaults.

        Raises:
            ValueError: if neither a dataset name nor any data file is given,
                or if a supplied data file is not a csv/json file.
        """
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
            and self.test_file is None
        ):
            raise ValueError(
                "Need either a dataset name or a training, validation, or test file."
            )
        # Raise instead of ``assert``: asserts are stripped under ``python -O``
        # and must not be used for input validation.
        for attr in ("train_file", "validation_file", "test_file"):
            path = getattr(self, attr)
            if path is not None and path.split(".")[-1] not in ("csv", "json"):
                raise ValueError(f"`{attr}` should be a csv or a json file.")
        if self.val_max_target_length is None:
            # Evaluation target length defaults to the training target length.
            self.val_max_target_length = self.max_target_length


@dataclass
class OtherArguments:
    """Experiment-specific switches not covered by the HF argument classes."""

    # Allowed values for the free-form string options below.  Marked ClassVar
    # so the dataclass machinery treats them as shared constants, not fields
    # (they were previously unannotated, which worked but was fragile).
    answer_template_choice: ClassVar[List[str]] = [
        AnswerType.SVO.value,
        AnswerType.TEMPLATE.value,
        AnswerType.TRIPLE.value,
    ]

    condition_format_choice: ClassVar[List[str]] = [
        ContextFormat.TAG.value,
        ContextFormat.CONDITIONS.value,
    ]

    replace_labels_with_special_tokens: Optional[bool] = field(
        default=False,
        metadata={"help": "Replace labels with special tokens"},
    )
    replace_sep_token: Optional[bool] = field(
        default=False,
        metadata={"help": "Replace sep token with special tokens"},
    )

    answer_template: Optional[str] = field(
        default="svo",
        metadata={
            "help": "Answer template style, triple, svo or template",
        },
    )

    context_format: Optional[str] = field(
        default="conditions",
        metadata={
            "help": "Context format style, tag or conditions",
        },
    )

    # Few-shot mode: when enabled, ``training_num`` samples are drawn from the
    # training set (help text kept verbatim — it is a runtime string).
    few_shot: Optional[bool] = field(
        default=False,
        metadata={
            "help": "是否为few shot 设置，如果是的话，需要设置training_num， 从训练集中抽取的样本数"
        },
    )

    training_num: Optional[int] = field(
        default=10,
        metadata={"help": "Number of samples to extract from the training set"},
    )

    training_task: List[str] = field(
        default_factory=lambda: ["RE", "NER"],
        metadata={"help": "The main task for training"},
    )

    valid_task: List[str] = field(
        default_factory=lambda: ["RE"],
        metadata={"help": "The main task for validation"},
    )

    def __post_init__(self):
        """Validate the string options against the allowed choice lists.

        Raises:
            ValueError: if ``answer_template`` or ``context_format`` is set
                to a value outside the corresponding choice list.
        """
        if (
            self.answer_template is not None
            and self.answer_template not in self.answer_template_choice
        ):
            raise ValueError(
                f"Invalid value for answer_template. Valid choices are {self.answer_template_choice}"
            )

        if (
            self.context_format is not None
            and self.context_format not in self.condition_format_choice
        ):
            raise ValueError(
                f"Invalid value for context_format. Valid choices are {self.condition_format_choice}"
            )


def get_parse_fn(answer_type):
    """Return the triple-parsing function matching *answer_type*.

    ``answer_type`` is an AnswerType enum member; an unrecognized value
    yields ``None``.
    """
    template = answer_type.value
    if template == AnswerType.TRIPLE.value:
        return parse_relation_triple_with_triple_format
    if template == AnswerType.TEMPLATE.value:
        return parse_relation_triple_with_template_format
    if template == AnswerType.SVO.value:
        return parse_relation_triple_with_svo_format
    return None


def main():
    """Fine-tune a seq2seq model on instruction-formatted RE data.

    Pipeline: parse CLI/yaml arguments -> load raw json splits -> optionally
    attach semantically similar in-context examples -> format every example
    per task/template -> tokenize -> train with ``Seq2SeqTrainer``, scoring
    evaluation with a relation-triple precision/recall/F1 metric.
    """
    parser = HfArgumentParser(
        (
            ModelArguments,
            DataTrainingArguments,
            Seq2SeqTrainingArguments,
            OtherArguments,
        )
    )

    if len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
        # A single .yaml argument means: read all arguments from that file.
        model_args, data_args, training_args, other_args = parser.parse_yaml_file(
            os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args, other_args = (
            parser.parse_args_into_dataclasses()
        )

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # One-line environment summary, emitted by every process.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log
        # level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    # Formatting configuration from the extra argument group.
    training_num = other_args.training_num
    context_format = other_args.context_format
    answer_type = string_to_enum(other_args.answer_template)

    set_global_seed(training_args.seed)

    # Instruction prompts, one entry per task.
    instructions = load_json(data_args.path_to_instructions)

    if data_args.dataset_name is None:
        raise ValueError("dataset_name is required")

    dataset_name = data_args.dataset_name
    data_config = get_data_config(
        data_args.root_data_dir, data_args.dataset_name, training_num
    )
    raw_datasets = load_dataset(
        "json",
        data_files=data_config,
        cache_dir=model_args.cache_dir,
    )

    dataset_dir = os.path.join(data_args.root_data_dir, dataset_name)
    label2token = get_label2token_by_dataset(
        dataset_dir, other_args.replace_labels_with_special_tokens
    )
    options = list(label2token.values())

    # Map task-name strings ("RE", "NER", ...) to TaskType enum members.
    train_tasks = string_list_to_task_type(other_args.training_task)
    valid_tasks = string_list_to_task_type(other_args.valid_task)

    task_str = "+".join([task.value for task in train_tasks])
    model_name = model_args.model_name_or_path.split("/")[-1]

    # Output directory encodes dataset / model / shot count / task mix so
    # runs with different settings never collide.
    training_args.output_dir = f"{training_args.output_dir}/{dataset_name}/{model_name}/{training_num}_shot/{task_str}"

    # The RC task needs an in-context example: retrieve the semantically most
    # similar training sentence via a FAISS index over sentence embeddings.
    if TaskType.RC in train_tasks:
        from sentence_transformers import SentenceTransformer

        embedding_model = SentenceTransformer(
            "sentence-transformers/paraphrase-MiniLM-L6-v2"
        )

        def get_embedding_from_data_item(x):
            # Embedding column used to build / query the FAISS index.
            return {"embedding": embedding_model.encode(x["sentence"])}

        def get_nearest_sample_from_train_set(x):
            # The single nearest training sample becomes the ICL example.
            return {
                "examples": pick_nearest_sample_from_dataset(
                    embedding_model, train_dataset_embedding, x["sentence"], 1
                )
            }

        # Add the "embedding" column to both splits.
        train_dataset_embedding = raw_datasets["train"].map(
            get_embedding_from_data_item,
            batched=False,
            desc="training set embedding",
        )
        valid_dataset_embedding = raw_datasets["valid"].map(
            get_embedding_from_data_item,
            batched=False,
            desc="valid set embedding",
        )
        train_dataset_embedding.add_faiss_index(column="embedding")

        # Attach the retrieved neighbour as the "examples" column.
        raw_datasets["train"] = train_dataset_embedding.map(
            get_nearest_sample_from_train_set,
            remove_columns=["embedding"],
            load_from_cache_file=False,
            desc="get nearest examples for training set",
        )
        raw_datasets["valid"] = valid_dataset_embedding.map(
            get_nearest_sample_from_train_set,
            remove_columns=["embedding"],
            load_from_cache_file=False,
            desc="get nearest examples for valid set",
        )

    format_kwargs = {
        "context_format": context_format,
        "answer_type": answer_type,
        "dataset_name": dataset_dir,
        "replace_label_to_special_token": other_args.replace_labels_with_special_tokens,
    }

    # Render every raw example into (instruction, context, labels) text.
    train_dataset = format_datasets_from_tasks(
        train_tasks,
        raw_datasets["train"],
        instructions,
        **format_kwargs,
    )
    valid_dataset = format_datasets_from_tasks(
        valid_tasks,
        raw_datasets["valid"],
        instructions,
        **format_kwargs,
    )
    test_dataset = format_datasets_from_tasks(
        valid_tasks,
        raw_datasets["test"],
        instructions,
        **format_kwargs,
    )

    # Show one formatted sample per training task (per-task blocks are
    # concatenated, so task k starts at offset k * raw_train_num).
    raw_train_num = raw_datasets["train"].num_rows
    for k, task in enumerate(train_tasks):
        start = k * raw_train_num
        print(f"task: {task.value}")
        print(train_dataset[start : start + 1])

    print(valid_dataset[0])
    print("=========processing data done=========")
    print(train_dataset.num_rows)
    print(valid_dataset.num_rows)

    # Bug fix: (a) do not shadow the imported ``datasets`` module; (b) keep
    # the result of ``shuffle`` — it returns a new DatasetDict, so the
    # original un-assigned call was a no-op.
    dataset_dict = DatasetDict(
        {"train": train_dataset, "valid": valid_dataset, "test": test_dataset}
    )
    dataset_dict = dataset_dict.shuffle(seed=training_args.seed)

    # Load config / tokenizer / model.
    model_config = AutoConfig.from_pretrained(
        (
            model_args.config_name
            if model_args.config_name
            else model_args.model_name_or_path
        ),
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        (
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else model_args.model_name_or_path
        ),
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=model_config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        trust_remote_code=model_args.trust_remote_code,
    )

    # Grow the embedding matrix if the tokenizer already holds more tokens
    # than the checkpoint (e.g. a previously-extended vocabulary).
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))

    if other_args.replace_sep_token:
        tokenizer.add_special_tokens({"sep_token": "<sep>"})
        model.resize_token_embeddings(len(tokenizer))
        assert tokenizer.sep_token == "<sep>", "sep token is not <sep>"

    if other_args.replace_labels_with_special_tokens:
        # One new token per relation label.
        label_tokens = list(label2token.values())
        tokenizer.add_tokens(label_tokens)
        model.resize_token_embeddings(len(tokenizer))

    if context_format == ContextFormat.TAG.value:
        # Entity-boundary marker tokens used by the TAG context format.
        tokenizer.add_tokens(
            [
                "<head>",
                "</head>",
                "<tail>",
                "</tail>",
            ]
        )
        # Bug fix: the original added these tokens without resizing the
        # embedding matrix (unlike every other token-adding branch above),
        # which would index out of range at train time.
        model.resize_token_embeddings(len(tokenizer))

    # NOTE(review): ``max_target_length`` / ``val_max_target_length`` and
    # ``pad_to_max_length`` are not applied during tokenization below —
    # ``preprocess_function`` truncates to the model maximum and defers
    # padding to the collator.  Presumably generation length should also be
    # driven by them; confirm against the training configuration.

    def preprocess_function(examples):
        # Tokenize instruction+context as encoder input and the label string
        # as the decoder target.
        instructions = examples["instruction"]
        context = examples["context"]
        labels = examples["labels"]

        return tokenizer(
            text=instructions,
            text_pair=context,
            text_target=labels,
            truncation=True,
            padding=False,  # DataCollatorForSeq2Seq pads dynamically per batch
        )

    remove_columns = ["instruction", "context"]

    train_dataset = dataset_dict["train"]
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    with training_args.main_process_first(desc="train dataset map pre-processing"):
        train_dataset = train_dataset.map(
            preprocess_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=remove_columns,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )

    eval_dataset = dataset_dict["valid"]
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))
    with training_args.main_process_first(desc="validation dataset map pre-processing"):
        eval_dataset = eval_dataset.map(
            preprocess_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=remove_columns,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )

    # Padded label positions are set to -100 so they are ignored by the loss.
    label_pad_token_id = (
        -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    )
    seq2seq_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=8 if training_args.fp16 else None,
    )

    def remove_special_tokens(text):
        # Strip pad / eos markers left in place by skip_special_tokens=False
        # (the sep token must survive decoding for triple parsing).
        return text.replace(tokenizer.pad_token, "").replace(tokenizer.eos_token, "")

    parse_fn = get_parse_fn(answer_type)

    def compute_metric(eval_preds):
        """Decode predictions and labels, parse both into relation triples,
        and return flattened precision/recall/F1 metrics."""
        preds, labels = eval_preds

        if isinstance(preds, tuple):
            preds = preds[0]

        # Replace -100 (ignored positions) before decoding.
        preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=False)
        decoded_preds = [remove_special_tokens(p) for p in decoded_preds]

        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=False)
        decoded_labels = [remove_special_tokens(p) for p in decoded_labels]

        batch_pred_triples = [
            parse_fn(p, options, tokenizer.sep_token) for p in decoded_preds
        ]
        batch_truth_triples = [
            parse_fn(p, options, tokenizer.sep_token) for p in decoded_labels
        ]

        epoch_metrics = calculate_metrics(
            batch_pred_triples, batch_truth_triples, options=options
        )

        # Flatten {label: {metric: value}} into {label_metric: value} for the
        # trainer's logging.
        result = {}
        for k, v in epoch_metrics.items():
            for k1, v1 in v.items():
                result[f"{k}_{k1}"] = v1

        show_classification_report(epoch_metrics)
        return result

    early_stop = EarlyStoppingCallback(early_stopping_patience=5)

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        # Bug fix: pass the *tokenized* eval split.  The original passed the
        # un-tokenized ``valid_dataset``, silently discarding the validation
        # preprocessing performed above.
        eval_dataset=eval_dataset,
        data_collator=seq2seq_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metric,
        callbacks=[early_stop],
    )

    trainer.train()

# Script entry point.
if __name__ == "__main__":
    main()
