import os
import sys

from areal.api.cli_args import load_expr_config
from areal.dataset import get_custom_dataset
from areal.experimental.trainer import PPOTrainer
from areal.reward.math_parser import process_results
from areal.utils import logging
from areal.utils.hf_utils import load_hf_tokenizer
from areal.utils.stats_logger import StatsLogger

from tir_workflow import TIRGRPOConfig, TIRWorkflow  # isort: skip

# Module logger; note this is the project's logging helper (areal.utils.logging), not stdlib logging.
logger = logging.getLogger("TIR Training")


def math_reward_fn(prompt, completions, prompt_ids, completion_ids, answer, **kwargs):
    """Binary math reward: 1 if the completion's parsed answer matches `answer`, else 0.

    Only `completions` and `answer` are used; the other parameters exist to
    satisfy the workflow's reward-fn signature. Extra tool-usage bonuses
    (e.g. `tool_using`/`tool_status` in kwargs) are intentionally not applied.

    Args:
        prompt: Unused; prompt text.
        completions: Model completion text passed to the math parser.
        prompt_ids: Unused; tokenized prompt.
        completion_ids: Unused; tokenized completion.
        answer: Reference answer compared against the parsed completion.
        **kwargs: Extra workflow metadata; ignored.

    Returns:
        int: 1 for a correct answer, 0 otherwise.
    """
    # process_results returns (is_correct, (extracted_answer, extracted_solution));
    # only the correctness flag matters for this reward.
    retval, _ = process_results(completions, answer)
    return int(retval)


def main(args):
    """Load the experiment config, build datasets and TIR workflows, and train.

    Args:
        args: Raw CLI argument list (typically ``sys.argv[1:]``) parsed by
            ``load_expr_config`` into a ``TIRGRPOConfig``.
    """
    config, _ = load_expr_config(args, TIRGRPOConfig)

    logger.info("Starting TIR training")
    logger.info(f"Configuration: {config.experiment_name}")
    logger.info(f"Model: {config.actor.path}")
    logger.info(f"Batch size: {config.train_dataset.batch_size}")

    tokenizer = load_hf_tokenizer(config.tokenizer_path)

    # Train/test splits come from the custom dataset loader.
    train_dataset = get_custom_dataset(
        split="train", dataset_config=config.train_dataset, tokenizer=tokenizer
    )
    valid_dataset = get_custom_dataset(
        split="test", dataset_config=config.valid_dataset, tokenizer=tokenizer
    )

    # The trainer is a context manager; resources are released on exit.
    with PPOTrainer(config, train_dataset, valid_dataset) as trainer:
        # Keyword arguments shared by both rollout workflows.
        shared = dict(
            reward_fn=math_reward_fn,
            tokenizer=trainer.tokenizer,
            tir_config=config.tir,
            enable_thinking=False,
        )

        # Rollout workflow used during training.
        train_workflow = TIRWorkflow(
            gconfig=config.gconfig,
            dump_dir=os.path.join(
                StatsLogger.get_log_path(config.stats_logger), "generated"
            ),
            **shared,
        )

        # Evaluation rollouts sample at a fixed temperature of 0.6 and
        # report stats under a dedicated scope.
        eval_workflow = TIRWorkflow(
            gconfig=config.gconfig.new(temperature=0.6),
            rollout_stat_scope="eval-rollout",
            dump_dir=os.path.join(
                StatsLogger.get_log_path(config.stats_logger), "generated-eval"
            ),
            **shared,
        )

        trainer.train(train_workflow, eval_workflow)


if __name__ == "__main__":
    # Forward CLI arguments (minus the program name) to the entry point.
    main(sys.argv[1:])
