# Copyright 2025 The Langfun Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint aggregator for Langfun evaluations."""

import concurrent.futures
import dataclasses
import os
import threading
import time
from typing import Annotated, Iterator

from langfun.core.eval.v2 import evaluation as evaluation_lib
from langfun.core.eval.v2 import example as example_lib
from langfun.core.eval.v2 import reporting
from langfun.core.eval.v2.runners import base

import pyglove as pg


class CheckpointMonitor(base.RunnerBase):
  """Runner for monitoring checkpoint files generated by other runners.

  Currently checkpoint monitor only supports aggregating per-example
  checkpoint files.

  A dedicated monitor thread (`_monitor_loop`) polls each evaluation's
  output directory for new checkpoint files and dispatches each newly
  discovered per-example checkpoint to a thread pool for aggregation
  (`_aggregate`). `start` returns immediately; `join` blocks until all
  evaluations are aggregated and re-raises any error encountered by the
  aggregation tasks or plugins.
  """

  NAME = 'checkpoint_monitor'

  plugins = [
      reporting.HtmlReporter(),
  ]

  checkpoint_pattern: Annotated[
      str, 'The glob pattern of the checkpoint files to monitor.'
  ] = 'checkpoint_*.bagz'

  monitor_inprogress_files: Annotated[
      bool,
      'If True, monitor in-progress files to aggregate.'
  ] = False

  poll_interval: Annotated[
      int,
      'The interval in seconds to poll for new checkpoint files.'
  ] = 5

  max_aggregation_threads: Annotated[
      int,
      'The maximum number of threads to aggregate checkpoints.'
  ] = 128

  bypass_old_ckpt_files_with_non_oop_errors: Annotated[
      bool,
      'If True, ignore old checkpoint files with non-oop errors.'
  ] = True

  ckpt_start_time: Annotated[
      float | None,
      (
          'The timestamp to treat checkpoint files modified before this '
          'time as old.'
      )
  ] = None

  @dataclasses.dataclass
  class _AggregationEntry:
    """Mutable bookkeeping for aggregating one evaluation's checkpoints."""

    evaluation: evaluation_lib.Evaluation
    # Output directory of the evaluation, containing checkpoint files and
    # `<example_id>.inprogress` marker files.
    output_dir: str
    # Glob pattern for in-progress marker files, or None when in-progress
    # monitoring is disabled.
    inprogress_file_pattern: str | None
    # Glob pattern for per-example checkpoint files.
    ckpt_file_pattern: str
    # Example IDs for which `on_example_start` has already been signaled.
    example_ids_inprogress: set[int]
    # Example IDs not yet picked up for aggregation.
    example_ids_to_be_aggregated: set[int]
    # Example IDs currently being aggregated on the thread pool.
    example_ids_being_aggregated: set[int]
    # Guards `is_completed` so `on_experiment_complete` fires exactly once.
    completion_lock: threading.Lock
    is_completed: bool = False

  def _on_bound(self):
    """Initializes mutable runner state when the symbolic object is bound."""
    super()._on_bound()
    self._monitor_thread = None
    self._aggregation_entries = []
    self._aggregator_pool = None
    # Set by aggregation tasks on failure; checked by the monitor loop to
    # abort the run and re-raised by `join`.
    self._error = None
    if self.ckpt_start_time is None:
      # Default the "old checkpoint" cutoff to the runner's creation time.
      # NOTE(review): `skip_notification` presumably prevents the rebind from
      # re-triggering `_on_bound` — confirm against pyglove rebind semantics.
      self.rebind(ckpt_start_time=time.time(), skip_notification=True)
    # Maps checkpoint file path to the mtime at which `_aggregate` bypassed
    # it; the monitor loop skips the file until its mtime advances.
    self._ckpt_bypass_timestamp: dict[str, float] = {}

  def start(self):
    """Starts monitoring and returns immediately (use `join` to block)."""
    # Reset the experiment state before getting started.
    self.current_run.experiment.reset()

    # Signal the start of the run.
    self.on_run_start()

    # Start the non-leaf nodes.
    for node in self.current_run.experiment.nonleaf_nodes:
      self.on_experiment_start(node)

    for evaluation in self.current_run.experiment.leaf_nodes:
      # This is not precise, but we at least notify example start.
      if not self.current_run.filter or self.current_run.filter(evaluation):
        self.on_experiment_start(evaluation)

        # Signal the start of the examples if we are not monitoring in-progress
        # files.
        if not self.monitor_inprogress_files:
          for example_id in self.current_run.examples_to_evaluate(evaluation):
            self._mark_example_started(evaluation, example_id)

        # Create the aggregation entries for polling.
        output_dir = self.current_run.output_dir(evaluation)
        self._aggregation_entries.append(
            self._AggregationEntry(
                evaluation=evaluation,
                output_dir=output_dir,
                ckpt_file_pattern=os.path.join(
                    output_dir, self.checkpoint_pattern
                ),
                inprogress_file_pattern=os.path.join(
                    output_dir, '*.inprogress'
                ) if self.monitor_inprogress_files else None,
                example_ids_to_be_aggregated=(
                    self.current_run.examples_to_evaluate(evaluation)
                ),
                example_ids_inprogress=set(),
                example_ids_being_aggregated=set(),
                completion_lock=threading.Lock(),
                is_completed=False,
            )
        )
      else:
        self.on_experiment_skipped(evaluation)

    self._aggregator_pool = concurrent.futures.ThreadPoolExecutor(
        max_workers=self.max_aggregation_threads
    )
    self._monitor_thread = threading.Thread(target=self._monitor_loop)
    self._monitor_thread.start()

  def join(self):
    """Blocks until the monitor loop exits; re-raises any stored error."""
    if self._monitor_thread:
      self._monitor_thread.join()
    if self._error is not None:
      raise self._error

  def run(self):
    """Runs the monitor to completion (`start` followed by `join`)."""
    self.start()
    self.join()

  def _monitor_loop(self):
    """Polls for checkpoint files until all entries complete or error out.

    Runs on the dedicated monitor thread started by `start`. Newly found
    per-example checkpoints are dispatched to the aggregator thread pool;
    on exit the run is signaled as completed or aborted.
    """
    while not self._error and any(
        not e.is_completed for e in self._aggregation_entries
    ):
      for entry in self._aggregation_entries:
        if not entry.example_ids_to_be_aggregated:
          continue

        # Signal example processing.
        if self.monitor_inprogress_files:
          inprogress_files = pg.io.glob(entry.inprogress_file_pattern)
          for inprogress_file in inprogress_files:
            # In-progress files are named `<example_id>.inprogress`.
            example_id = int(
                os.path.basename(inprogress_file).split('.')[0]
            )
            if example_id not in entry.example_ids_inprogress:
              self._mark_example_started(entry.evaluation, example_id)
              entry.example_ids_inprogress.add(example_id)

        for filepath in pg.io.glob(entry.ckpt_file_pattern):
          # The example ID is the trailing `_<id>` segment of the checkpoint
          # file name (e.g. `checkpoint_12.bagz`).
          example_id = int(
              os.path.basename(filepath).split('.')[0].split('_')[-1]
          )
          if example_id in entry.example_ids_to_be_aggregated:
            last_modified_time = pg.io.getmtime(filepath)
            bypass_timestamp = self._ckpt_bypass_timestamp.get(filepath)
            if (
                bypass_timestamp is not None
                and last_modified_time <= bypass_timestamp
            ):
              # The file was previously bypassed by `_aggregate` and has not
              # been modified since; skip it until it changes.
              continue

            # Remove example ID from the set to avoid duplicate processing.
            entry.example_ids_to_be_aggregated.remove(example_id)
            entry.example_ids_being_aggregated.add(example_id)

            # It could be that the example has been processed before, but the
            # inprogress file was removed. In this case, we should signal the
            # example has started before completing it.
            if example_id not in entry.example_ids_inprogress:
              self._mark_example_started(entry.evaluation, example_id)
              entry.example_ids_inprogress.add(example_id)

            self._aggregator_pool.submit(
                self._aggregate, entry, filepath, example_id, last_modified_time
            )
            pg.logging.info(
                '[%s] Aggregating example %d from %s...',
                entry.evaluation.id,
                example_id,
                filepath,
            )
      time.sleep(self.poll_interval)

    if self._error is None:
      self.on_run_complete()
    else:
      self.on_run_abort(self._error)

  def _aggregate(
      self,
      entry: _AggregationEntry,
      ckpt_filepath: str,
      example_id: int,
      last_modified_time: float,
  ):
    """Aggregate an example from a checkpoint file.

    Runs on the aggregator thread pool. Loads the example from
    `ckpt_filepath`, evaluates it (without reprocessing) so its metrics are
    collected, and fires example/experiment completion events. Errors from
    evaluation or plugins are stored in `self._error`, which stops the
    monitor loop and is re-raised by `join`.
    """
    try:
      loaded_examples = entry.evaluation.state.load(
          ckpt_filepath,
          example_input_by_id=entry.evaluation.example_input_by_id,
          # Example metadata may be expensive to load, and is not used by
          # metric aggregation. Thus we do not load example metadata.
          load_example_metadata=False
      )
      assert len(loaded_examples) >= 1, loaded_examples
      # Occasionally the per-example checkpoint file may contain the same
      # example processed multiple times. We only need to aggregate the last
      # example.
      example = loaded_examples[-1]
      if (
          self.bypass_old_ckpt_files_with_non_oop_errors
          and last_modified_time < self.ckpt_start_time
          and example.error is not None
          and not example.error.tag.startswith('MappingError')
      ):
        # The checkpoint predates `ckpt_start_time` and carries an error
        # other than `MappingError`: put the example back for future polling
        # and record the file's mtime so the monitor loop skips it until it
        # is modified again.
        entry.example_ids_being_aggregated.remove(example_id)
        entry.example_ids_to_be_aggregated.add(example_id)
        self._ckpt_bypass_timestamp[ckpt_filepath] = last_modified_time
        pg.logging.info(
            '[%s] Bypassing old checkpoint file with non-oop errors (%s) '
            'for example %d, last_modified_time: %s, ckpt_start_time: %s',
            entry.evaluation.id,
            ckpt_filepath,
            example_id,
            last_modified_time,
            self.ckpt_start_time,
        )
        return
    except BaseException as e:  # pylint: disable=broad-except
      # Loading failures are converted into an errored example so metrics
      # still account for it instead of stalling the run.
      error_info = pg.ErrorInfo.from_exception(e)
      pg.logging.error(
          '[%s] Failed to aggregate example %d: %s',
          entry.evaluation.id,
          example_id,
          error_info
      )
      example = example_lib.Example(
          id=example_id,
          input=entry.evaluation.example_input_by_id(example_id),
          error=error_info,
      )

    # This will skip processing but still allow metrics to be collected.
    # `process` will never be called for evaluation, thus we do not
    # need to setup/teardown evaluation.
    try:
      example = entry.evaluation.evaluate(
          example, reevaluate_upon_previous_errors=False
      )
    except BaseException as e:  # pylint: disable=broad-except
      pg.logging.error(
          '[%s] Unexpected error found during evaluating example %d from %s.',
          entry.evaluation.id,
          example_id,
          ckpt_filepath,
      )
      self._error = e
      entry.example_ids_being_aggregated.remove(example_id)
      return

    # NOTE(review): presumably marks the example as fresh output of this run
    # for downstream reporting — confirm against `Example` semantics.
    example.newly_processed = True
    pg.logging.info(
        '[%s] Successfully aggregated example %d from %s.',
        entry.evaluation.id,
        example_id,
        ckpt_filepath,
    )

    try:
      self.on_example_complete(entry.evaluation, example)
    except BaseException as e:  # pylint: disable=broad-except
      # Plugin failures should be raised to the user.
      self._error = e

    entry.example_ids_being_aggregated.remove(example_id)

    # Remove the in-progress file to indicate that the example has been
    # processed.
    try:
      pg.io.rm(os.path.join(entry.output_dir, f'{example_id}.inprogress'))
    except FileNotFoundError:
      pass

    if (not self._error
        and not entry.example_ids_to_be_aggregated
        and not entry.example_ids_being_aggregated):
      # The lock ensures `on_experiment_complete` is signaled exactly once
      # even when multiple aggregator threads reach this point together.
      with entry.completion_lock:
        if not entry.is_completed:
          entry.is_completed = True
          try:
            self.on_experiment_complete(entry.evaluation)
          except BaseException as e:  # pylint: disable=broad-except
            # Plugin failures should be raised to the user.
            self._error = e

  def _mark_example_started(
      self,
      evaluation: evaluation_lib.Evaluation,
      example_id: int
  ) -> None:
    """Mark an example as started."""
    example = example_lib.Example(
        id=example_id, input=evaluation.example_input_by_id(example_id),
    )
    example.start_time = time.time()
    self.on_example_start(evaluation, example)

    # We update evaluation state with the inprogress status so the evaluation
    # HTML could show remotely in-progress examples.
    evaluation.state.update(example, in_progress=True)

  def _run(self, evaluations: list[evaluation_lib.Evaluation]):
    """Not supported: this runner drives itself via `start`/`join`."""
    raise NotImplementedError('Not needed in checkpoint monitor.')

  def _evaluate_items(
      self,
      evaluation: evaluation_lib.Evaluation,
      items: Iterator[example_lib.Example]
  ) -> None:
    """Not supported: aggregation happens from checkpoint files directly."""
    raise NotImplementedError('Not needed in checkpoint monitor.')