__ENABLED__ = True

try:
    import wandb
    from wandb.sdk.internal import datastore
    from wandb.sdk.internal.datastore import (
        LEVELDBLOG_HEADER_LEN, LEVELDBLOG_BLOCK_LEN, LEVELDBLOG_LAST,
        LEVELDBLOG_MIDDLE, LEVELDBLOG_FULL, LEVELDBLOG_FIRST, strtobytes,
        LEVELDBLOG_HEADER_IDENT, LEVELDBLOG_HEADER_MAGIC, LEVELDBLOG_HEADER_VERSION
    )
    from wandb.proto import wandb_internal_pb2

except ImportError:
    __ENABLED__ = False

if __ENABLED__:
    import json
    import os
    import queue
    import logging
    from collections import deque
    from json import JSONDecodeError
    from pathlib import Path
    import threading
    from watchdog.events import FileSystemEventHandler
    from watchdog.observers import Observer
    from aim import Run
    import struct
    import time
    import zlib

    # One-shot flag: set once the bridge thread has been started.
    __INITED__ = False

    # Logging configuration for this module.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Global lock guarding the one-time initialization in call_wandb_to_aim().
    init_lock = threading.Lock()


    def wait_for_wandb_initialization():
        """Block until WandB has an active run.

        Polls ``wandb.run`` once per second and returns as soon as it is truthy
        (i.e. ``wandb.init`` has completed somewhere in the process).
        """
        poll_interval_seconds = 1
        while not wandb.run:
            time.sleep(poll_interval_seconds)


    class FileSystemEventHandlerWithQueue(FileSystemEventHandler):
        """
        Filesystem event handler that watches for modifications to
        ``wandb-summary.json`` and pushes each successfully decoded JSON
        payload onto a queue for downstream consumers.
        """

        def __init__(self, event_queue):
            # Initialize the watchdog base class (fix: was previously skipped).
            super().__init__()
            # Queue receiving decoded summary records (dicts).
            self.event_queue = event_queue

        def on_modified(self, event):
            """Handle a file-modification event.

            Ignores directories and any path not ending in
            ``wandb-summary.json``. Empty or partially-written (undecodable)
            contents are skipped quietly; a file that disappears between the
            event and the read is tolerated so the watchdog emitter thread is
            never killed by an exception raised here.
            """
            if event.is_directory:
                return
            if not event.src_path.endswith("wandb-summary.json"):
                return
            try:
                with open(event.src_path, 'rb') as f:
                    data = f.read()
            except OSError as e:
                # File may have been removed/rotated right after the event fired.
                logger.debug(f"Could not read modified file {event.src_path}: {e}")
                return
            if len(data) == 0:
                return
            try:
                record = json.loads(data.decode('utf-8'))
            except (JSONDecodeError, UnicodeDecodeError):
                # Writer may still be mid-write; a later event will retry.
                logger.debug(f"JSON decode error for file: {event.src_path} data: {data}")
                return
            self.event_queue.put(record)
            logger.debug(f"File modified and data added to queue: {event.src_path}")


    class RecentDataBuffer:
        """
        Bounded buffer of the most recently seen items (500 by default),
        backed by a deque (ordering/eviction) plus a set (O(1) duplicate
        lookup). Used to reject records that were already processed.
        """

        def __init__(self, max_size=500):
            self.max_size = max_size
            self.data_queue = deque(maxlen=max_size)
            self.data_set = set()

        def add_data(self, data):
            """Insert *data*; return True if it was new, False on a duplicate."""
            if self.is_duplicate(data):
                logger.debug(f"Duplicate data detected: {data}")
                return False
            # Evict the oldest entry first so the set stays in sync with the
            # deque (the deque's maxlen would otherwise drop it silently).
            if len(self.data_queue) >= self.max_size:
                evicted = self.data_queue.popleft()
                self.data_set.remove(evicted)
                logger.debug(f"Removed oldest data: {evicted}")
            self.data_queue.append(data)
            self.data_set.add(data)
            logger.debug(f"Added new data: {data}")
            return True

        def is_duplicate(self, data):
            """Return True if *data* is currently held in the buffer."""
            return data in self.data_set

        def get_recent_data(self):
            """Return a snapshot list of buffered items, oldest first."""
            return list(self.data_queue)


    class DataStoreWithQueue(datastore.DataStore):
        """
        DataStore subclass that tails a wandb transaction log (LevelDB-log
        style record file), decodes "history" records from it, and feeds them
        through an internal queue to a watcher callback.
        """

        def __init__(self):
            super().__init__()
            # Becomes True once open_for_watch() has been called.
            self._enable_watch = False
            # queue.Queue of decoded history dicts (created in open_for_watch).
            self._queue = None
            # Intended one-shot guard for watch(). NOTE(review): never set to
            # True anywhere, so the assert in watch() cannot actually trip.
            self._watch_call = False

        def _read_header(self):
            # Read and validate the file-level header (ident, magic, version),
            # then advance the running byte index past it.
            header = self._fp_read(LEVELDBLOG_HEADER_LEN)
            assert len(header) == LEVELDBLOG_HEADER_LEN, "header is {} bytes instead of the expected {}".format(
                len(header), LEVELDBLOG_HEADER_LEN)
            ident, magic, version = struct.unpack("<4sHB", header)
            if ident != strtobytes(
                    LEVELDBLOG_HEADER_IDENT) or magic != LEVELDBLOG_HEADER_MAGIC or version != LEVELDBLOG_HEADER_VERSION:
                raise Exception("Invalid header")
            self._index += len(header)

        def open_for_watch(self, fname):
            """Start watching the run for new data.

            Spawns a watchdog Observer over the sibling ``files`` directory
            (wandb-summary.json updates) and a daemon thread that scans the
            transaction log at *fname* into ``self._queue``.
            """
            self._queue = queue.Queue()
            self._enable_watch = True
            observer = Observer()
            observer.schedule(FileSystemEventHandlerWithQueue(self._queue), str(Path(fname).parent / 'files'),
                              recursive=True)
            observer.start()
            self._fname = fname
            thread = threading.Thread(target=self._begin_scan_db_file_to_queue, daemon=True)
            thread.start()


            logger.info(f"Started watching {fname} for changes")

        def _fp_read(self, length):
            """Blocking read: poll every 5s until exactly *length* bytes arrive.

            The file is being appended to concurrently by the wandb writer, so
            a short read just means the data has not been flushed yet.
            """
            data = self._fp.read(length)
            while len(data) < length:
                time.sleep(5)
                data += self._fp.read(length - len(data))
            return data

        def scan_record(self):
            """Read one log record: 7-byte header (crc32, length, type) + payload.

            Returns (dtype, data), or None on a zero-length header read.
            NOTE(review): _fp_read blocks until all requested bytes arrive, so
            the None / short-header branches appear unreachable in this subclass.
            """
            assert self._opened_for_scan, "file not open for scanning"
            header = self._fp_read(LEVELDBLOG_HEADER_LEN)
            if len(header) == 0:
                return None
            if len(header) < LEVELDBLOG_HEADER_LEN:
                raise AssertionError("header too short")

            fields = struct.unpack("<IHB", header)
            checksum, dlength, dtype = fields
            self._index += LEVELDBLOG_HEADER_LEN
            data = self._fp_read(dlength)
            if not len(data) == dlength:
                raise AssertionError("wrong data")

            # CRC is seeded per record type; self._crc is not defined here —
            # presumably initialized by the parent DataStore.
            checksum_computed = zlib.crc32(data, self._crc[dtype]) & 0xFFFFFFFF
            if checksum != checksum_computed:
                raise AssertionError(f'Checksum mismatch: {checksum} != {checksum_computed}')

            self._index += dlength
            return dtype, data

        def scan_data(self):
            """Return the payload of the next logical record, reassembling
            FIRST/MIDDLE/LAST fragments that span block boundaries."""
            # A record header never straddles a block: when fewer than
            # header-size bytes remain in the current block, the writer
            # zero-pads to the boundary, which we verify and skip here.
            offset = self._index % LEVELDBLOG_BLOCK_LEN
            space_left = LEVELDBLOG_BLOCK_LEN - offset
            if space_left < LEVELDBLOG_HEADER_LEN:
                pad_check = strtobytes("\x00" * space_left)
                pad = self._fp.read(space_left)
                assert pad == pad_check, "invalid padding"
                self._index += space_left

            record = self.scan_record()
            if record is None:
                return None
            dtype, data = record
            if dtype == LEVELDBLOG_FULL:
                return data

            # A fragmented record starts with FIRST and ends with LAST,
            # with zero or more MIDDLE pieces in between.
            assert dtype == LEVELDBLOG_FIRST, f"expected record to be type {LEVELDBLOG_FIRST} but found {dtype}"
            while True:
                record = self.scan_record()
                if record is None:
                    return None
                dtype, new_data = record
                if dtype == LEVELDBLOG_LAST:
                    data += new_data
                    break
                assert dtype == LEVELDBLOG_MIDDLE, f"expected record to be type {LEVELDBLOG_MIDDLE} but found {dtype}"
                data += new_data
            return data

        def scan_history(self):
            """Block until the next "history" record and return it as a dict.

            Non-history records are skipped; scan/parse errors are logged and
            the loop keeps going. Values are coerced with float(), so this
            assumes history values are numeric JSON scalars — TODO confirm.
            """
            while True:
                try:
                    data = self.scan_data()
                except BaseException as e:
                    logger.error(f"Error scanning data: {e}")
                    continue
                pb = wandb_internal_pb2.Record()
                try:
                    pb.ParseFromString(data)
                except BaseException as e:
                    logger.error(f"Error parsing protobuf: {e}")
                    continue
                record_type = pb.WhichOneof("record_type")
                if record_type == "history":
                    items = pb.history.item
                    return {item.key: float(item.value_json) for item in items}

        def _begin_scan_db_file_to_queue(self):
            """Daemon-thread target: scan history records into the queue forever."""
            self.open_for_scan(self._fname)
            while True:
                self._queue.put(self.scan_history())
                logger.debug("Scanned history and put it in the queue")

        def watch(self, watch_fn):
            """Consume the queue forever, invoking *watch_fn* on each record.

            Records are de-duplicated by their '_step' value using a bounded
            buffer of recently seen steps. Requires open_for_watch() first.
            """
            assert self._enable_watch and not self._watch_call, "watch function already enabled or disabled"

            data_buffer = RecentDataBuffer()

            while True:
                record = self._queue.get()
                if data_buffer.is_duplicate(record.get('_step')):
                    continue
                data_buffer.add_data(record.get('_step'))
                watch_fn(record)

    class AimMetricLogger:
        """Forwards wandb-style metric records to an Aim run."""

        def __init__(self, aim_repo, experiment, description, config):
            """Create the underlying Aim run and copy run metadata onto it.

            aim_repo    -- path/URL of the Aim repository.
            experiment  -- experiment name (the wandb project).
            description -- free-text run description (the wandb notes).
            config      -- mapping of hyperparameters stored on the run.
            """
            aim_run = Run(
                repo=aim_repo,
                system_tracking_interval=None,
                capture_terminal_logs=True,
                experiment=experiment
            )

            aim_run.description = description
            for k, v in config.items():
                aim_run[k] = v

            self.aim_run = aim_run

        def log(self, record):
            """Track every non-internal metric in *record* on the Aim run.

            Keys starting with '_' (e.g. '_step', '_timestamp') are treated as
            metadata and skipped. A key of the form "<tag>/<name>" is split:
            tags containing train/val/test map to a ``subset`` context, any
            other tag becomes a ``tag`` context. When the record carries a
            "<tag>/global_step" entry, that value overrides the step for keys
            of that tag only (fix: the override previously persisted into
            subsequent unrelated keys because ``step`` was mutated in-loop).
            """
            base_step = record.get('_step')
            epoch = record.get('epoch')
            timestamp = record.get('_timestamp')
            for key, value in record.items():
                if key.startswith('_') or value is None:
                    continue
                # Reset per key so a previous tag's global_step cannot leak.
                step = base_step
                try:
                    tag, name = key.rsplit("/", 1)
                    if "train" in tag:
                        context = {'subset': 'train'}
                    elif "val" in tag:
                        context = {'subset': 'val'}
                    elif "test" in tag:
                        context = {'subset': 'test'}
                    else:
                        context = {'tag': tag}
                    # Per-tag step override, e.g. "train/global_step".
                    if global_step := record.get(tag + '/global_step'):
                        step = global_step
                except ValueError:
                    # No '/' in the key: track it flat with no context.
                    name, context = key, {}
                try:
                    if timestamp:
                        # Private API preserves the original wandb timestamp.
                        self.aim_run._tracker._track(value, track_time=timestamp, name=name,
                                                     step=step, epoch=epoch, context=context)
                    else:
                        self.aim_run.track(value, name=name, step=step, epoch=epoch, context=context)
                except ValueError as e:
                    logger.warning(f"Error tracking value to Aim: {e}")


    def wandb_to_aim_func(wandb_run: "wandb.wandb_sdk.wandb_run.Run" = None, aim_repo=os.getenv('AIM_REPO')):
        """Bridge a wandb run's metric stream into an Aim repository.

        No-op when *aim_repo* is unset. If *wandb_run* is not given, blocks
        until wandb has been initialized in this process. Then blocks forever,
        tailing the wandb transaction log and forwarding every new history
        record to Aim.
        """
        if aim_repo is None:
            logger.debug("AIM repo not set. Will not report metrics to aim")
            return

        if wandb_run is None:
            logger.info("Waiting for wandb run...")
            wait_for_wandb_initialization()
            wandb_run = wandb.run

        store = DataStoreWithQueue()

        logger.info("Wandb was initialized, waiting for Aim to running...")
        metric_logger = AimMetricLogger(
            aim_repo=aim_repo,
            experiment=wandb_run.project,
            description=wandb_run.notes,
            config=wandb_run.config,
        )
        logger.warning("Beginning reporting metrics wandb to aim...")
        store.open_for_watch(wandb_run.settings.sync_file)
        store.watch(metric_logger.log)


    def call_wandb_to_aim():
        """Start the wandb-to-aim bridge thread exactly once (thread-safe)."""
        global __INITED__
        with init_lock:
            if __INITED__:
                return
            __INITED__ = True
            bridge_thread = threading.Thread(
                target=wandb_to_aim_func,
                name='WandbToAim-Thread',
                daemon=True,
            )
            bridge_thread.start()


    # Kick off the bridge automatically on import; call_wandb_to_aim() itself
    # re-checks __INITED__ under init_lock, so this is safe and idempotent.
    if not __INITED__:
        call_wandb_to_aim()
