import os
import gc

import torch
from torch import nn

import model_executor.models.llama
import utils
from model_executor.model_loader import get_model


class Worker:
    """A worker class that executes (a partition of) the model on a GPU.

    Each worker is associated with a single GPU. The worker is responsible for
    maintaining the KV cache and executing the model on the GPU. In case of
    distributed inference, each worker is assigned a partition of the model.
    """

    def __init__(
            self,
            local_rank,
            is_driver_worker: bool = False,
    ):
        """Create a worker bound to the GPU identified by ``local_rank``.

        Args:
            local_rank: Index of the GPU device this worker will use.
            is_driver_worker: Whether this worker acts as the driver in a
                distributed setup.
        """
        self.device = None
        self.baseline_snapshot = None
        self.local_rank = local_rank
        self.is_driver_worker = is_driver_worker

        # Enable the torch profiler.
        # TODO: read this path from configuration/environment (e.g.
        # VLLM_TORCH_PROFILER_DIR) instead of hard-coding it.
        torch_profiler_trace_dir = "../torch_profiler"  # envs.VLLM_TORCH_PROFILER_DIR
        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(torch_profiler_trace_dir, exist_ok=True)

        # logger.info("Profiling enabled. Traces will be saved to: %s",
        #             torch_profiler_trace_dir)
        self.profiler = torch.profiler.profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU,
                torch.profiler.ProfilerActivity.CUDA,
            ],
            with_stack=True,
            on_trace_ready=torch.profiler.tensorboard_trace_handler(
                torch_profiler_trace_dir, use_gzip=True))

    def start_profile(self):
        """Start collecting a profiler trace.

        Raises:
            RuntimeError: If the profiler was not created.
        """
        if self.profiler is None:
            raise RuntimeError("Profiler is not enabled.")
        self.profiler.start()

    def stop_profile(self):
        """Stop the profiler, flushing the collected trace via the
        tensorboard trace handler configured in ``__init__``.

        Raises:
            RuntimeError: If the profiler was not created.
        """
        if self.profiler is None:
            raise RuntimeError("Profiler is not enabled.")
        self.profiler.stop()

    def init_device(self):
        """Bind this process to its GPU and snapshot baseline memory usage."""
        # TODO change cuda to ascend
        self.device = torch.device(f"cuda:{self.local_rank}")
        torch.cuda.set_device(self.device)
        # Start from a clean slate so the baseline memory snapshot below is
        # meaningful.
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        self.baseline_snapshot = utils.MemorySnapshot()

        # Initialize the distributed environment.
        init_worker_distributed_environment()

        # Set random seed.
        # set_random_seed(self.model_config.seed)

    def load_model(self):
        """Load the model onto this worker's device, recording how much
        device memory the weights consume in ``self.model_memory_usage``.
        """
        # logger.info("Starting to load model %s...", self.model_config.model)
        with utils.DeviceMemoryProfiler() as m:
            # BUG FIX: the original called ``self.get_model(vllm_config=...)``,
            # but the ``get_model`` *method* below takes no arguments, so that
            # call raised TypeError. The intent is the module-level loader
            # imported from model_executor.model_loader (shadowed on the
            # instance by the accessor method); call it as a bare global.
            # NOTE(review): ``self.vllm_config`` is never assigned in this
            # file — presumably set by a caller; verify before use.
            self.model = get_model(vllm_config=self.vllm_config)

        self.model_memory_usage = m.consumed_memory
        # logger.info("Loading model weights took %.4f GB",
        #             self.model_memory_usage / float(2**30))

        # Compilation options (currently disabled).
        # if self.vllm_config.compilation_config.level ==\
        #     CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
        #     backend = self.vllm_config.compilation_config.init_backend(
        #         self.vllm_config)
        #     self.model = torch.compile(
        #         self.model,
        #         fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE,
        #         backend=backend)

    def get_model(self) -> nn.Module:
        """Return the loaded model (valid only after ``load_model``)."""
        return self.model

    def execute_model(self):
        """Run the model on this worker. Not implemented yet."""
        pass


def init_worker_distributed_environment():
    """Set up the distributed environment for this worker.

    Currently a no-op placeholder; the actual initialization (process-group
    setup, rank assignment, etc.) is expected to be filled in later.
    """
    return None