# Adapted from
# https://github.com/volcengine/verl/blob/v0.4.0/verl/single_controller/base/megatron/worker.py
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from verl.single_controller.base.megatron.worker import MegatronWorker


def _init_hf_config_and_tf_config(self, model_path, dtype, override_model_config, override_transformer_config):
    """Initialize the HuggingFace model config and the Megatron transformer config.

    Intended to be monkey-patched onto ``MegatronWorker`` (see bottom of this
    module). Localizes the model, builds the tokenizer, loads and overrides
    the HF config, then derives the Megatron ``tf_config`` (including optional
    activation-recompute settings from the verl model config).

    Args:
        model_path: Local path or remote identifier of the HF model.
        dtype: Parameter dtype forwarded to ``hf_to_mcore_config``.
        override_model_config: Mapping that may carry a ``"model_config"``
            sub-dict of HF-config overrides. ``None`` is treated as empty.
        override_transformer_config: Keyword overrides expanded into
            ``hf_to_mcore_config``. ``None`` is treated as empty.

    Side effects:
        Sets ``self.local_path``, ``self.tokenizer``,
        ``self.share_embeddings_and_output_weights``, ``self.architectures``,
        ``self.hf_config`` and ``self.tf_config``.
    """
    from transformers import AutoConfig

    from verl.models.mcore import hf_to_mcore_config
    from verl.utils import hf_tokenizer
    from verl.utils.fs import copy_to_local
    from verl.utils.model import update_model_config

    # Robustness: both override mappings are dereferenced below; accept None
    # from callers instead of raising TypeError.
    override_model_config = override_model_config or {}
    override_transformer_config = override_transformer_config or {}

    # Step 1: initialize the tokenizer from a local copy of the model
    self.local_path = copy_to_local(model_path)
    self.tokenizer = hf_tokenizer(self.local_path)

    # Step 2: load the hf config
    hf_config = AutoConfig.from_pretrained(self.local_path, trust_remote_code=self.config.rollout.trust_remote_code)

    # Step 3: override the hf config with the tokenizer's special tokens plus
    # any user-supplied "model_config" overrides
    override_config_kwargs = {
        "bos_token_id": self.tokenizer.bos_token_id,
        "eos_token_id": self.tokenizer.eos_token_id,
        "pad_token_id": self.tokenizer.pad_token_id,
    }
    override_config_kwargs.update(override_model_config.get("model_config", {}))
    # NOTE: read before update_model_config runs, i.e. from the un-overridden
    # HF config (original ordering preserved).
    self.share_embeddings_and_output_weights = getattr(hf_config, "tie_word_embeddings", False)
    update_model_config(hf_config, override_config_kwargs=override_config_kwargs)
    self.architectures = getattr(hf_config, "architectures", None)
    if self.rank == 0:
        print(f"Model config after override: {hf_config}")
    tf_config = hf_to_mcore_config(hf_config, dtype, **override_transformer_config)

    def _add_optimization_config_to_tf_config(tf_config, verl_model_config):
        # Mirror verl's gradient-checkpointing settings onto the Megatron
        # transformer config (recompute_* fields), e.g. activation checkpointing.
        if verl_model_config.get("enable_gradient_checkpointing", False):
            gradient_checkpointing_cfg = dict(verl_model_config.get("gradient_checkpointing_kwargs", dict()))
            tf_config.recompute_method = gradient_checkpointing_cfg.get("activations_checkpoint_method", "full")
            tf_config.recompute_granularity = gradient_checkpointing_cfg.get("activations_checkpoint_granularity", "full")
            tf_config.recompute_num_layers = gradient_checkpointing_cfg.get("activations_checkpoint_num_layers", -1)

    _add_optimization_config_to_tf_config(tf_config, self.config.model)

    # Consistency fix: gate on rank 0 like the model-config print above, so
    # multi-rank runs don't emit one TF-config dump per rank.
    if self.rank == 0:
        print(f"TF config: {tf_config}")
    self.hf_config = hf_config
    self.tf_config = tf_config


# Monkey-patch: install the helper above as a MegatronWorker method so
# existing instances/subclasses pick it up without modifying verl itself.
setattr(MegatronWorker, "_init_hf_config_and_tf_config", _init_hf_config_and_tf_config)
