# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,time,signal
import gc
import sys
import threading
import json
from typing import Optional

import fire
from tqdm import tqdm
from transformers import Seq2SeqTrainingArguments

from llamafactory.data import get_dataset, get_template_and_fix_tokenizer
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.extras.misc import get_device_count
from llamafactory.extras.packages import is_vllm_available
from llamafactory.hparams import get_infer_args
from llamafactory.model import load_tokenizer


# vLLM is an optional dependency: import it lazily so this module can still be
# imported (e.g. to render --help) on machines where vLLM is not installed.
if is_vllm_available():
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest


def _attach_q_type(train_dataset, data_args):
    """Attach a ``q_type`` column to ``train_dataset`` from the raw dataset file.

    The raw JSON file is resolved through ``dataset_info.json`` inside
    ``data_args.dataset_dir``; each raw record's ``q_type`` field (``None``
    when absent) is added as a new column, aligned by position.

    Returns:
        The dataset with the ``q_type`` column attached (possibly truncated
        when the raw file and the processed dataset disagree in length).

    Raises:
        ValueError: if the column cannot be attached for any reason; the
            original error is chained as ``__cause__``.
    """
    try:
        dataset_info_path = os.path.join(data_args.dataset_dir, "dataset_info.json")
        if not os.path.exists(dataset_info_path):
            raise FileNotFoundError(f"dataset_info.json not found in {data_args.dataset_dir}")

        with open(dataset_info_path, "r", encoding="utf-8") as f:
            dataset_info = json.load(f)

        # Resolve the raw file name of the (single) dataset being inferred on.
        dataset_name = data_args.dataset
        if isinstance(dataset_name, list):
            if len(dataset_name) != 1:
                raise ValueError(f"Expected one dataset, got {dataset_name}")
            dataset_name = dataset_name[0]

        if dataset_name not in dataset_info:
            raise KeyError(f"{dataset_name} not found in dataset_info.json keys: {list(dataset_info.keys())}")

        file_name = dataset_info[dataset_name]["file_name"]
        data_file_path = os.path.join(data_args.dataset_dir, file_name)
        if not os.path.exists(data_file_path):
            raise FileNotFoundError(f"Cannot find raw dataset file: {data_file_path}")

        print(f"[INFO] Loading q_type from {data_file_path}")
        with open(data_file_path, "r", encoding="utf-8") as f:
            raw_data = json.load(f)

        q_types = [example.get("q_type") for example in raw_data]
        if len(q_types) != len(train_dataset):
            # Preprocessing may drop samples (e.g. over-length ones), so the raw
            # file and the tokenized dataset can disagree; align by truncation.
            print(f"[WARN] Length mismatch: q_type={len(q_types)}, dataset={len(train_dataset)}")
            min_len = min(len(q_types), len(train_dataset))
            q_types = q_types[:min_len]
            train_dataset = train_dataset.select(range(min_len))

        train_dataset = train_dataset.add_column("q_type", q_types)
        print(f"[INFO] ✅ Successfully attached q_type column ({len(q_types)} samples).")
        print("Sample q_type values:", q_types[:5])
        return train_dataset
    except Exception as e:
        print(f"[WARN] ❌ Failed to attach q_type: {e}")
        print(f"[DEBUG] data_args.dataset_dir = {data_args.dataset_dir}")
        print(f"[DEBUG] data_args.dataset = {getattr(data_args, 'dataset', None)}")
        # Re-raise with the original exception chained so the root cause stays
        # visible in the traceback (the old code raised a garbled message and
        # dropped the cause).
        raise ValueError("Failed to attach the q_type column to the dataset") from e


def vllm_infer(
    model_name_or_path: str,
    adapter_name_or_path: Optional[str] = None,
    dataset: str = "alpaca_en_demo",
    dataset_dir: str = "data",
    template: str = "default",
    cutoff_len: int = 2048,
    max_samples: Optional[int] = None,
    vllm_config: str = "{}",
    save_name: str = "generated_predictions.jsonl",
    temperature: float = 0.95,
    top_p: float = 0.7,
    top_k: int = 50,
    max_new_tokens: int = 1024,
    repetition_penalty: float = 1.0,
    skip_special_tokens: bool = True,
    default_system: Optional[str] = None,
    enable_thinking: bool = True,
    seed: Optional[int] = None,
    pipeline_parallel_size: int = 1,
    image_max_pixels: int = 768 * 768,
    image_min_pixels: int = 32 * 32,
    video_fps: float = 2.0,
    video_maxlen: int = 128,
    batch_size: int = 1024,
):
    r"""Perform batch generation using vLLM engine, which supports tensor parallelism.

    Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo

    Results are written to ``save_name`` as JSON lines with the keys
    ``prompt``, ``predict``, ``label`` and ``qtype`` (the latter taken from
    the raw dataset file's per-sample ``q_type`` field).

    Raises:
        ValueError: if ``pipeline_parallel_size`` exceeds the GPU count, or if
            the ``q_type`` column cannot be attached to the dataset.
    """
    if pipeline_parallel_size > get_device_count():
        raise ValueError("Pipeline parallel size should be smaller than the number of gpus.")

    model_args, data_args, _, generating_args = get_infer_args(
        dict(
            model_name_or_path=model_name_or_path,
            adapter_name_or_path=adapter_name_or_path,
            dataset=dataset,
            dataset_dir=dataset_dir,
            template=template,
            cutoff_len=cutoff_len,
            max_samples=max_samples,
            preprocessing_num_workers=16,
            default_system=default_system,
            enable_thinking=enable_thinking,
            vllm_config=vllm_config,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
        )
    )

    training_args = Seq2SeqTrainingArguments(output_dir="dummy_dir")
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template_obj = get_template_and_fix_tokenizer(tokenizer, data_args)
    template_obj.mm_plugin.expand_mm_tokens = False  # for vllm generate

    engine_args = {
        "model": model_args.model_name_or_path,
        "trust_remote_code": True,
        "dtype": model_args.infer_dtype,
        # Leave room for both the prompt and the generated continuation.
        "max_model_len": cutoff_len + max_new_tokens,
        "tensor_parallel_size": (get_device_count() // pipeline_parallel_size) or 1,
        "pipeline_parallel_size": pipeline_parallel_size,
        "disable_log_stats": True,
        "enable_lora": model_args.adapter_name_or_path is not None,
    }
    if template_obj.mm_plugin.__class__.__name__ != "BasePlugin":
        engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2, "audio": 2}

    # User-supplied vllm_config overrides the defaults above.
    if isinstance(model_args.vllm_config, dict):
        engine_args.update(model_args.vllm_config)

    llm = LLM(**engine_args)

    # Load and preprocess the dataset ("ppo" stage yields prompt-only inputs).
    dataset_module = get_dataset(template_obj, model_args, data_args, training_args, "ppo", **tokenizer_module)
    train_dataset = dataset_module["train_dataset"]

    # Attach q_type metadata from the raw JSON file so predictions can be
    # grouped by question type downstream.
    train_dataset = _attach_q_type(train_dataset, data_args)

    # Prepare sampling params
    sampling_params = SamplingParams(
        repetition_penalty=generating_args.repetition_penalty or 1.0,
        temperature=generating_args.temperature,
        top_p=generating_args.top_p or 1.0,  # top_p must be in (0, 1]
        top_k=generating_args.top_k or -1,  # top_k must be greater than 0
        stop_token_ids=template_obj.get_stop_token_ids(tokenizer),
        max_tokens=generating_args.max_new_tokens,
        skip_special_tokens=skip_special_tokens,
        seed=seed,
    )

    if model_args.adapter_name_or_path is not None:
        lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
    else:
        lora_request = None

    # Store all results
    all_prompts, all_preds, all_labels, all_q_types = [], [], [], []

    # Batched inference
    for i in tqdm(range(0, len(train_dataset), batch_size), desc="Processing batched inference"):
        vllm_inputs, prompts, labels = [], [], []
        batch = train_dataset[i : min(i + batch_size, len(train_dataset))]
        batch_q_types = batch.get("q_type", [None] * len(batch["input_ids"]))

        for j in range(len(batch["input_ids"])):
            # Build the optional multimodal payload for this sample.
            if batch["images"][j] is not None:
                image = batch["images"][j]
                multi_modal_data = {
                    "image": template_obj.mm_plugin._regularize_images(
                        image, image_max_pixels=image_max_pixels, image_min_pixels=image_min_pixels
                    )["images"]
                }
            elif batch["videos"][j] is not None:
                video = batch["videos"][j]
                multi_modal_data = {
                    "video": template_obj.mm_plugin._regularize_videos(
                        video,
                        image_max_pixels=image_max_pixels,
                        image_min_pixels=image_min_pixels,
                        video_fps=video_fps,
                        video_maxlen=video_maxlen,
                    )["videos"]
                }
            elif batch["audios"][j] is not None:
                audio = batch["audios"][j]
                audio_data = template_obj.mm_plugin._regularize_audios(audio, sampling_rate=16000)
                multi_modal_data = {"audio": zip(audio_data["audios"], audio_data["sampling_rates"])}
            else:
                multi_modal_data = None

            vllm_inputs.append({"prompt_token_ids": batch["input_ids"][j], "multi_modal_data": multi_modal_data})
            prompts.append(tokenizer.decode(batch["input_ids"][j], skip_special_tokens=skip_special_tokens))
            labels.append(
                tokenizer.decode(
                    # Label positions masked with IGNORE_INDEX are not decodable.
                    list(filter(lambda x: x != IGNORE_INDEX, batch["labels"][j])),
                    skip_special_tokens=skip_special_tokens,
                )
            )
            all_q_types.append(batch_q_types[j])

        results = llm.generate(vllm_inputs, sampling_params, lora_request=lora_request)
        preds = [result.outputs[0].text for result in results]

        all_prompts.extend(prompts)
        all_preds.extend(preds)
        all_labels.extend(labels)
        gc.collect()  # release per-batch buffers before the next batch

    # Write results to file, one JSON object per line.
    with open(save_name, "w", encoding="utf-8") as f:
        for text, pred, label, qtype in zip(all_prompts, all_preds, all_labels, all_q_types):
            record = {"prompt": text, "predict": pred, "label": label, "qtype": qtype}
            f.write(json.dumps(record, ensure_ascii=False) + "\n")

    print("*" * 70)
    print(f"{len(all_prompts)} total generated results have been saved at {save_name}.")
    print("*" * 70)

    # Tear down the vLLM engine explicitly so background threads and CUDA
    # resources are released before interpreter shutdown.
    print("[DEBUG] 开始清理 vLLM 资源")
    try:
        del llm
        print("[DEBUG] del llm finish")

        gc.collect()
        print("[DEBUG] gc.collect() finish")

        time.sleep(0.5)  # give background worker threads a moment to wind down

        print("[DEBUG] 活动线程列表：", threading.enumerate())

        sys.stdout.flush()
        sys.stderr.flush()
    except Exception as e:
        # Cleanup is best-effort; never let it mask a successful run.
        print(f"[WARN] 清理过程出错: {e}")

    print("[DEBUG] 推理脚本执行完毕，准备退出")
    sys.exit(0)  # exit normally so CUDA resources go through regular teardown


if __name__ == "__main__":
    # Expose every keyword argument of `vllm_infer` as a CLI flag via python-fire.
    fire.Fire(vllm_infer)