import os
# 环境配置（使用HF镜像加速下载）
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
import torch
from multiprocessing import Pool, cpu_count
from transformers import AutoModelForCausalLM, AutoTokenizer



# Global configuration.
MODEL_NAME = "Qwen/Qwen3-Reranker-0.6B"
MAX_LENGTH = 8192   # context budget shared by prefix + query/doc pair + suffix
TOKEN_FALSE = "no"  # answer token meaning "not relevant"
TOKEN_TRUE = "yes"  # answer token meaning "relevant"

# Chat-template prefix wrapped around every scored pair, per the
# Qwen3-Reranker usage example on the model card.
SYSTEM_PROMPT = (
    "<|im_start|>system\n"
    "Judge whether the Document meets the requirements based on the Query and the Instruct provided. "
    "Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n<|im_start|>user\n"
)
# Chat-template suffix: closes the user turn and opens an empty <think> block so
# the next generated token is the yes/no verdict.
# Fixed: the original contained "<|FunctionCallBegin|>" instead of "<think>",
# leaving an unmatched </think> and deviating from the model card's suffix.
SUFFIX_PROMPT = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"


def format_instruction(instruction, query, doc):
    """Build the `<Instruct>/<Query>/<Document>` prompt text for one pair.

    Falls back to a generic web-search instruction when `instruction` is None.
    """
    task = (
        instruction
        if instruction is not None
        else 'Given a web search query, retrieve relevant passages that answer the query'
    )
    return f"<Instruct>: {task}\n<Query>: {query}\n<Document>: {doc}"


def init_worker():
    """Per-process initializer: load the tokenizer/model and cache prompt tokens.

    Runs once inside each Pool worker, so every process owns its own model
    copy. Populates the module-level globals that process_batch() reads.
    """
    global tokenizer, model, token_false_id, token_true_id, prefix_tokens, suffix_tokens

    # Left padding so the last position of every row is the final real token.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, padding_side='left')

    # Pre-compute the ids and token sequences reused on every batch, so the
    # hot path never re-encodes the constant prompt scaffolding.
    token_false_id = tokenizer.convert_tokens_to_ids(TOKEN_FALSE)
    token_true_id = tokenizer.convert_tokens_to_ids(TOKEN_TRUE)
    prefix_tokens = tokenizer.encode(SYSTEM_PROMPT, add_special_tokens=False)
    suffix_tokens = tokenizer.encode(SUFFIX_PROMPT, add_special_tokens=False)

    # CPU inference: float32 is the stable dtype choice here.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float32,
        device_map="cpu",
    ).eval()


def process_batch(pairs):
    """Score one batch of formatted query-document pairs in this worker.

    Returns a list of floats: P("yes") for each pair, i.e. the probability
    that the model judges the document relevant. Relies on the globals set
    up by init_worker().
    """
    global tokenizer, model, token_false_id, token_true_id, prefix_tokens, suffix_tokens

    # Reserve room in the context window for the fixed prefix/suffix tokens.
    budget = MAX_LENGTH - len(prefix_tokens) - len(suffix_tokens)
    encoded = tokenizer(
        pairs,
        padding=False,
        truncation='longest_first',
        return_attention_mask=False,
        max_length=budget,
    )

    # Splice the cached chat-template prefix/suffix in at the token level,
    # avoiding a redundant encode/decode of the constant text per pair.
    encoded['input_ids'] = [
        prefix_tokens + ids + suffix_tokens for ids in encoded['input_ids']
    ]

    # Left-pad to the longest sequence in the batch and build tensors.
    batch = tokenizer.pad(
        encoded,
        padding=True,
        return_tensors="pt",
        max_length=MAX_LENGTH,
    )

    with torch.no_grad():
        # Logits at the final position decide the yes/no verdict.
        final_logits = model(**batch).logits[:, -1, :]
        # Two-way softmax restricted to the "no"/"yes" vocabulary entries;
        # column 1 holds P("yes").
        yes_no = torch.stack(
            [final_logits[:, token_false_id], final_logits[:, token_true_id]],
            dim=1,
        )
        probs = torch.nn.functional.log_softmax(yes_no, dim=1).exp()
        return probs[:, 1].tolist()


def parallel_rerank(pairs, num_workers=None):
    """Score all pairs in parallel across CPU cores.

    Args:
        pairs: list of prompt strings built by format_instruction().
        num_workers: process count; defaults to all available CPU cores.

    Returns:
        List of P("yes") floats, one per pair, in the same order as `pairs`.
    """
    if not pairs:
        # Nothing to score — avoid spawning a pool whose every worker would
        # load a full model copy just to do no work.
        return []

    # Auto-select the process count (all available cores by default).
    num_workers = num_workers or cpu_count()
    print(f"使用 {num_workers} 个CPU核心进行并行处理")

    # Ceiling division so we create at most num_workers batches. The previous
    # floor-based size could create num_workers + 1 batches (e.g. 10 pairs on
    # 4 workers -> five batches of 2), forcing one worker to run two batches
    # serially while the others sit idle.
    batch_size = -(-len(pairs) // num_workers)
    batches = [pairs[i:i + batch_size] for i in range(0, len(pairs), batch_size)]

    # Each worker loads its own model via init_worker, then scores its batches.
    with Pool(processes=num_workers, initializer=init_worker) as pool:
        results = pool.map(process_batch, batches)

    # Flatten the per-batch score lists, preserving input order.
    return [score for batch_result in results for score in batch_result]


# Example usage / smoke test.
if __name__ == "__main__":
    task = 'Given a web search query, retrieve relevant passages that answer the query'
    query = "What is the capital of China?"

    # Ten candidate documents: the first five are (directly or indirectly)
    # relevant to the query, the last five are distractors.
    documents = [
        "The capital of China is Beijing.",
        "Beijing is the political and cultural center of China.",
        "China's capital city is located in the northern part of the country.",
        "Beijing hosted the 2008 Summer Olympics.",
        "The Forbidden City is located in the heart of Beijing.",
        "Gravity is a force that attracts two bodies towards each other.",
        "Photosynthesis is the process by which plants use sunlight to synthesize foods.",
        "Quantum mechanics describes nature at the scale of atoms and subatomic particles.",
        "The telephone was invented by Alexander Graham Bell in 1876.",
        "Earthquakes are caused by the sudden release of energy in the Earth's crust.",
    ]

    # Pair the single query with every document, then score in parallel.
    pairs = [format_instruction(task, query, doc) for doc in documents]
    scores = parallel_rerank(pairs, num_workers=4)

    # Report each document with its relevance probability.
    print(f"查询: {query}")
    print("文档重排结果:")
    for rank, (doc, score) in enumerate(zip(documents, scores), start=1):
        relevance = "相关" if score > 0.5 else "不相关"
        print(f"{rank}. 分数: {score:.4f} ({relevance}) - {doc}")
