# Requires transformers>=4.51.0
import os
import torch
from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import CrossEncoder
import config

def set_environment():
    """Configure the Hugging Face environment variables from config.

    Sets the hub endpoint mirror and the XET-disable flag before any
    model download is attempted.
    """
    os.environ.update({
        "HF_ENDPOINT": config.HF_ENDPOINT,
        "HF_HUB_DISABLE_XET": config.HF_HUB_DISABLE_XET,
    })

def format_instruction(instruction, query, doc):
    """Build the prompt text fed to the reranker for one (query, doc) pair.

    Args:
        instruction: Task instruction; when None a generic web-search
            retrieval instruction is used.
        query: The search query.
        doc: The candidate document.

    Returns:
        The formatted "<Instruct>/<Query>/<Document>" prompt string.
    """
    if instruction is None:
        instruction = 'Given a web search query, retrieve relevant passages that answer the query'
    return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"

def load_rerank_model(model_config=None):
    """Load the Qwen3 reranker model.

    Args:
        model_config: Configuration dict; falls back to config.RERANK_CONFIG
            when None. Expected keys: "local_model_path", "model_id",
            "use_cross_encoder", "use_flash_attention", "use_fp16".

    Returns:
        dict: For the CrossEncoder path, {"model": ..., "use_cross_encoder": True}.
        Otherwise a dict holding the model, tokenizer, the "yes"/"no" token
        ids, max_length, and the pre-encoded chat prefix/suffix token lists
        consumed by process_inputs()/compute_scores().
    """
    if model_config is None:
        model_config = config.RERANK_CONFIG

    # Resolve the model path: prefer a local checkout, else the hub id.
    if model_config["local_model_path"]:
        print(f"正在加载本地模型: {model_config['local_model_path']}")
        model_path = model_config["local_model_path"]
    else:
        print(f"正在从HuggingFace加载模型: {model_config['model_id']}")
        model_path = model_config["model_id"]

    # CrossEncoder bundles tokenization and scoring in a single object.
    if model_config["use_cross_encoder"]:
        print("使用CrossEncoder加载模型")
        model = CrossEncoder(model_path)
        return {
            "model": model,
            "use_cross_encoder": True
        }

    # Left padding so the last position of every row holds the next-token
    # logits that compute_scores() reads.
    tokenizer = AutoTokenizer.from_pretrained(model_path, padding_side='left')

    # Build from_pretrained kwargs from the flags.
    # Bug fix: flash_attention_2 only supports fp16/bf16 and requires a CUDA
    # device, so the previous flash-attention-without-fp16 branch loaded an
    # fp32 CPU model, which transformers rejects at load time. Requesting
    # flash attention now forces fp16 (and .cuda() below) as well.
    load_kwargs = {}
    if model_config["use_flash_attention"]:
        load_kwargs["attn_implementation"] = "flash_attention_2"
        load_kwargs["torch_dtype"] = torch.float16
    if model_config["use_fp16"]:
        load_kwargs["torch_dtype"] = torch.float16

    model = AutoModelForCausalLM.from_pretrained(model_path, **load_kwargs)
    # Half-precision models are moved to the GPU, matching the original
    # fp16 branches; full-precision stays wherever from_pretrained put it.
    if load_kwargs.get("torch_dtype") is torch.float16:
        model = model.cuda()
    model = model.eval()

    if model_config["use_flash_attention"] and model_config["use_fp16"]:
        print("已启用flash_attention_2加速和FP16精度")
    elif model_config["use_flash_attention"]:
        print("已启用flash_attention_2加速")
    elif model_config["use_fp16"]:
        print("已启用FP16精度")

    # The reranker answers with a literal "yes"/"no" token; their ids are
    # used by compute_scores() to form the binary relevance distribution.
    token_false_id = tokenizer.convert_tokens_to_ids("no")
    token_true_id = tokenizer.convert_tokens_to_ids("yes")
    max_length = 8192

    # Qwen chat template wrapped around the user content; the <think> block
    # is intentionally left empty.
    prefix = "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n<|im_start|>user\n"
    suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
    prefix_tokens = tokenizer.encode(prefix, add_special_tokens=False)
    suffix_tokens = tokenizer.encode(suffix, add_special_tokens=False)

    return {
        "model": model,
        "tokenizer": tokenizer,
        "token_false_id": token_false_id,
        "token_true_id": token_true_id,
        "max_length": max_length,
        "prefix_tokens": prefix_tokens,
        "suffix_tokens": suffix_tokens,
        "use_cross_encoder": False
    }

def process_inputs(model_data, pairs):
    """Tokenize prompt strings and wrap them with the chat prefix/suffix.

    Args:
        model_data: Dict produced by load_rerank_model (tokenizer,
            max_length, prefix_tokens, suffix_tokens, model).
        pairs: List of formatted prompt strings.

    Returns:
        The padded batch (tensors moved onto the model's device).
    """
    tokenizer = model_data["tokenizer"]
    max_length = model_data["max_length"]
    prefix_tokens = model_data["prefix_tokens"]
    suffix_tokens = model_data["suffix_tokens"]

    # Reserve room for the fixed prefix/suffix so the final sequence never
    # exceeds max_length.
    content_budget = max_length - len(prefix_tokens) - len(suffix_tokens)
    batch = tokenizer(
        pairs,
        padding=False,
        truncation='longest_first',
        return_attention_mask=False,
        max_length=content_budget,
    )
    batch['input_ids'] = [
        prefix_tokens + ids + suffix_tokens for ids in batch['input_ids']
    ]
    batch = tokenizer.pad(batch, padding=True, return_tensors="pt", max_length=max_length)

    device = model_data["model"].device
    for key, tensor in batch.items():
        batch[key] = tensor.to(device)
    return batch

def compute_scores(model_data, inputs):
    """Score a processed batch as P("yes") over the yes/no token pair.

    Args:
        model_data: Dict with the model and the "yes"/"no" token ids.
        inputs: Batch produced by process_inputs.

    Returns:
        List of floats in [0, 1], one relevance probability per row.
    """
    model = model_data["model"]
    yes_id = model_data["token_true_id"]
    no_id = model_data["token_false_id"]

    with torch.no_grad():
        # Logits at the final (left-padded) position of each sequence.
        last_logits = model(**inputs).logits[:, -1, :]
        # Restrict to the two answer tokens: column 0 = "no", column 1 = "yes".
        pair_logits = torch.stack(
            [last_logits[:, no_id], last_logits[:, yes_id]], dim=1
        )
        log_probs = torch.nn.functional.log_softmax(pair_logits, dim=1)
        return log_probs[:, 1].exp().tolist()

def rerank_documents(model_data, queries, documents, instruction=None):
    """Score each (query, document) pair for relevance.

    Args:
        model_data: Dict returned by load_rerank_model.
        queries: List of query strings (same length as documents).
        documents: List of document strings.
        instruction: Optional task instruction; None selects the default.

    Returns:
        List of relevance scores, one per pair.
    """
    # CrossEncoder path: the wrapper handles formatting internally.
    if model_data.get("use_cross_encoder", False):
        pairs = [[q, d] for q, d in zip(queries, documents)]
        return model_data["model"].predict(pairs).tolist()

    # Manual path: build prompts, tokenize, then read yes/no probabilities.
    prompts = [
        format_instruction(instruction, q, d)
        for q, d in zip(queries, documents)
    ]
    return compute_scores(model_data, process_inputs(model_data, prompts))

def run_example():
    """Demo: load the reranker and score the example pairs from config."""
    set_environment()

    model_data = load_rerank_model()

    # Default web-search retrieval task.
    task = 'Given a web search query, retrieve relevant passages that answer the query'

    scores = rerank_documents(
        model_data,
        config.EXAMPLE_QUERIES,
        config.EXAMPLE_DOCUMENTS,
        task,
    )
    print("得分: ", scores)

# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    run_example()
