"""Model paths, LLM endpoint configuration, and loader helpers for BGE
embedding / reranker models and OpenAI-compatible chat endpoints."""

from FlagEmbedding import FlagAutoModel, FlagModel
from transformers import AutoModel, AutoTokenizer

# Local filesystem paths to the BGE embedding and reranker checkpoints.
BGE_LARGE_ZH_V1_5_MODEL = '/data1/models/BAAI/bge-large-zh-v1.5'
BGE_RERANKER_LARGE = "/data1/models/BAAI/bge-reranker-large"

# SECURITY NOTE(review): hard-coded API key committed to source control.
# Move this to an environment variable / secrets manager and rotate the key.
LLM_CHAT_API_KEY = "3KnSx1QPYUdmziUzLC81yHnCLvCgqA2A"

# OpenAI-compatible chat endpoints (presumably vLLM-style servers on the
# LAN — TODO confirm server type with the deployment owner).
QWEN_QWQ_CHAT_API_URL = "http://192.168.211.131:8900/v1"
QWEN_QWQ_CHAT_API_MODEL_NAME = "QwQ-32B"

QWEN_32B_CHAT_API_URL = "http://192.168.211.131:8901/v1"
QWEN_32B_CHAT_API_MODEL_NAME = "QWEN-32B"

def load_auto_tokenizer():
    """Load and return the HuggingFace tokenizer for the BGE zh-v1.5 model."""
    return AutoTokenizer.from_pretrained(BGE_LARGE_ZH_V1_5_MODEL)


def load_auto_model():
    """Load the raw HuggingFace model for the BGE zh-v1.5 checkpoint."""
    return AutoModel.from_pretrained(BGE_LARGE_ZH_V1_5_MODEL)


def load_model():
    """Load the BGE embedding model via FlagAutoModel, pinned to cuda:0.

    The retrieval instruction is prepended to queries at encode time.
    """
    instruction = "Represent this sentence for searching relevant passages: "
    return FlagAutoModel.from_finetuned(
        BGE_LARGE_ZH_V1_5_MODEL,
        query_instruction_for_retrieval=instruction,
        devices="cuda:0",  # if not specified, uses all available GPUs, or CPU when none
    )

def load_flag_model(query_instruction_for_retrieval=None, use_fp16=True):
    """Build a FlagModel embedding model on cuda:0.

    Args:
        query_instruction_for_retrieval: optional instruction string prepended
            to queries before encoding (None disables it).
        use_fp16: run the model in half precision for faster inference.
    """
    return FlagModel(
        BGE_LARGE_ZH_V1_5_MODEL,
        devices="cuda:0",
        query_instruction_for_retrieval=query_instruction_for_retrieval,
        use_fp16=use_fp16,
    )
def load_flag_reranker_large_model(devices="cuda:1", use_fp16=True):
    """Load the BGE reranker-large cross-encoder.

    Setting use_fp16 to True speeds up computation with a slight
    performance degradation.
    """
    from FlagEmbedding import FlagReranker

    print(f"Loading reranker model: {BGE_RERANKER_LARGE} ....")
    return FlagReranker(BGE_RERANKER_LARGE, use_fp16=use_fp16, devices=devices)

def get_openai_client(api_key=LLM_CHAT_API_KEY,base_url=QWEN_32B_CHAT_API_URL,model_name=QWEN_32B_CHAT_API_MODEL_NAME):
    """Create an OpenAI-compatible client for the configured chat endpoint.

    Returns:
        A (client, model_name) tuple so callers know which model to request.
    """
    from openai import OpenAI

    return OpenAI(api_key=api_key, base_url=base_url), model_name

def get_langchain_chat_openai(openai_api_base=QWEN_32B_CHAT_API_URL, model_name=QWEN_32B_CHAT_API_MODEL_NAME, openai_api_key=LLM_CHAT_API_KEY):
    """Build a LangChain ChatOpenAI wrapper pointed at the configured endpoint."""
    from langchain_community.chat_models import ChatOpenAI

    chat_model = ChatOpenAI(
        openai_api_base=openai_api_base,
        model_name=model_name,
        openai_api_key=openai_api_key,
    )
    return chat_model

def get_huggingface_embedding(model_name=BGE_LARGE_ZH_V1_5_MODEL):
    """Return a llama-index HuggingFaceEmbedding for the given model path."""
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    return HuggingFaceEmbedding(model_name=model_name)
