from langchain_community.chat_models import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.llms.tongyi import Tongyi
from langchain_huggingface import HuggingFaceEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer
from lc_frame.lc_models.api.customllm import MyLocalModel
from langchain_community.embeddings import XinferenceEmbeddings
#from openai import OpenAI



def get_qwen():
    """Build and return the Qwen model stack used by this project.

    Returns:
        tuple: ``(chat, llm, embed)`` where
            - ``chat`` is a ``ChatTongyi`` chat model (qwen-plus, low
              temperature/top_p for near-deterministic chat output),
            - ``llm`` is a ``Tongyi`` completion model (qwen-plus, slightly
              higher sampling temperature for generation),
            - ``embed`` is a ``HuggingFaceEmbeddings`` instance backed by a
              local bge-large-zh-v1.5 checkpoint.

    Note:
        ``ChatTongyi``/``Tongyi`` require the DashScope API key to be
        configured in the environment (``DASHSCOPE_API_KEY``); the embedding
        model path is machine-specific — verify it exists on the target host.
    """
    chat = ChatTongyi(model='qwen-plus', temperature=0.01, top_p=0.2, max_tokens=1024)
    llm = Tongyi(model='qwen-plus', temperature=0.1, top_p=0.7, max_tokens=1024)
    # Local Chinese embedding model; earlier experiments used DashScope
    # text-embedding-v1 and all-mpnet-base-v2.
    embed = HuggingFaceEmbeddings(model_name="/gemini/pretrain2/bge-large-zh-v1.5")

    return chat, llm, embed

# def get_local_qwen():
#     openai_api_key = "EMPTY"
#     openai_api_base = "http://direct.virtaicloud.com:26697/v1"
    
#     client = OpenAI(
#         api_key=openai_api_key,
#         base_url=openai_api_base,
#     )
    
#     chat_response = client.chat.completions.create(
#         model="Qwen2.5-0.5B-Instruct",
#         messages=[
#             {"role": "system", "content": "你是一个很有用的助手。"},
#             {"role": "user", "content": "中华人民共和国的首都是哪里？"},
#         ]
#     )
#     print("Chat response:", chat_response)

