from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_huggingface import HuggingFacePipeline
import os
from langchain_community.embeddings import DashScopeEmbeddings


# 创建一个聊天模型类
class ChatModel:
    """Factory for chat / embedding models configured via environment variables.

    Configuration is read from a ``.env`` file (loaded in ``__init__``):
      - ``model_name``:      name of the remote OpenAI-compatible chat model
      - ``base_model``:      local path (or HF hub id) of the causal-LM weights
      - ``embedding_model``: name of the DashScope embedding model
      - ``dash_scope_key``:  DashScope API key
    """

    def __init__(self):
        # Load variables from a .env file into the process environment so
        # the getters below can read them via os.getenv.
        load_dotenv()

    @staticmethod
    def _require_env(key):
        """Return the value of environment variable *key*.

        Raises:
            ValueError: if the variable is unset or empty, instead of
                silently passing ``None`` into a model constructor and
                failing with a confusing downstream error.
        """
        value = os.getenv(key)
        if not value:
            raise ValueError(f"Environment variable '{key}' is not set")
        return value

    # Get the online (remote) model
    def get_line_model(self):
        """Return a remote chat model accessed through the OpenAI-compatible API."""
        return ChatOpenAI(model=self._require_env("model_name"), temperature=0.7)

    # Get the local model
    def get_local_model(self):
        """Load a local HuggingFace causal LM and wrap it as a LangChain LLM."""
        model_path = self._require_env("base_model")
        # 1. Load the tokenizer and model weights from the local path / hub id.
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForCausalLM.from_pretrained(model_path)
        # 2. Build the text-generation pipeline. do_sample=True is required
        #    for temperature to take effect: under the default greedy
        #    decoding, HuggingFace ignores the temperature value (and warns).
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=512,
            temperature=0.7,
            do_sample=True,
        )
        # 3. Wrap the pipeline in a LangChain-compatible LLM object.
        return HuggingFacePipeline(pipeline=pipe)

    # Get the vector embedding model
    def get_embedding_model(self):
        """Return the DashScope embedding model used to vectorize text."""
        return DashScopeEmbeddings(
            model=self._require_env("embedding_model"),
            dashscope_api_key=self._require_env("dash_scope_key"),
        )
