import base64
import io
import logging
import os

from dotenv import load_dotenv
from PIL import Image
from langchain_openai import ChatOpenAI
from langchain_community.embeddings import XinferenceEmbeddings
# Tongyi Qwen
from langchain_community.llms.tongyi import Tongyi
from langchain_community.chat_models import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings

# Load environment variables from the .env file into os.environ.
load_dotenv()

# Connection settings for the virtaicloud API (may be None if unset).
base_url = os.getenv("virtaicloud_base_url")
api_key = os.getenv("virtaicloud_api_key")
modelname = os.getenv("virtaicloud_modelname")

# Resolve a ./log directory under the current working directory.
# NOTE(review): log_path is computed but never used in this file —
# presumably intended for a file handler; confirm before removing.
curr_path = os.getcwd()
log_path = os.path.join(curr_path, 'log')
# Module-level logger (no handler attached here, so output falls back
# to whatever the application's root logging configuration provides).
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

def get_qwen_models(model="qwen-turbo"):
    """Create Qwen (Tongyi) LLM clients.

    Args:
        model: Qwen model name, e.g. ``"qwen-turbo"``.

    Returns:
        tuple: ``(llm, chat)`` — a completion-style ``Tongyi`` client and
        a ``ChatTongyi`` chat client, both capped at 1024 output tokens.
    """
    # Completion client: mildly creative sampling settings.
    llm = Tongyi(model=model, temperature=0.1, top_p=0.7, max_tokens=1024)

    # Chat client: near-deterministic settings for consistent answers.
    chat = ChatTongyi(model=model, temperature=0.01, top_p=0.2, max_tokens=1024)

    return llm, chat

def get_bge_embeddings():
    """Build a BGE embeddings client backed by a Xinference server.

    Reads ``bge_server_url`` and ``bge_model_uid`` from the environment
    (either may be None if the variable is unset).

    Returns:
        XinferenceEmbeddings: client bound to the configured server/model.
    """
    url = os.getenv("bge_server_url")
    uid = os.getenv("bge_model_uid")
    return XinferenceEmbeddings(server_url=url, model_uid=uid)

def image_to_base64(image_path, max_size=(640, 640)):
    """Load an image, downscale it, and return it as a base64 JPEG string.

    The image is converted to RGB if needed, resized in place with
    ``Image.thumbnail`` (preserves aspect ratio, never upscales), and
    re-encoded as JPEG before base64 encoding.

    Args:
        image_path: Path to the image file to read.
        max_size: ``(width, height)`` bounding box for the thumbnail.

    Returns:
        str | None: Base64-encoded JPEG data, or ``None`` on any failure
        (best-effort: errors are printed, not raised).
    """
    try:
        with Image.open(image_path) as img:
            # JPEG cannot store alpha/palette modes, so normalize to RGB.
            if img.mode != 'RGB':
                img = img.convert('RGB')

            # Shrink in place to fit max_size while keeping aspect ratio.
            img.thumbnail(max_size)

            # Re-encode as JPEG into an in-memory buffer.
            buffered = io.BytesIO()
            img.save(buffered, format="JPEG")

            # Base64-encode the JPEG bytes and return as text.
            return base64.b64encode(buffered.getvalue()).decode('utf-8')
    except Exception as e:
        # Deliberate best-effort: callers treat None as "no image".
        print(f"Error processing image: {e}")
        return None