# coding:utf-8
from ipex_llm.transformers import AutoModelForCausalLM, AutoModelForSpeechSeq2Seq
from transformers import AutoTokenizer, AutoProcessor, WhisperProcessor
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from datetime import datetime
import pytz


class Config:
    """Central configuration: model paths, runtime flags, and lazily loaded
    model/tokenizer handles.

    Path and credential values are plain class attributes. Heavyweight
    models are exposed as properties that load on first access (lazy
    loading), so a model is only pulled onto its device when actually used.
    """

    # --- Model and data paths -------------------------------------------
    # model_path = "../models/Qwen/Qwen2___5-1___5B-Instruct"  # smaller chat model
    model_path = "../models/Qwen/Qwen2___5-3B-Instruct"  # chat model path
    data_path = "../data"  # source-document directory
    persist_dir = "../chroma_db"  # vector-store (Chroma) persistence directory
    embedding_model_path = "../models/AI-ModelScope/bge-small-zh-v1___5"  # embedding model path
    image_model_path = '../models/LLM-Research/Phi-3-vision-128k-instruct'  # image-understanding model path
    audio_model_path = '../models/AI-ModelScope/whisper-large-v3'  # speech-to-text model path
    rerank_path = '../models/rerank_model'  # reranker model path
    save_dir = "../uploaded_images/"  # directory for saving uploaded images
    filename = "uploaded_image.jpg"  # file name used for the saved image
    max_new_tokens = 1024  # max generation length for the chat model

    # --- Credentials (placeholders — fill in before use) ----------------
    weather_key = " "  # QWeather key (https://www.qweather.com)
    api_token = " "  # API token (https://api.siliconflow.cn/v1/chat/completions)

    # --- Inference-mode flags -------------------------------------------
    # Declared at class level so they always exist, even before set_mode()
    # is called (previously an AttributeError waiting to happen). Defaults
    # match "本地推理" (local inference).
    use_chat_api = False
    use_image_to_text_api = False
    use_audio_to_text_api = False

    def __init__(self):
        # Lazily initialized model/tokenizer handles; each is populated on
        # first access through the corresponding property below.
        self._chat_model = None
        self._chat_tokenizer = None
        self._embed_model = None
        self._image_to_text_model = None
        self._image_to_text_model_tokenizer = None
        self._audio_to_text_model = None
        self._audio_to_text_model_tokenizer = None

    @classmethod
    def set_mode(cls, inference_mode: str) -> None:
        """Set the three API-usage flags from the inference mode.

        Args:
            inference_mode: either "在线推理" (online/API inference) or
                "本地推理" (local inference).

        Raises:
            ValueError: if ``inference_mode`` is neither recognized mode
                (previously an unknown mode was silently ignored, leaving
                the flags untouched).
        """
        if inference_mode == "在线推理":  # online: route everything via APIs
            use_api = True
        elif inference_mode == "本地推理":  # local: run models on-device
            use_api = False
        else:
            raise ValueError(f"Unknown inference mode: {inference_mode!r}")
        cls.use_chat_api = use_api
        cls.use_image_to_text_api = use_api
        cls.use_audio_to_text_api = use_api

    # --- Lazily loaded models -------------------------------------------

    @property
    def chat_model(self):
        """Return ``(model, tokenizer)`` for chat, loading on first use.

        The model is loaded in 4-bit via ipex-llm from the ModelScope hub
        and moved to the Intel XPU in half precision.
        NOTE: a pre-quantized checkpoint could instead be restored with
        ``AutoModelForCausalLM.load_low_bit(...)``.
        """
        if self._chat_model is None:
            self._chat_model = AutoModelForCausalLM.from_pretrained(
                self.model_path, load_in_4bit=True,
                trust_remote_code=True, model_hub='modelscope'
            ).half().to("xpu")
            self._chat_tokenizer = AutoTokenizer.from_pretrained(
                self.model_path, trust_remote_code=True
            )
        return self._chat_model, self._chat_tokenizer

    @property
    def embed_model(self):
        """Return the HuggingFace embedding model, loading on first use."""
        if self._embed_model is None:
            self._embed_model = HuggingFaceEmbedding(model_name=self.embedding_model_path)
        return self._embed_model

    @property
    def image_to_text_model(self):
        """Return ``(model, processor)`` for image understanding, loading
        on first use.

        Loaded with sym_int4 quantization and eager attention on the Intel
        XPU; the vision embedding module is kept unquantized
        (``modules_to_not_convert``).
        """
        if self._image_to_text_model is None:
            self._image_to_text_model = AutoModelForCausalLM.from_pretrained(
                self.image_model_path, trust_remote_code=True, load_in_low_bit="sym_int4",
                _attn_implementation="eager",
                modules_to_not_convert=["vision_embed_tokens"]
            ).half().to('xpu')
            self._image_to_text_model_tokenizer = AutoProcessor.from_pretrained(
                self.image_model_path, trust_remote_code=True
            )
        return self._image_to_text_model, self._image_to_text_model_tokenizer

    @property
    def audio_to_text_model(self):
        """Return ``(model, processor)`` for speech-to-text, loading on
        first use.

        Whisper is loaded in 4-bit and kept on CPU in eval mode.
        NOTE: a pre-quantized checkpoint could instead be restored with
        ``AutoModelForSpeechSeq2Seq.load_low_bit(...)``.
        """
        if self._audio_to_text_model is None:
            self._audio_to_text_model = AutoModelForSpeechSeq2Seq.from_pretrained(
                self.audio_model_path, load_in_4bit=True, optimize_model=False,
                use_cache=True, trust_remote_code=True
            ).eval().to('cpu')
            self._audio_to_text_model_tokenizer = WhisperProcessor.from_pretrained(
                self.audio_model_path, trust_remote_code=True
            )
        return self._audio_to_text_model, self._audio_to_text_model_tokenizer




