# coding=utf-8
"""
    @project: wlou
    
    @file： llm.py
    @date：2024/3/6 11:48
    @desc:
"""
from typing import List, Dict
from urllib.parse import urlparse, ParseResult

from langchain_core.messages import (
    HumanMessage,
    SystemMessage,
)
# from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import WLOUBaseModel
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
# from langchain.prompts import PromptTemplate

# Libraries required by the custom model (kept for reference; see commented sample below)
# from llama_cpp import Llama
# from typing import Optional, List, Mapping, Any
# from langchain.llms.base import LLM
# from langchain import PromptTemplate,  LLMChain


def get_base_url(url: str):
    """Return ``scheme://netloc/path`` of *url* with params, query and
    fragment stripped, and without a single trailing slash."""
    parts = urlparse(url)
    cleaned = ParseResult(
        scheme=parts.scheme,
        netloc=parts.netloc,
        path=parts.path,
        params='',
        query='',
        fragment='',
    ).geturl()
    # Trim at most one trailing "/" so "http://h/p/" and "http://h/p" agree.
    if cleaned.endswith("/"):
        return cleaned[:-1]
    return cleaned


class LocalChatModel(WLOUBaseModel):
    """Chat model backed by a locally cached HuggingFace text-generation pipeline."""

    def __init__(self, model_name, cache_folder, model_kwargs, device='cpu'):
        # Custom cache directory for the downloaded model weights.
        self.cache_folder = cache_folder
        self.model_name = model_name
        self.model_kwargs = model_kwargs
        # Fix: `answer()` reads self.device and `new_instance()` passes `device=`,
        # but __init__ neither accepted nor stored it (TypeError/AttributeError).
        # Default 'cpu' keeps existing positional callers working.
        self.device = device
        super().__init__()

    @staticmethod
    def is_cache_model():
        # Instances are built on demand and not cached by the provider layer.
        return False

    @staticmethod
    def preprocess(text):
        """Escape literal newlines/tabs so text survives line-based transports.

        Fix: was an instance method without `self`, so the first argument
        bound as `self` and calls on an instance raised TypeError.
        """
        return text.replace("\n", "\\n").replace("\t", "\\t")

    @staticmethod
    def postprocess(text):
        """Inverse of :meth:`preprocess`: restore literal newlines/tabs."""
        return text.replace("\\n", "\n").replace("\\t", "\t")

    def custome(self):
        # Placeholder for a custom llama-cpp based LLM; see the commented
        # CustomLLM sample at the bottom of this file.
        pass

    def answer(self, text, sample=True, top_p=1, temperature=0.7):
        """Generate a reply to *text* using the local HuggingFace chat model.

        :param text: user prompt to answer.
        :param sample: whether to sample (False -> greedy decoding).
        :param top_p: nucleus-sampling probability mass.
        :param temperature: sampling temperature.
        :return: the assistant reply text.
        """
        llm = HuggingFacePipeline.from_model_id(
            model_id=self.model_name,
            task="text-generation",
            device=self.device,
            pipeline_kwargs=dict(
                max_new_tokens=512,
                # Fix: the sampling parameters were silently ignored before
                # (do_sample hard-coded to False, top_p/temperature unused).
                do_sample=sample,
                top_p=top_p,
                temperature=temperature,
                repetition_penalty=1.03,
            ),
            model_kwargs=self.model_kwargs,  # optional quantization config
        )
        chat_model = ChatHuggingFace(llm=llm)
        messages = [
            SystemMessage(content="You're a helpful assistant"),
            # Fix: a hard-coded demo question was sent instead of `text`.
            HumanMessage(content=text),
        ]
        ai_msg = chat_model.invoke(messages)
        print(ai_msg.content)
        return ai_msg.content

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        """Factory used by the provider registry to build a configured instance."""
        # __init__ now accepts `device`; this call previously raised TypeError.
        return LocalChatModel(
            model_name=model_name,
            cache_folder=model_credential.get('cache_folder'),
            device=model_credential.get('device', 'cpu'),
            model_kwargs={'trust_remote_code': True}
        )






# # 自定义的LLM类，继承自基础LLM类
# class CustomLLM(LLM):
#     # 模型的名称和路径常量
#     model_name = 'llama-2-7b-chat.Q4_K_M.gguf'

#     # 该方法使用Llama库调用模型生成回复
#     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
#         prompt_length = len(prompt) + 5
#         # 初始化Llama模型，指定模型路径和线程数
#         llm = Llama(model_path='/home/huangj/03_Llama/', n_threads=4)
#         # 使用Llama模型生成回复
#         response = llm(f"Q: {prompt} A: ", max_tokens=256)
        
#         # 从返回的回复中提取文本部分
#         output = response['choices'][0]['text'].replace('A: ', '').strip()

#         # 返回生成的回复，同时剔除了问题部分和额外字符
#         return output[prompt_length:]

#     # 返回模型的标识参数，这里只是返回模型的名称
#     @property
#     def _identifying_params(self) -> Mapping[str, Any]:
#         return {"name_of_model": self.model_name}

#     # 返回模型的类型，这里是"custom"
#     @property
#     def _llm_type(self) -> str:
#         return "custom"
#     def template(self):

#         # 定义输入模板，该模板用于生成花束的描述
#         template = """
#                     为以下的花束生成一个详细且吸引人的描述：
#                     花束的详细信息：
#                     ```{flower_details}```
#                 """

#         prompt = PromptTemplate(template=template, 
#                             input_variables=["flower_details"])

#         # 创建LLMChain实例
#         llm_chain = LLMChain(prompt=prompt, llm=llm)

#         # 需要生成描述的花束的详细信息
#         flower_details = "12支红玫瑰，搭配白色满天星和绿叶，包装在浪漫的红色纸中。"

#         # 打印生成的花束描述
#         print(llm_chain.run(flower_details))
#         pass