import os
# Workaround for the "OMP: Error #15 (duplicate libiomp)" crash that occurs
# when multiple packages (e.g. torch and sklearn) each ship their own OpenMP
# runtime. Must be set before those libraries are imported.
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'

from langchain_community.document_loaders import TextLoader,DirectoryLoader
from langchain.chains import LLMChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain_community.vectorstores import FAISS
from langchain_core.language_models.llms import LLM
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import AutoModel, AutoTokenizer, pipeline
import torch
from typing import Any, List, Optional
from pydantic import Field, BaseModel

def test_no_model():
    """Demonstrate the document pipeline without any LLM.

    Steps: load a UTF-8 text file, split it into chunks, extract TF-IDF
    features per chunk, then L2-normalize the dense feature tensor so the
    rows are directly comparable by cosine similarity.
    """
    # Load a single text file. (A whole folder could be loaded with
    # DirectoryLoader instead.)
    loader = TextLoader("./study/doc1.txt", encoding="utf-8")
    docs = loader.load()
    print(f"TextLoader加载文件: {docs}\n")

    # Split on newlines. chunk_size/chunk_overlap are soft limits: the
    # splitter never breaks inside a separator-delimited piece, so chunks
    # may exceed chunk_size.
    splitter = CharacterTextSplitter(
        separator="\n",      # separator between chunks
        chunk_size=10,       # target chunk size (soft limit)
        chunk_overlap=1,     # overlap between adjacent chunks (soft limit)
    )
    result = splitter.split_documents(docs)
    print(f"文本分割: {result}\n")

    # Sparse TF-IDF features, one row per chunk.
    vectorizer = TfidfVectorizer()
    features = vectorizer.fit_transform([doc.page_content for doc in result])
    print(f"特征提取: {features}\n")

    # Densify and L2-normalize each row (p=2 along dim=1).
    feature_tensor = torch.from_numpy(features.toarray())
    normalized_features = torch.nn.functional.normalize(feature_tensor, p=2, dim=1)
    print(f"数据处理: {normalized_features}\n")


class ChatGLM2(LLM, BaseModel):
    """LangChain LLM wrapper around a locally stored ChatGLM2 checkpoint.

    Loads the tokenizer and model from ``model_path`` (with
    ``trust_remote_code=True``, required by the ChatGLM2 repo) and answers
    prompts through the model's ``chat`` API with empty history.
    """

    model_path: str = Field(..., description="模型路径")
    model: Any = Field(None, description="模型")
    tokenizer: Any = Field(None, description="分词器")

    def __init__(self, model_path: str, **kwargs):
        """Load tokenizer and model from *model_path*.

        Runs in fp16 on GPU when CUDA is available; otherwise keeps the
        model in fp32 on CPU instead of crashing on CPU-only machines.
        """
        # Pydantic validation in super().__init__ already assigns
        # self.model_path, so no manual re-assignment is needed.
        super().__init__(model_path=model_path, **kwargs)
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path,
            trust_remote_code=True,
        )
        model = AutoModel.from_pretrained(
            self.model_path,
            trust_remote_code=True,
        )
        if torch.cuda.is_available():
            # fp16 halves memory and is well supported on GPU; on CPU,
            # half precision is slow/unsupported, so stay in fp32.
            model = model.half().cuda()
        self.model = model.eval()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Answer a single prompt with no chat history.

        ``stop`` is accepted for LLM-interface compatibility but ignored —
        ChatGLM2's ``chat`` API has no stop-sequence support. ``**kwargs``
        absorbs extras (e.g. ``run_manager``) that newer LangChain versions
        pass to ``_call``.
        """
        response, _ = self.model.chat(self.tokenizer, prompt, history=[])
        return response

    @property
    def _identifying_params(self) -> dict:
        """Parameters that uniquely identify this LLM instance."""
        return {"model_path": self.model_path}

    @property
    def _llm_type(self) -> str:
        """Type tag used by LangChain for serialization/telemetry."""
        return "chatglm2"
    
def test_model():
    """Run one question through a local ChatGLM2 via an LCEL chain."""
    try:
        # Build the custom ChatGLM2 wrapper from the local checkpoint.
        model_path = "D:/demo/gitee/python/models/chatglm2-6b-int4"
        llm = ChatGLM2(model_path=model_path)

        # Prompt template with a single input variable.
        prompt = PromptTemplate(
            input_variables=["question"],
            template="问题: {question}\n回答:",
        )

        # Compose prompt -> llm with the LCEL pipe operator, then invoke.
        chain = prompt | llm
        result = chain.invoke({"question": "你好"})
        print(f"推理结果: {result}\n")

    except Exception as e:
        # Best-effort demo: report any failure instead of crashing.
        print(f"推理出错: {e}")


if __name__ == "__main__":
    # Run the no-LLM pipeline demo (loader / splitter / TF-IDF).
    # test_no_model()

    # Run the local ChatGLM2 inference demo.
    test_model()
