
from langchain.chains import LLMChain
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.llms import Tongyi
from langchain_qianwen import Qwen_v1
from typing import Any, List
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
import numpy as np
import os

# --- API key and model setup -------------------------------------------------
# NOTE(review): hard-coded placeholder key — load from the environment or a
# secrets manager in real deployments. setdefault (instead of plain assignment)
# avoids clobbering a real key that is already exported in the environment.
os.environ.setdefault("DASHSCOPE_API_KEY", "secrect key")

# These names are consumed at module level further down (DemoChain at L48,
# RetrievalQA at L81), so they must be defined unconditionally — the original
# hid them behind ``if __name__ == "__main__":`` which made the file raise
# NameError when imported as a module.
jock_template = "给我讲个有关 {topic} 的笑话"
prompttem = ChatPromptTemplate.from_template(jock_template)
prompt = prompttem.format(topic="产品经理")

# Qwen chat model client; streaming is enabled but no streaming callback
# handler is attached, so output is only visible once a call returns.
llm = Qwen_v1(
    model_name="qwen-turbo",
    temperature=0.18,
    streaming=True,
)

# Reusable component that combines the llm and prompt: joke template + query.
class DemoChain:
    """Minimal LLMChain-style wrapper: fill a prompt template, call the LLM.

    Parameters
    ----------
    llm:
        Any object exposing ``invoke(prompt) -> response``.
    prompt:
        A string template containing a ``{query}`` placeholder (a
        ``{context}`` placeholder is optional).

    Fixes over the original draft:
    * ``__init__`` now uses its ``prompt`` argument instead of silently
      reading the module-global ``prompttem``.
    * ``run`` invokes the LLM and returns its response on *both* branches —
      the original only called the model when ``context`` was None, so any
      call that supplied a context returned None without contacting the LLM.
    """

    def __init__(self, llm: Any, prompt: str) -> None:
        self.llm = llm
        self.prompt = prompt

    def run(self, query: str, context: Any = None) -> Any:
        """Format the stored template with ``query`` (and ``context`` when
        given), send it to the LLM, and return the model's response."""
        if context is not None:
            filled = self.prompt.format(query=query, context=context)
        else:
            filled = self.prompt.format(query=query)
        # Debug trace of the final prompt sent to the model.
        print("query=%s -> prompt=%s" % (query, filled))
        print("*" * 10)
        return self.llm.invoke(filled)
# Instantiate the demo chain with the module-level LLM and formatted prompt.
chain=DemoChain(llm=llm, prompt=prompt)
#print(chain.run(query="天道酬勤"))

# Load the knowledge-base text file located next to this script.
current_dir = os.path.dirname(os.path.abspath(__file__))
knowledge_base_path = os.path.join(current_dir, "知识库.txt")
with open(knowledge_base_path, "r", encoding="utf-8") as file:
    texts = file.read()

# DashScope text embeddings; embed_query("中国") is a one-off smoke test that
# the embedding endpoint is reachable (result only used by the debug print).
embedding=DashScopeEmbeddings(model="text-embedding-v1")
query_result = embedding.embed_query("中国")
#print("embedding query.shape=", np.array(query_result).shape)

# Split the text into chunks.
# Alternative: CharacterTextSplitter(chunk_size=2048, chunk_overlap=0) splits at exact lengths.
class TextSpliter(CharacterTextSplitter):
    """Line-based splitter that wraps every resulting chunk in a ``Document``.

    Unlike the fixed-size base ``CharacterTextSplitter``, this emits one
    ``Document`` per separator-delimited line so each knowledge-base sentence
    stays intact, tagging each with its source file in the metadata.
    """

    def __init__(self, separator: str = "\n", **kwargs: Any):
        # Bug fix: the original accepted ``separator`` (default "\n\n") but
        # then hard-coded text.split("\n"), silently ignoring the argument.
        # The default is now "\n" so the no-argument caller keeps the exact
        # old behavior while explicit separators are finally honored.
        super().__init__(separator, **kwargs)
        self._sep = separator

    def split_text(self, text: str) -> List[Document]:
        # Return annotation fixed: the original promised List[str] but
        # actually returned Document objects.
        chunks = text.split(self._sep)
        # Attach provenance metadata so retrieved chunks trace back to the file.
        return [
            Document(page_content=chunk, metadata={"from": "知识库.txt"})
            for chunk in chunks
        ]
# Split the knowledge base: one Document (with source metadata) per line.
text_splitter=TextSpliter()
texts = text_splitter.split_text(texts)  # list of Document objects, one per line
texts1 = [text.page_content for text in texts]  # plain text only (not used below)
#print (texts) 

# Index the documents in an in-memory Chroma vector store and expose it
# through the standard retriever interface.
db=Chroma.from_documents(texts,embedding)
retriever =db.as_retriever()

# Retrieval-augmented QA over the knowledge base; "stuff" concatenates all
# retrieved documents into a single prompt for the LLM.
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
query = "中国"
#rsp = qa.run({"query": query})
#print(rsp) 


