# -*- coding: utf-8 -*-

# @Project : fastapi-tutorial
# @Date    : 20240401-1844
# @Author  : robin

import os
from dotenv import load_dotenv

# Load environment variables from the .env file one directory up.
load_dotenv("../.env")

# Read the OpenAI credentials from the environment and re-export them so
# downstream libraries (openai / langchain) can pick them up.
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")
# os.environ values must be strings: assigning None raises TypeError, so
# only re-export keys that were actually found in the environment.
if api_key is not None:
    os.environ["OPENAI_API_KEY"] = api_key
if api_base is not None:
    os.environ["OPENAI_API_BASE"] = api_base

# ChatDoc:又一个智能文档助手
# 读取pdf、excel、doc三种常见的文档格式
# 根据文档内容，智能抽取内容并输出相应格式

# 1. 安装相关的包
#安装必须的包
# ! pip install docx2txt
# ! pip install pypdf
# ! pip install nltk

# 2. 第一个测试，加载docx
#导入必须的包
# from langchain.document_loaders import Docx2txtLoader
#
# # 定义chatdoc
# class ChatDoc():
#
#     @staticmethod
#     def getFile():
#         #读取文件
#         loader = Docx2txtLoader("example/fake.docx")
#         text = loader.load()
#         return text;
#
# docs = ChatDoc.getFile()
# print( docs )

# 3. 第二个测试，加载pdf文档
#导入必须的包
from langchain.document_loaders import PyPDFLoader

#定义chatdoc
# class ChatDoc():
#
#     @staticmethod
#     def getFile():
#         try:
#             #读取文件
#             loader = PyPDFLoader("example/fake.pdf")
#             text = loader.load()
#             return text;
#         except Exception as e:
#             print(f"Error loading files:{e}")
#
# docs = ChatDoc.getFile()
# print(docs)
#
# # 4.第三个测试，加载下excel
# #导入必须的包
# from langchain.document_loaders import UnstructuredExcelLoader
#
# #定义chatdoc
# class ChatDoc():
#     @staticmethod
#     def getFile():
#         try:
#             #读取文件
#             loader = UnstructuredExcelLoader("example/fake.xlsx",mode="elements")
#             text = loader.load()
#             return text;
#         except Exception as e:
#             print(f"Error loading files:{e}")
#
# docs = ChatDoc.getFile()
# print(docs)
#
# # 5. 整合优化，动态加载三种文件格式,增加了文本切割
# #导入必须的包
# from langchain.document_loaders import UnstructuredExcelLoader,Docx2txtLoader,PyPDFLoader
# from langchain.text_splitter import  CharacterTextSplitter
#
# #定义chatdoc
# class ChatDoc():
#     def __init__(self):
#         self.doc = None
#         self.splitText = [] #分割后的文本
#
#     def getFile(self):
#         doc = self.doc
#         loaders = {
#             "docx":Docx2txtLoader,
#             "pdf":PyPDFLoader,
#             "xlsx":UnstructuredExcelLoader,
#         }
#         file_extension = doc.split(".")[-1]
#         loader_class = loaders.get(file_extension)
#         if loader_class:
#             try:
#                 loader = loader_class(doc)
#                 text = loader.load()
#                 return text
#             except Exception as e:
#                 print(f"Error loading {file_extension} files:{e}")
#         else:
#              print(f"Unsupported file extension: {file_extension}")
#              return  None
#
#     #处理文档的函数
#     def splitSentences(self):
#         full_text = self.getFile() #获取文档内容
#         if full_text != None:
#             #对文档进行分割
#             text_split = CharacterTextSplitter(
#                 chunk_size=150,
#                 chunk_overlap=20,
#             )
#             texts = text_split.split_documents(full_text)
#             self.splitText = texts
#
#
# chat_doc = ChatDoc()
# chat_doc.doc = "example/fake.xlsx"
# chat_doc.splitSentences()
# print(chat_doc.splitText)
#
# # 6.向量化与存储索引
# #导入必须的包
# from langchain.document_loaders import UnstructuredExcelLoader,Docx2txtLoader,PyPDFLoader
# from langchain.text_splitter import  CharacterTextSplitter
# from langchain_openai import OpenAIEmbeddings
# from langchain.vectorstores import  Chroma
#
#
# # 定义chatdoc
# class ChatDoc():
#     def __init__(self):
#         self.doc = None
#         self.splitText = []  # 分割后的文本
#
#     def getFile(self):
#         doc = self.doc
#         loaders = {
#             "docx": Docx2txtLoader,
#             "pdf": PyPDFLoader,
#             "xlsx": UnstructuredExcelLoader,
#         }
#         file_extension = doc.split(".")[-1]
#         loader_class = loaders.get(file_extension)
#         if loader_class:
#             try:
#                 loader = loader_class(doc)
#                 text = loader.load()
#                 return text
#             except Exception as e:
#                 print(f"Error loading {file_extension} files:{e}")
#         else:
#             print(f"Unsupported file extension: {file_extension}")
#             return None
#
#             # 处理文档的函数
#
#     def splitSentences(self):
#         full_text = self.getFile()  # 获取文档内容
#         if full_text != None:
#             # 对文档进行分割
#             text_split = CharacterTextSplitter(
#                 chunk_size=150,
#                 chunk_overlap=20,
#             )
#             texts = text_split.split_documents(full_text)
#             self.splitText = texts
#
#     # 向量化与向量存储
#     def embeddingAndVectorDB(self):
#         embeddings = OpenAIEmbeddings()
#         db = Chroma.from_documents(
#             documents=self.splitText,
#             embedding=embeddings,
#         )
#         return db
#
#
# chat_doc = ChatDoc()
# chat_doc.doc = "example/fake.docx"
# chat_doc.splitSentences()
# chat_doc.embeddingAndVectorDB()

# 7. 索引并使用自然语言找出相关的文本块
#导入必须的包
# from langchain_community.document_loaders import UnstructuredExcelLoader,Docx2txtLoader,PyPDFLoader
# from langchain.text_splitter import  CharacterTextSplitter
# from langchain_openai import OpenAIEmbeddings
# from langchain_community.vectorstores import Chroma
#
#
# # 定义chatdoc
# class ChatDoc():
#     def __init__(self):
#         self.doc = None
#         self.splitText = []  # 分割后的文本
#
#     def getFile(self):
#         doc = self.doc
#         loaders = {
#             "docx": Docx2txtLoader,
#             "pdf": PyPDFLoader,
#             "xlsx": UnstructuredExcelLoader,
#         }
#         file_extension = doc.split(".")[-1]
#         loader_class = loaders.get(file_extension)
#         if loader_class:
#             try:
#                 loader = loader_class(doc)
#                 text = loader.load()
#                 return text
#             except Exception as e:
#                 print(f"Error loading {file_extension} files:{e}")
#         else:
#             print(f"Unsupported file extension: {file_extension}")
#             return None
#
#             # 处理文档的函数
#
#     def splitSentences(self):
#         full_text = self.getFile()  # 获取文档内容
#         if full_text != None:
#             # 对文档进行分割
#             text_split = CharacterTextSplitter(
#                 chunk_size=150,
#                 chunk_overlap=20,
#             )
#             texts = text_split.split_documents(full_text)
#             self.splitText = texts
#
#     # 向量化与向量存储
#     def embeddingAndVectorDB(self):
#         embeddings = OpenAIEmbeddings(
#             model="text-embedding-3-small"
#         )
#         db = Chroma.from_documents(
#             documents=self.splitText,
#             embedding=embeddings,
#         )
#         return db
#
#     # 提问并找到相关的文本块
#     def askAndFindFiles(self, question):
#         db = self.embeddingAndVectorDB()
#         retriever = db.as_retriever()
#         results = retriever.invoke(question)
#         return results
#
#
# chat_doc = ChatDoc()
# chat_doc.doc = "example/fake.docx"
# chat_doc.splitSentences()
# r = chat_doc.askAndFindFiles("这家公司叫什么名字?")
# print( r )

# 8. 使用多重查询提高文档检索精确度
#导入必须的包
# from langchain_community.document_loaders import UnstructuredExcelLoader,Docx2txtLoader,PyPDFLoader
# from langchain.text_splitter              import CharacterTextSplitter
# from langchain_openai                     import OpenAIEmbeddings
# from langchain_community.vectorstores     import Chroma
# #引入openai和多重向量检索
# from langchain.chat_models                import ChatOpenAI
# from langchain.retrievers.multi_query     import MultiQueryRetriever
#
# # 定义chatdoc
# class ChatDoc():
#     def __init__(self):
#         self.doc = None
#         self.splitText = []  # 分割后的文本
#
#     def getFile(self):
#         doc = self.doc
#         loaders = {
#             "docx": Docx2txtLoader,
#             "pdf":  PyPDFLoader,
#             "xlsx": UnstructuredExcelLoader,
#         }
#         file_extension = doc.split(".")[-1]
#         loader_class = loaders.get(file_extension)
#         if loader_class:
#             try:
#                 loader = loader_class(doc)
#                 text = loader.load()
#                 return text
#             except Exception as e:
#                 print(f"Error loading {file_extension} files:{e}")
#         else:
#             print(f"Unsupported file extension: {file_extension}")
#             return None
#
#             # 处理文档的函数
#
#     def splitSentences(self):
#         full_text = self.getFile()  # 获取文档内容
#         if full_text != None:
#             # 对文档进行分割
#             text_split = CharacterTextSplitter(
#                 chunk_size=150,
#                 chunk_overlap=20,
#             )
#             texts = text_split.split_documents(full_text)
#             self.splitText = texts
#
#     # 向量化与向量存储
#     def embeddingAndVectorDB(self):
#         embeddings = OpenAIEmbeddings(
#             model="text-embedding-3-small"
#         )
#         db = Chroma.from_documents(
#             documents=self.splitText,
#             embedding=embeddings,
#         )
#         return db
#
#     # 提问并找到相关的文本块
#     def askAndFindFiles(self, question):
#         db = self.embeddingAndVectorDB()
#         # 把问题交给LLM进行多角度的扩展
#         llm = ChatOpenAI(temperature=0)
#         retriever_from_llm = MultiQueryRetriever.from_llm(
#             retriever=db.as_retriever(),
#             llm=llm,
#         )
#         return retriever_from_llm.get_relevant_documents(question)
#
# chat_doc = ChatDoc()
# chat_doc.doc = "example/fake.docx"
# chat_doc.splitSentences()
#
# # 设置下logging查看生成查询
# import logging
#
# logging.basicConfig(level=logging.INFO)
# logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.DEBUG)
# unique_doc = chat_doc.askAndFindFiles("公司名称是什么?")
# print(unique_doc)

# 9. 使用上下文压缩检索降低冗余信息
#导入必须的包
from langchain_community.document_loaders import UnstructuredExcelLoader,Docx2txtLoader,PyPDFLoader
from langchain.text_splitter import  CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import  Chroma
#引入openai和多重向量检索
#from langchain.chat_models import ChatOpenAI
#from langchain.retrievers.multi_query import MultiQueryRetriever
#引入上下文压缩相关包
# from langchain_community.llms import  OpenAI
# from langchain.retrievers import ContextualCompressionRetriever
# from langchain.retrievers.document_compressors import  LLMChainExtractor
#
# # 定义chatdoc
# class ChatDoc():
#     def __init__(self):
#         self.doc = None
#         self.splitText = []  # 分割后的文本
#
#     def getFile(self):
#         doc = self.doc
#         loaders = {
#             "docx": Docx2txtLoader,
#             "pdf": PyPDFLoader,
#             "xlsx": UnstructuredExcelLoader,
#         }
#         file_extension = doc.split(".")[-1]
#         loader_class = loaders.get(file_extension)
#         if loader_class:
#             try:
#                 loader = loader_class(doc)
#                 text = loader.load()
#                 return text
#             except Exception as e:
#                 print(f"Error loading {file_extension} files:{e}")
#         else:
#             print(f"Unsupported file extension: {file_extension}")
#             return None
#
#             # 处理文档的函数
#
#     def splitSentences(self):
#         full_text = self.getFile()  # 获取文档内容
#         if full_text != None:
#             # 对文档进行分割
#             text_split = CharacterTextSplitter(
#                 chunk_size=150,
#                 chunk_overlap=20,
#             )
#             texts = text_split.split_documents(full_text)
#             self.splitText = texts
#
#     # 向量化与向量存储
#     def embeddingAndVectorDB(self):
#         embeddings = OpenAIEmbeddings(
#             model="text-embedding-3-small"
#         )
#         db = Chroma.from_documents(
#             documents=self.splitText,
#             embedding=embeddings,
#         )
#         return db
#
#     # 提问并找到相关的文本块
#     def askAndFindFiles(self, question):
#         db = self.embeddingAndVectorDB()
#         retriever = db.as_retriever()
#         llm = OpenAI(temperature=0)
#         compressor = LLMChainExtractor.from_llm(
#             llm=llm,
#         )
#         compressor_retriever = ContextualCompressionRetriever(
#             base_retriever=retriever,
#             base_compressor=compressor,
#         )
#         return compressor_retriever.get_relevant_documents(query=question)
#
#
# chat_doc = ChatDoc()
# chat_doc.doc = "example/fake.docx"
# chat_doc.splitSentences()
# #设置下logging查看生成查询
# import logging
# logging.basicConfig(level=logging.INFO)
# logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.DEBUG)
# unique_doc = chat_doc.askAndFindFiles("这间公司的负债有多少？")
# print(unique_doc)

# 11. 在向量存储里使用最大边际相似性（MMR）和相似性打分
# 导入必须的包
from langchain_community.document_loaders import UnstructuredExcelLoader, Docx2txtLoader, PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma


# 定义chatdoc
# ChatDoc: a small document assistant.
# Loads docx / pdf / xlsx files, splits them into overlapping chunks, embeds
# the chunks into an in-memory Chroma vector store, and retrieves the chunks
# most relevant to a natural-language question.
class ChatDoc():
    def __init__(self):
        self.doc = None       # path of the document to load
        self.splitText = []   # chunks produced by splitSentences()

    def getFile(self):
        """Load ``self.doc`` with the loader matching its file extension.

        Returns the list of loaded documents, or ``None`` when the extension
        is unsupported or loading fails (an error message is printed in both
        cases rather than raising).
        """
        doc = self.doc
        loaders = {
            "docx": Docx2txtLoader,
            "pdf": PyPDFLoader,
            "xlsx": UnstructuredExcelLoader,
        }

        # Lower-case the extension so e.g. "fake.PDF" is also recognised.
        file_extension = doc.split(".")[-1].lower()
        loader_class = loaders.get(file_extension)
        if loader_class is None:
            print(f"Unsupported file extension: {file_extension}")
            return None
        try:
            loader = loader_class(doc)
            return loader.load()
        except Exception as e:
            print(f"Error loading {file_extension} files:{e}")
            return None  # explicit: previous code fell through implicitly

    def splitSentences(self):
        """Split the loaded document into chunks and store them on self."""
        full_text = self.getFile()
        if full_text is not None:
            text_split = CharacterTextSplitter(
                chunk_size=150,
                chunk_overlap=20,
            )
            self.splitText = text_split.split_documents(full_text)

    def embeddingAndVectorDB(self):
        """Embed ``self.splitText`` and return an in-memory Chroma store."""
        embeddings = OpenAIEmbeddings(
            model="text-embedding-3-small"
        )
        db = Chroma.from_documents(
            documents=self.splitText,
            embedding=embeddings,
        )
        return db

    def askAndFindFiles(self, question):
        """Return the chunks most relevant to *question*.

        Uses similarity search with a low score threshold and k=1; the
        commented line shows the MMR (maximal marginal relevance) variant.
        """
        db = self.embeddingAndVectorDB()
        # retriever = db.as_retriever(search_type="mmr")
        retriever = db.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={"score_threshold": 0.1, "k": 1},
        )
        return retriever.get_relevant_documents(query=question)

# Drive the assistant: load the sample document and split it into chunks.
doc_assistant = ChatDoc()
doc_assistant.doc = "example/fake.docx"
doc_assistant.splitSentences()

# Enable logging so retrieval activity is visible on stdout.
import logging

logging.basicConfig(level=logging.INFO)
# NOTE(review): this logger targets the multi-query retriever from an earlier
# section; the similarity retriever used here may not emit on it — confirm.
logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.DEBUG)

relevant_chunks = doc_assistant.askAndFindFiles("这家公司的地址在哪里?")
print(relevant_chunks)

