from langchain.text_splitter import CharacterTextSplitter,MarkdownTextSplitter,RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
from langchain_community.document_loaders import UnstructuredFileLoader

from langchain_community.document_loaders import AsyncHtmlLoader,Docx2txtLoader
from langchain_community.document_transformers import Html2TextTransformer
import os
from langchain_community.document_loaders import PyPDFLoader

from urlextract import URLExtract
from fastapi import APIRouter, Query, UploadFile, File,Form,Body
# All knowledge-base endpoints below are mounted under the /kdb prefix.
router = APIRouter(
    prefix= "/kdb",
    tags= ["knowledgeRouter"],
    responses= {404: {"description": "Not found path knowledge"}}
)


# Base directory against which the loader helpers resolve relative file paths.
work_dir = "."

# from graph_database.knowledgeGraph import 
# Split a document into text chunks ready for embedding.
def splitDocument(docPath):
    """Load the document at docPath, join its pages, and split the result
    into overlapping chunks.

    Any URLs found in the text are collected (crawling them via extractURL
    is currently disabled).

    Returns a list of chunk strings from RecursiveCharacterTextSplitter.
    """
    docs = getLoaderHandler(docPath)
    # fix: one URLExtract instance for the whole document, not one per page.
    extractor = URLExtract()
    urls = []
    parts = []
    for p in docs:
        content = p.page_content
        parts.append(content)
        urls.extend(extractor.find_urls(content))
    # "。" separates pages so sentences from adjacent pages don't fuse.
    text = "".join(part + "。" for part in parts)
    if len(urls) > 0:
        # fix: log the collected URLs once instead of once per page.
        print("找到文档中包含的超链接：：：", urls)
        # Crawling the URLs (extractURL) is disabled; appending only the
        # separator preserves the previous output exactly.
        text += "。"
    chunk_size = 400
    chunk_overlap = 20
    text = text.replace('\n', '')
    r_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    return r_splitter.split_text(text)

def extractURL(urls):
    """Crawl the given hyperlinks and return their contents as plain-text
    Documents.

    Best-effort: returns [] on any failure so callers that enrich a document
    with linked content never crash because of a bad link.
    """
    try:
        print("爬取超链接::", urls)
        loader = AsyncHtmlLoader(urls)
        docs = loader.load()
        html2text = Html2TextTransformer()
        docs_transformed = html2text.transform_documents(docs)
        print(docs_transformed)
        return docs_transformed
    except Exception as e:
        # fix: was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt; keep best-effort but log the cause.
        print("爬取超链接失败::", e)
        return []
    
# Load a .txt file.
def load_txt_file(txt_file):
    """Load txt_file (resolved against work_dir) via UnstructuredFileLoader
    and return the resulting documents."""
    loader = UnstructuredFileLoader(os.path.join(work_dir, txt_file))
    docs = loader.load()
    if docs:  # fix: guard against IndexError when the loader returns nothing
        print(docs[0].page_content[:100])
    return docs

# Load a .doc/.docx file.
def load_doc_splitter(txt_file):
    """Load a Word document (resolved against work_dir) and return its
    documents."""
    loader = UnstructuredWordDocumentLoader(os.path.join(work_dir, txt_file))
    docs = loader.load()
    if docs:  # fix: guard against IndexError when the loader returns nothing
        print(docs[0].page_content[:100])
    return docs

# Load a .md file.
def load_md_file(md_file):
    """Load a Markdown file (resolved against work_dir) and return its
    documents."""
    loader = UnstructuredMarkdownLoader(os.path.join(work_dir, md_file))
    docs = loader.load()
    if docs:  # fix: guard against IndexError when the loader returns nothing
        print(docs[0].page_content[:100])
    return docs

# Load a .pdf file.
def load_pdf_file(pdf_file):
    """Load a PDF (resolved against work_dir) and return its documents.

    NOTE(review): PyPDFLoader is imported at the top of the file but this
    helper uses UnstructuredFileLoader — confirm which loader is intended.
    """
    loader = UnstructuredFileLoader(os.path.join(work_dir, pdf_file))
    docs = loader.load()
    if docs:  # fix: guard against IndexError when the loader returns nothing
        print('pdf:\n', docs[0].page_content[:100])
    return docs

# Split a .txt file (splitting itself is currently handled in splitDocument).
def load_txt_splitter(txt_file):
    """Load txt_file and return its documents unsplit; chunking happens
    later in splitDocument."""
    return load_txt_file(txt_file)

# Split a .md file (splitting itself is currently handled in splitDocument).
def load_md_splitter(md_file):
    """Load md_file and return its documents unsplit; chunking happens
    later in splitDocument."""
    return load_md_file(md_file)

# Split a .pdf file (splitting itself is currently handled in splitDocument).
def load_pdf_splitter(pdf_file):
    """Load pdf_file and return its documents unsplit; chunking happens
    later in splitDocument."""
    return load_pdf_file(pdf_file)

def getLoaderHandler(docPath,chunk_size=500, chunk_overlap=10):
    """Dispatch to the loader matching docPath's file extension.

    chunk_size / chunk_overlap are kept for backward compatibility but are
    currently unused (chunking happens in splitDocument).

    Returns the loaded documents, or [] for an unrecognized extension.
    """
    doc_path = f'{docPath}'
    # fix: the original mixed `if`/`if`/`elif` chain so .txt/.doc branches
    # were not part of the same decision; a uniform early-return chain makes
    # exactly one loader run per path.
    if doc_path.endswith('.txt'):
        return load_txt_splitter(doc_path)
    if doc_path.endswith(('.doc', '.docx')):
        return load_doc_splitter(doc_path)
    if doc_path.endswith('.md'):
        return load_md_splitter(doc_path)
    if doc_path.endswith('.pdf'):
        return load_pdf_splitter(doc_path)
    return []

# docs = splitDocument("./d1.docx")
# print(docs)
from typing import Optional
from pydantic import BaseModel
from embeddingDB import pgDB
class kdb(BaseModel):
    # Shared request payload for the /kdb endpoints. Both fields are optional
    # because endpoints use different subsets: id for delete/info, title for create.
    id: Optional[int] = None
    title: Optional[str] = None
    
import embeddingDB

@router.post("/createKDB")
def createKDB(params:kdb):
    """Create a knowledge base named params.title and echo the title back.

    Returns {"code": 200, "msg": "创建成功", "data": <title>}.
    """
    rs = {"code":200,"msg":"创建成功","data":{}}
    title = params.title
    print(params)
    # SECURITY fix: title arrives from the request body. Double any single
    # quotes (standard SQL escaping) so a crafted title cannot break out of
    # the string literal. TODO(review): prefer pgDB parameter binding if the
    # helper exposes it.
    sql = '''
    INSERT INTO knowledge_db(name) VALUES('{}')
    '''.format(str(title).replace("'", "''"))
    db = pgDB()
    db.modify(sql)
    db.close()
    rs["data"] = title
    return rs

    
@router.post("/getKDBList")
def getKDBList(params:kdb):
    """List all knowledge bases with an entry count per base.

    Returns {"code": 200, "msg": "查询成功", "data": <rows>}.
    """
    # fix: msg was "创建成功" ("created"), copy-pasted from createKDB; this is
    # a query endpoint, so use "查询成功" like getKDBInfo does.
    rs = {"code":200,"msg":"查询成功","data":{}}
    # NOTE(review): COUNT(*)-1 only yields a correct count if every base has
    # exactly one placeholder row or zero entries — confirm against the schema.
    sql = '''
    SELECT knowledge_db.id,knowledge_db.name,COUNT(*)-1 as count 
    FROM knowledge_db
    LEFT JOIN knowledge ON knowledge_db.id = knowledge.kdb_id
    GROUP BY knowledge_db.id
    '''
    db = pgDB()
    data = db.select(sql)
    rs["data"] = data
    db.close()
    print(">>",rs["data"])
    return rs
    
@router.post("/deleteKDB")
def deleteKDB(params:kdb):
    """Remove the knowledge base row whose id matches params.id.

    params.id is validated as an int by the kdb model before it reaches
    the SQL text.
    """
    resp = {"code":200,"msg":"删除成功","data":{}}
    sql = f'''
    DELETE FROM knowledge_db 
    WHERE id = '{params.id}'
    '''
    conn = pgDB()
    conn.modify(sql)
    conn.close()
    return resp
    
@router.post("/getKDBInfo")
def getKDBInfo(params:kdb):
    """Return one knowledge base row, its knowledge entries, and the names
    of any uploaded files under ./uploadFiles/<id>.

    Returns {"code": 200, "msg": "查询成功",
             "data": {"fileList": [...], "kdb": <row + kList>}}.
    """
    rs = {"code":200,"msg":"查询成功","data":{}}
    # params.id is validated as an int by the kdb model before interpolation.
    sql = '''
    SELECT * 
    FROM knowledge_db
    WHERE id = '{}'
    '''.format(params.id)
    print(sql)
    db = pgDB()
    # fix: renamed from `kdb` — the old local shadowed the request model class.
    kdb_row = db.select(sql)[0]
    db.close()  # fix: this connection was previously never closed

    sql = '''
    SELECT content,create_time,id,kdb_id,file_name,download_url
    FROM knowledge
    WHERE kdb_id = '{}'
    '''.format(params.id)
    db = pgDB()
    kList = db.select(sql)
    db.close()  # fix: this connection was previously never closed
    kdb_row.update({"kList":kList})

    try:
        fileList = [{"fileName":f } for f in os.listdir("./uploadFiles/{}".format(params.id))]
    except OSError:
        # fix: narrowed from a bare `except:`; a missing upload directory for
        # this KDB is expected and simply means "no files".
        fileList=[]
    rs["data"] = {"fileList":fileList,"kdb":kdb_row}
    return rs
    
    
