import os
from  vtdb import adbcli
from common import hztool
from langchain_community.document_loaders import Docx2txtLoader,TextLoader
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import text
from sqlalchemy import create_engine
from db.mysqlconn import getMySession,myeng,mydbkey

from sqlalchemy.ext.automap import automap_base

def get_embs():
    """Build and return a DashScope embeddings client.

    Uses the ``text-embedding-v3`` model; the API key is read from the
    ``QWEN_API_KEY`` environment variable.
    """
    return DashScopeEmbeddings(
        model="text-embedding-v3",
        dashscope_api_key=os.getenv("QWEN_API_KEY"),
    )

def dfspliter():
    """Return the project-default recursive character splitter.

    Splits on paragraph breaks, newlines, and Chinese/English sentence
    terminators; produces 2000-char chunks with 50-char overlap, and
    records each chunk's start index in its metadata.
    """
    separators = ["\n\n", "\n", "。", "."]
    return RecursiveCharacterTextSplitter(
        separators=separators,
        chunk_size=2000,
        chunk_overlap=50,
        length_function=len,
        add_start_index=True,
    )
 

def doc_sp(doc_url):
    """Load a Word (.docx) file and split it into chunks.

    Args:
        doc_url: Path to the Word document.

    Returns:
        List of chunked Document objects.
    """
    pages = Docx2txtLoader(doc_url).load()
    splitter = dfspliter()
    return splitter.split_documents(pages)
 
def recdb0(data):
    """Persist chunked documents to the vector store and to MySQL.

    Converts *data* into (texts, ids, metadatas) via hztool.chunk2dbcode,
    writes them to the "tdd4" vector collection, then inserts one t_file
    row and one t_file_chunk row per chunk id, committing at the end.
    """
    _data, idd, metadatas = hztool.chunk2dbcode(data)
    engine, session, Base, classes = mydbkey()

    File = classes.t_file
    FileChk = classes.t_file_chunk

    adbcli.add("tdd4", _data, metadatas, idd)
    # BUG FIX: query the collection we just wrote to ("tdd4"); the
    # original queried "tdd3", so the sanity-check never saw the new data
    result = adbcli.qry("tdd4", "是我们可以互相尊重", 3)
    print(f'运行结果为：{result}')

    # Insert the parent file row; flush (without committing) so the
    # auto-increment id is assigned and can be read back
    new_user = File(name='Alice')
    session.add(new_user)  # session.add returns None; no point keeping it
    session.flush()
    last_id = session.connection().connection.insert_id()  # MySQL LAST_INSERT_ID
    print("Last Insert ID:", last_id)

    # One child row per chunk id, linked to the new file row
    for item in idd:
        session.add(FileChk(file_id=last_id, ck=item))

    print("Last Insert ID:", last_id)
    session.commit()



def recdb(data):
    """Write chunked documents into the "tdd4" vector collection."""
    texts, ids, metas = hztool.chunk2dbcode(data)
    adbcli.add("tdd4", texts, metas, ids)


def pdf_sp(pdf_url):
    """Load a PDF and split its pages into chunks.

    Args:
        pdf_url: Path to the PDF file.

    Returns:
        List of chunked Document objects.
    """
    pages = PyPDFLoader(pdf_url).load_and_split()
    return dfspliter().split_documents(pages)

    
def txt_sp(txt_url, encoding='gbk'):
    """Load a plain-text file and split it into chunks.

    Args:
        txt_url: Path to the text file.
        encoding: File encoding (defaults to GBK for Chinese text).

    Returns:
        List of chunked Document objects.
    """
    document = TextLoader(txt_url, encoding=encoding).load()
    return dfspliter().split_documents(document)
     
def load0():
    """One-off loader: ingest a fixed .docx file into vector store and MySQL.

    Splits the document, writes its chunks to the "tdd3" collection, then
    inserts one t_file row plus one t_file_chunk row per chunk id.
    """
    # BUG FIX: the split helper defined in this module is doc_sp; the
    # original called an undefined doc_split
    data = doc_sp("d:/beifen/aitest2.docx")

    _data, idd, metadatas = hztool.chunk2dbcode(data)
    # BUG FIX: the original called an undefined dbkey() and unpacked three
    # values; mydbkey() (imported from db.mysqlconn) returns
    # (engine, session, Base, classes) — the same pattern recdb0 uses
    engine, session, Base, classes = mydbkey()

    File = classes.t_file
    FileChk = classes.t_file_chunk

    adbcli.add("tdd3", _data, metadatas, idd)
    result = adbcli.qry("tdd3", "是我们可以互相尊重")
    print(f'运行结果为：{result}')

    # Insert the parent file row; flush (without committing) so the
    # auto-increment id is assigned and can be read back
    new_user = File(name='Alice')
    session.add(new_user)  # session.add returns None; drop the dead assignment
    session.flush()
    last_id = session.connection().connection.insert_id()  # MySQL LAST_INSERT_ID
    print("Last Insert ID:", last_id)

    # One child row per chunk id, linked to the new file row
    for item in idd:
        session.add(FileChk(file_id=last_id, ck=item))

    print("Last Insert ID:", last_id)
    session.commit()

def dirload0(path, glob="**/*.pdf"):
    """Load every file matching *glob* under *path*, split, and persist.

    Args:
        path: Root directory to scan.
        glob: Glob pattern selecting files to load (default: all PDFs).
    """
    loader = DirectoryLoader(path=path, glob=glob, show_progress=True)
    documents = loader.load()

    text_splitter = dfspliter()

    # BUG FIX: the loaded variable is `documents`; the original passed the
    # undefined name `document`, raising NameError at runtime
    texts = text_splitter.split_documents(documents)
    recdb(texts)

def dirload(path):
    """Ingest every supported file under *path*, skipping failures.

    Errors raised for individual files are printed and do not stop the
    scan of the remaining files.
    """
    for file_path in hztool.pathlist(path):
        try:
            loadall(file_path)
        except Exception as err:
            print(err)


def docload(url):
    """Split a .docx file and persist its chunks to the vector store."""
    recdb(doc_sp(url))

def pdfload(url):
    """Split a PDF file and persist its chunks to the vector store."""
    recdb(pdf_sp(url))
 
 
def txtload(url, encoding="gbk"):
    """Split a text file (GBK by default) and persist its chunks."""
    recdb(txt_sp(url, encoding))

def loadall(url):
    """Dispatch *url* to the loader that matches its file type.

    The first matching predicate wins; files of unrecognized type are
    silently ignored.
    """
    handlers = [
        (hztool.isDoc, docload),
        (hztool.isPdf, pdfload),
        (hztool.isTxt, txtload),
    ]
    for matches, handler in handlers:
        if matches(url):
            handler(url)
            break
#load()
