from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS as Vectorstore
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document

import re, os, time, chardet
import numpy as np
import threading
import sentence_transformers

from plugins.common import settings
from plugins.common import error_helper, success_print, dir_file, response_fail, read_file, write_file, md5
from plugins.common import  CounterLock

 
# Embedding / chunking configuration pulled from the shared settings object.
chunk_size      = int(settings.model.m3e.embedding.size) # chunk size (characters) used by the text splitter
chunk_overlap   = int(settings.model.m3e.embedding.overlap) # overlap between adjacent chunks
chunk_count     = int(settings.model.m3e.embedding.count) # number of nearest chunks returned per FAISS search
device          = settings.model.m3e.embedding.device # device the embedding model runs on
model_path      = settings.model.m3e.embedding.path # local path of the sentence-transformers model

try:
    # HuggingFaceEmbeddings is built with an empty model name and its client is
    # swapped manually so the model loads from a local path on the chosen device.
    embeddings = HuggingFaceEmbeddings(model_name='')
    embeddings.client = sentence_transformers.SentenceTransformer(
        model_path, device=device)
except Exception as e:
    error_helper("知识库 加载失败", r"参考: README.md") 
    raise e


# In-memory cache of loaded FAISS stores, keyed by "<typename>[_<userid>]".
vectorstores={}
# Separator inserted between neighbouring chunks when stitching context together.
divider='\n'

#保存文档内容
def save_doc(title, content, typename, userid = None):
    '''
    Create or update one knowledge-base document.

    Logic:
        Look for an existing (title) file under the given type (typename).
            If a file with the same title exists:
                compare its stored content with (content);
                    if identical, do nothing and return;
                    if different, overwrite the (title).txt file and rebuild
                    the vector index for the whole type from disk.
            If the (title).txt file does not exist:
                create it, embed its content into the in-memory store, then
                persist the store to disk.
    '''
    
    content = doc_filter(content)

    save_info = get_save_info(typename, userid)
    # File name is the md5 of the title so arbitrary titles map to safe paths.
    filepath = save_info['text_path'] + '/' + md5(title) + '.txt'
    if os.path.exists(filepath) : # document already stored on disk?
        file_content = read_file(filepath)
        if file_content == content :
            return True  # content unchanged; nothing to do
        else :
            write_file(filepath, content)
            # Re-train every file of this type instead of patching the cached
            # store (FAISS offers no cheap per-document removal here).
            #vectorstores.remove(save_info['keyname'])
            file_to_memroy(save_info['text_path'], save_info['path'])
            # Reload the rebuilt store into the vectorstores cache.
            load_vectorstore(typename, userid)
    else :
        write_file(filepath, content)
        # New document: embed it and merge into the in-memory store...
        sava_to_memory(title, content, typename, userid)
        # ...then persist the updated store to disk.
        memory_save_disk(typename, userid)

    return True

#输出存储到缓存
def sava_to_memory(title, content, typename, userid):
    """Embed one document's content and merge it into the cached store for its type."""
    info = get_save_info(typename, userid)

    splitter = CharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap, separator='\n')
    pieces = splitter.split_documents(
        [Document(page_content=content, metadata={"source": title})])

    fresh_store = Vectorstore.from_texts(
        [piece.page_content for piece in pieces],
        embeddings,
        metadatas=[piece.metadata for piece in pieces])

    existing = load_vectorstore(typename, userid)
    if existing is None:
        # First document of this type: the fresh store becomes the cache entry.
        vectorstores[info['keyname']] = fresh_store
    else:
        # Otherwise fold the new vectors into the already-cached store.
        vectorstores[info['keyname']].merge_from(fresh_store)
    return True


#获得知识库列表
def get_doc_list(typename, userid = None):
    '''
    Return the list of document files stored for (typename, userid).
    '''
    info = get_save_info(typename, userid)
    return dir_file(info['text_path'])

#获得知识库详情
def get_doc_info(doc_id, typename, userid = None):
    '''
    Look up one document by (doc_id, typename, userid); empty fields when missing.
    '''
    info = get_save_info(typename, userid)
    file_path = info['text_path'] + '/'  + doc_id + '.txt'
    if not os.path.exists(file_path):
        return {
            'doc_id': '',
            'content': '',
        }

    # Detect the encoding first so files saved by other tools still decode.
    with open(file_path, 'rb') as raw:
        guessed = chardet.detect(raw.read())
    with open(file_path, 'r', encoding=guessed['encoding']) as handle:
        text = handle.read()

    return {
        'doc_id': doc_id,
        'content': text,
    }

#删除知识库内容
def del_doc(doc_id, typename, userid = None):
    '''
    Delete one document file, then rebuild the type's vector index from disk.
    '''
    info = get_save_info(typename, userid)
    target = info['text_path'] + '/'  + doc_id + '.txt'
    try:
        os.remove(target)
        print(f"文件 {target} 删除成功")

        # Re-train every remaining file of this type so the index matches disk.
        file_to_memroy(info['text_path'], info['path'])
    except OSError as e:
        # Best effort: a missing/undeletable file is treated as already gone.
        pass

    return True

#知识库内容查找
def find_doc(content, typename, userid = None, step=3):
    '''
    Search the type's FAISS index for chunks matching *content* and return
    each hit expanded with up to *step* neighbouring chunks of context.
    '''
    try:
        save_info = get_save_info(typename, userid)
        # Embed the query with the same embedding function used at index time.
        embedding = load_vectorstore(typename, userid).embedding_function(content)
        # FAISS returns (distances, indices); chunk_count caps the hit count.
        scores, indices = vectorstores[save_info['keyname']].index.search(np.array([embedding], dtype=np.float32), chunk_count)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                continue  # FAISS pads missing results with -1
            if scores[0][j]>210:continue  # distance cutoff: drop weak matches (empirical threshold)
            docs.append(get_doc_range(i,scores[0][j], step, save_info['keyname']))

        return docs
    except Exception as e:
        # Any failure (missing store, search error) degrades to "no matches".
        print(e)
        return []

#获得知识库id附近的上下文内容
def get_doc_range(id, score, step, keyname):
    '''Expand hit *id* with up to *step* neighbouring chunks from the same source.'''
    doc = get_doc_by_id(id,keyname)
    final_content=doc.page_content
    if step > 0:
        for i in range(1, step+1):
            try:
                # Prepend the chunk i positions before the hit, but only when it
                # belongs to the same source document (same stripped title).
                doc_before=get_doc_by_id(id-i,keyname)
                if get_title_by_doc(doc_before)==get_title_by_doc(doc):
                    final_content=process_strings(doc_before.page_content,divider,final_content)
            except:
                pass  # id-i may fall outside the index; skip silently
            try:
                # Likewise append the chunk i positions after the hit.
                doc_after=get_doc_by_id(id+i,keyname)
                if get_title_by_doc(doc_after)==get_title_by_doc(doc):
                    final_content=process_strings(final_content,divider,doc_after.page_content)
            except:
                pass  # id+i may fall outside the index; skip silently

    # .txt sources are rendered as a markdown link to the served /txt/ path.
    if doc.metadata['source'].endswith(".txt"):
        title=f"[{doc.metadata['source']}](/txt/{doc.metadata['source']})"
    else:
        title=doc.metadata['source']
    return {'title': title,'content':re.sub(r'\n+', "\n", final_content),"score":int(score)}
    
# Derive a comparable title from a chunk's source metadata.
def get_title_by_doc(doc):
    """Strip any 【...】 bracketed span from the chunk's source name."""
    source_name = doc.metadata['source']
    return re.sub('【.+】', '', source_name)

# Join two strings, de-duplicating the longest overlap between A's tail and B's head.
def process_strings(A, C, B):
    """Concatenate A and B with separator C, dropping A's suffix when it repeats B's prefix."""
    overlap_len = 0
    limit = min(len(A), len(B))
    for size in range(1, limit + 1):
        if A[-size:] == B[:size]:
            overlap_len = size
    if overlap_len:
        return A[:-overlap_len] + C + B
    return A + B
    
# Resolve storage key and disk paths for a (typename, userid) pair.
def get_save_info(typename, userid = None):
    """Return {'keyname', 'path', 'text_path'} for the given type and optional user."""
    if userid is None:
        keyname = typename
        suffix = ''
    else:
        keyname = typename + '_' + userid
        suffix = '/' + userid

    return {
        'keyname': keyname,
        'path': 'tools/zsk/memory/' + typename + suffix,
        'text_path': 'tools/zsk/text/' + typename + suffix,
    }

# Fetch a chunk Document from the cached store by its FAISS row id.
def get_doc_by_id(id,keyname):
    """Map a FAISS index position to its Document via the store's docstore."""
    store = vectorstores[keyname]
    docstore_id = store.index_to_docstore_id[id]
    return store.docstore.search(docstore_id)

# Load one type's vector store: memory cache first, then disk, else None.
def load_vectorstore(typename, userid):
    """Return the cached store for (typename, userid), loading from disk on a miss."""
    info = get_save_info(typename, userid)
    cached = vectorstores.get(info['keyname'])
    if cached is not None:
        return cached
    try:
        store = Vectorstore.load_local(
            info['path'], embeddings=embeddings)
    except Exception:
        # Nothing usable on disk yet: a fresh memory area will be created later.
        error_helper("没有读取到RTST记忆区%s，将新建。"%info['keyname'])
        return None
    vectorstores[info['keyname']] = store
    return store

# Text content filter applied to every document before storage/indexing.
def doc_filter(content):
    """Normalise line endings and collapse blank lines in document text.

    Fix: the original replaced ``\\r`` only AFTER the blank-line pass and then
    used a non-repeating ``\\n\\n`` substitution, so CR-only input such as
    ``"a\\r\\r\\rb"`` still came out with doubled newlines.  Normalising
    CR/CRLF first lets a single greedy pass collapse every blank-line run.

    :param content: raw document text
    :return: text with ``\\n`` endings and no blank lines
    """
    # Convert Windows (\r\n) and old-Mac (\r) line endings to plain \n first.
    content = re.sub(r'\r\n?', "\n", content)
    # Collapse any run of newlines (possibly holding other whitespace) to one.
    content = re.sub(r"\n\s*\n", "\n", content)

    return content

# Persist one type's cached vector store to its disk directory.
def memory_save_disk(typename, userid = None):
    """Write the cached FAISS store for (typename, userid) to local files."""
    info = get_save_info(typename, userid)
    store = vectorstores[info['keyname']]
    store.save_local(info['path'])

# Drop one store from the in-memory cache (the on-disk copy is untouched).
def delete_memory(keyname):
    """Remove the cached vector store registered under *keyname*."""
    vectorstores.pop(keyname)


# Throttles concurrent embedding work during bulk (re)indexing.
embedding_lock = CounterLock()
# Serialises merges into file_vectorstore across worker threads.
vectorstore_lock=threading.Lock()
# Documents accumulated by file_to_memroy and drained by make_index.
file_docs = []
# Vector store being assembled by the background embedding threads.
file_vectorstore = None

# Train every text file under a directory into the vector cache and save it.
def file_to_memroy(text_path, memory_path):
    """Embed every .txt file under *text_path* and save the FAISS index to *memory_path*.

    Reads each file with its detected encoding, batches documents into the
    module-level ``file_docs`` buffer and hands them to background embedding
    threads via ``make_index``; finally waits for the workers and persists
    the resulting store.

    Fixes vs. the original:
    - A read failure now skips only that file (as the error message already
      claimed) instead of aborting the whole run with ``return False``.
    - ``file_vectorstore`` is reset at the start; previously vectors from an
      earlier run (possibly another typename, or documents deleted since)
      were merged into the new index and written back to disk.
    - An empty/unreadable directory no longer crashes on ``save_local``.
    - ``os.makedirs`` creates missing parent directories too.
    """
    global file_docs
    global file_vectorstore

    # Create the target directory (including parents) when missing.
    if not os.path.exists(memory_path):
        os.makedirs(memory_path, exist_ok=True)

    file_docs = []
    # Start from a clean accumulator so this index reflects only text_path.
    file_vectorstore = None
    all_files = dir_file(text_path)
 
    # Loop over the files and embed their text in batches.
    length_of_read=0
    for i in range(len(all_files)):
        root, file=all_files[i]
        data = ""
        try:
            file_path = os.path.join(root, file)
            _, ext = os.path.splitext(file_path)
            if ext.lower() == '.txt':
                # Detect the encoding first so any text file decodes correctly.
                with open(file_path, 'rb') as f:
                    b = f.read()
                    result = chardet.detect(b)
                with open(file_path, 'r', encoding=result['encoding']) as f:
                    data = f.read()
        except Exception as e:
            # Skip only this file, as the message states; do not abort the run.
            error_helper("文件读取失败，当前文件已被跳过：",file,"。错误信息：",e)
            continue

        data = doc_filter(data)
        length_of_read+=len(data)
        file_docs.append(Document(page_content=data, metadata={"source": file}))
        # Flush a batch to the embedding workers roughly every 100k characters.
        if length_of_read > 1e5:
            success_print("处理进度",int(100*i/len(all_files)),f"%\t({i}/{len(all_files)})")
            make_index()
            length_of_read=0
 
    if len(file_docs) > 0:
        make_index()

    # Drain the embedding queue, then briefly hold both locks so no worker is
    # still building or merging a batch before the index is saved.
    while embedding_lock.get_waiting_threads()>0:
        time.sleep(0.1)

    with embedding_lock:
        time.sleep(0.1)
        with vectorstore_lock:
            success_print("知识库保存完成")

    if file_vectorstore is None:
        # No readable documents were found; nothing to persist.
        return True

    file_vectorstore.save_local(memory_path)
    success_print("知识库存储到本地完成")

    return True


# NOTE(review): name looks like a typo of "calc_embedding"; kept as-is because
# make_index targets it by this name.
def clac_embedding(texts, embeddings, metadatas):
    """Worker: embed one batch of texts and merge it into the global store.

    Runs on a background thread started by make_index; embedding_lock limits
    concurrent embedding work and vectorstore_lock serialises the merge.
    """
    global file_vectorstore
    with embedding_lock:
        # Heavy step: compute embeddings and build a FAISS store for the batch.
        vectorstore_new = Vectorstore.from_texts(texts, embeddings, metadatas=metadatas)
    with vectorstore_lock:
        if file_vectorstore is None:
            file_vectorstore = vectorstore_new
        else:
            file_vectorstore.merge_from(vectorstore_new)

def make_index():
    """Split the accumulated file_docs into chunks and embed them on a thread.

    Drains the module-level file_docs buffer, then starts a background
    clac_embedding worker; blocks briefly while too many workers are queued.
    """
    global file_docs
    text_splitter = CharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap, separator='\n')

    doc_texts = text_splitter.split_documents(file_docs)
    file_docs = []  # buffer handed off; the caller may start refilling it

    texts = [d.page_content for d in doc_texts]
    metadatas = [d.metadata for d in doc_texts]
    thread = threading.Thread(target=clac_embedding, args=(texts, embeddings, metadatas))
    thread.start()
    # Throttle: allow only a few embedding batches to queue up at once.
    while embedding_lock.get_waiting_threads()>2:
        time.sleep(0.1)

# Module import side effect: announce that the knowledge-base module loaded.
success_print('知识库加载成功')