'''
rag 存储+检索； 基于langchain接口
'''
import os.path

from langchain_core.documents import Document
# from rag_tool import get_chunks

# pip install langchain-chroma
from langchain_chroma import Chroma  #
from base import CustomEmbeddings


def get_chunks(data_dir=r'/mnt/d/xiancai/bigfiles/rag_data/raw_data/LLMs',
               chunk_len=10000, chunk_overlap=50, exts=('.py',)):
    """Recursively collect source files under *data_dir* and split each into
    overlapping fixed-size chunks.

    Args:
        data_dir: root directory to walk for source files.
        chunk_len: maximum number of characters per chunk.
        chunk_overlap: number of characters shared between consecutive chunks.
        exts: file extensions (lowercase, with dot) to include.

    Returns:
        list of tuples (file_path, file_content, file_length, chunk_list),
        one per collected file.

    Raises:
        ValueError: if chunk_overlap >= chunk_len (the scan would never advance).
    """
    import os

    if chunk_overlap >= chunk_len:
        raise ValueError('chunk_overlap must be smaller than chunk_len')

    # Directories pruned from the walk (virtualenvs, VCS data, vendored code).
    ignore_dir_names = {'llm_py310', '.git', 'TARGET', 'third'}
    wanted_exts = {e.lower() for e in exts}

    # Collect target file paths.
    ls_path = []
    print(data_dir)
    print('-' * 50)
    for root, dirs, files in os.walk(data_dir):
        # In-place pruning: os.walk respects mutations of `dirs`.
        dirs[:] = [d for d in dirs if d not in ignore_dir_names]
        for fn in files:
            if os.path.splitext(fn)[1].lower() in wanted_exts:
                ls_path.append(os.path.join(root, fn))

    # Read every file fully into memory.
    ls_content = []
    for ind, path in enumerate(ls_path):
        with open(path, 'r', encoding='utf-8') as f:
            ls_content.append(f.read())
        print(f'{ind}/{len(ls_path)} {path}')

    # Split each file: uniform windows of chunk_len, stepping by
    # chunk_len - chunk_overlap so neighbouring chunks share context.
    step = chunk_len - chunk_overlap
    ls_len = []
    ls_chunk = []
    for content_str in ls_content:
        ls_len.append(len(content_str))
        chunks = []
        p = 0
        while p < len(content_str):
            chunks.append(content_str[p:p + chunk_len])
            p += step
        ls_chunk.append(chunks)  # chunks stay grouped per file

    return list(zip(ls_path, ls_content, ls_len, ls_chunk))


'''
预处理
生成jsonl, 使用llm增强
'''


def raw_data_2_jsonl(data_dir=r'/mnt/d/xiancai/bigfiles/rag_data/raw_data/LLMs',
                     save_dir=r'D:\code\other\LLMs\local_data'
                     ):
    """Build an LLM-augmented question/response dataset from raw source files.

    For every file returned by get_chunks(), asks an LLM to generate five
    questions about the code, then asks it to answer each question with the
    code as context. Items are dumped to *save_dir* as jsonl shards of at
    most 1000 records each.

    Args:
        data_dir: root directory of the raw source files.
        save_dir: output directory for the question_response_*.jsonl shards.
    """
    import json

    raw_data = get_chunks(data_dir=data_dir)  # [(path, content, len, chunks), ...]

    # Choose the chat model: a locally deployed Qwen, or the remote Tongyi API.
    is_custom_llm = True
    if is_custom_llm:
        import sys
        sys.path.append(r'/home/ps/zhangxiancai/llm_deploy/LLMs/my_langchain')
        os.environ['CUDA_VISIBLE_DEVICES'] = '1,2,3'
        from lc_infer import Custom_Langchain_ChatLLM
        model_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-14B-Instruct'
        llm = Custom_Langchain_ChatLLM(mode_name_or_path=model_path)  # local qwen-14b
    else:
        from langchain_community.chat_models.tongyi import ChatTongyi
        llm = ChatTongyi(streaming=False)  # remote BaseChatModel

    jsonl = []
    for ind, (pathx, contentx, lenx, chunks) in enumerate(raw_data):
        print(f'{ind}/{len(raw_data)}')
        item_dict = {
            'pathx': pathx,  # file path
            'lenx': lenx,  # file length in characters
            'content': contentx,  # full file content
            'question': [],  # generated questions
            'response': [],  # generated answers, one per question
        }

        # Ask the LLM for five questions about this file's code.
        code_str = contentx
        question_prompt = f'{code_str} \n生成5句和这段代码有关的询问内容，每句话以\n隔开'
        questions_str = llm.invoke(question_prompt).content
        # BUGFIX: the original `except:` handler printed the not-yet-bound
        # name `question_ls`, raising NameError whenever split() failed.
        # Guard on the actual payload type instead of a bare except.
        if isinstance(questions_str, str):
            question_ls = questions_str.split('\n')
        else:
            print(f'{questions_str} 格式错误')
            question_ls = [questions_str]
        item_dict['question'] = question_ls

        # Ask the LLM to answer each question, grounded on the code.
        for question_str in question_ls:
            response_prompt = f'问题：{question_str}\n结合以下资料回答问题，不超过5句话:\n{code_str}'
            response_str = llm.invoke(response_prompt).content
            item_dict['response'].append(response_str)
        jsonl.append(item_dict)

    # Persist as jsonl shards of at most 1000 records (dir created once).
    os.makedirs(save_dir, exist_ok=True)
    for p in range(0, len(jsonl), 1000):
        save_name = f'{save_dir}/question_response_{p}.jsonl'
        print(f'保存至 {save_name}')
        with open(save_name, 'w', encoding='utf-8') as f:
            for item in jsonl[p:p + 1000]:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')





'''
存储; 制作文档向量数据库 (store: build the document vector database)
'''


def creat_rag_db(save_path=f'/mnt/d/xiancai/bigfiles/rag_data/',
                 embd_model_path=r'/mnt/d/xiancai/bigfiles/models/bge-base-zh-v1___5'):
    """(Re)build the persisted "code_db" Chroma collection from chunked files.

    Args:
        save_path: directory where the Chroma data is persisted.
        embd_model_path: local path of the embedding model.
    """
    # Wrap every chunk in a Document; the page content is augmented with the
    # source path and total file length so retrieval hits carry provenance.
    docs = []
    for pathx, contentx, lenx, chunks in get_chunks():
        docs.extend(
            Document(
                page_content=f'文件路径:\n{pathx}\n文件总长度:\n{lenx}\n文件块:\n{chunk} ',
                metadata={"len": lenx, "path": pathx},
            )
            for chunk in chunks
        )

    # Open (or create) the persisted collection, wipe it, then index the docs.
    vector_store = Chroma(
        collection_name="code_db",
        embedding_function=CustomEmbeddings(model_path=embd_model_path),
        persist_directory=save_path,
    )
    vector_store.reset_collection()  # drop any previous contents first
    vector_store.add_documents(docs)


# Retrieval
def get_rag_query(save_path=f'/mnt/d/xiancai/bigfiles/rag_data/',
                  embd_model_path=r'/mnt/d/xiancai/bigfiles/models/bge-base-zh-v1___5'):
    """Open the persisted "code_db" collection and return a retrieval closure.

    Args:
        save_path: directory where the Chroma data is persisted.
        embd_model_path: local path of the embedding model.

    Returns:
        A callable query(text) -> list[Document] yielding the 4 nearest chunks.
    """
    store = Chroma(
        collection_name="code_db",
        embedding_function=CustomEmbeddings(model_path=embd_model_path),
        persist_directory=save_path,
    )

    def query(query: str):
        """Return the top-4 documents most similar to the query string."""
        return store.similarity_search(query, k=4)

    return query


def test_creat_rag_db():
    """Smoke-test: (re)build the vector database with the server-side paths."""
    creat_rag_db(
        save_path='/home/ps/zhangxiancai/data/llm_deploy/rag_db',
        embd_model_path=r'/mnt/d/xiancai/bigfiles/models/bge-base-zh-v1___5',
    )


def test_get_rag_query():
    """Smoke-test retrieval: print path and content of the top matches."""
    retrieve = get_rag_query(
        save_path='/home/ps/zhangxiancai/data/llm_deploy/rag_db',
        embd_model_path=r'/mnt/d/xiancai/bigfiles/models/bge-base-zh-v1___5',
    )
    hits = retrieve('test_get_rag_query')
    print(len(hits))
    for ind, doc in enumerate(hits):
        print(f"{ind} {'-' * 50}")
        print(doc.metadata['path'])
        print('-' * 10)
        print(doc.page_content)
        # print(res)


def test_raw_data_2_jsonl():
    """Generate the LLM-augmented jsonl dataset from the default raw data."""
    raw_data_2_jsonl(
        r'/mnt/d/xiancai/bigfiles/rag_data/raw_data/LLMs',
        r'/home/ps/zhangxiancai/llm_deploy/LLMs/local_data',
    )


if __name__ == '__main__':
    # Current entry point: generate the question/response jsonl dataset.
    # Uncomment the lines below to (re)build or query the vector DB instead.
    test_raw_data_2_jsonl()

    # test_creat_rag_db()
    # test_get_rag_query()
