


def get_chunks(data_dir=r'D:\code\git\zxc\project_format_jzbxh',
               chunk_len=1000, chunk_overlap=50,
               exts=('.py',),
               ignore_dir_names=('llm_py310', '.git', 'TARGET')):
    """Split every matching source file under *data_dir* into overlapping text chunks.

    Recursively walks *data_dir* (pruning directories named in
    *ignore_dir_names*), reads each file whose extension is in *exts*
    as UTF-8, and cuts its content into fixed-size chunks where
    consecutive chunks share *chunk_overlap* characters.

    Args:
        data_dir: Root directory to scan (default preserves the original
            hard-coded path).
        chunk_len: Maximum characters per chunk.
        chunk_overlap: Characters shared between consecutive chunks.
        exts: Lower-cased file extensions to include.
        ignore_dir_names: Directory basenames pruned from the walk.

    Returns:
        A flat list of chunk strings from all matched files, in walk order.

    Raises:
        ValueError: If chunk_overlap >= chunk_len (the scan position would
            never advance, looping forever).
    """
    import os
    if chunk_overlap >= chunk_len:
        raise ValueError('chunk_overlap must be smaller than chunk_len')

    ignore = set(ignore_dir_names)
    # Collect target file paths.
    ls_path = []
    for root, dirs, files in os.walk(data_dir):
        # Prune ignored directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if d not in ignore]
        for fn in files:
            _, ext = os.path.splitext(fn)
            if ext.lower() in exts:
                ls_path.append(os.path.join(root, fn))

    # Chunking strategy: uniform split with fixed overlap.
    ls_chunk = []
    step = chunk_len - chunk_overlap
    for path in ls_path:
        with open(path, 'r', encoding='utf-8') as f:
            content = f.read()
        for p in range(0, len(content), step):
            ls_chunk.append(content[p:p + chunk_len])

    return ls_chunk


def get_rag_tool():
    """Build and return an in-memory RAG retrieval function.

    Embeds the first 50 chunks from get_chunks() with a local BGE model
    (cls pooling + L2 normalization) and returns a closure that retrieves
    the chunks most cosine-similar to a query string.

    Returns:
        query(qurey_str, k=2) -> List[str]: top-k chunks ranked by
        descending cosine similarity.
    """
    ls_chunk = get_chunks()[:50]
    from transformers import AutoTokenizer, AutoModel
    from typing import List
    import torch
    import numpy as np

    # NOTE(review): hard-coded local model path — confirm it exists on the
    # target machine before deploying.
    model_path = r'D:\code\other\LLMs\third\tiny-universe\content\TinyRAG\data\embd_model\bge-base-zh-v1.5'
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path)
    model.eval()

    def embd_infer(text):
        """Return the L2-normalized cls-pooled embedding of *text* as List[float]."""
        encoded_input = tokenizer(text, padding=True, truncation=True, return_tensors='pt')
        with torch.no_grad():
            model_output = model(**encoded_input)
            # cls pooling: take the first token's hidden state.
            sentence_embeddings = model_output[0][:, 0]
        sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
        return sentence_embeddings.tolist()[0]

    # Embed every chunk once up front; keep them as a matrix so query-time
    # scoring is a single vectorized operation instead of a Python loop.
    embedding_ls = []
    for ind, chunk in enumerate(ls_chunk):
        print(f'{ind}/{len(ls_chunk)}')
        embedding_ls.append(embd_infer(chunk))
    emb_matrix = np.asarray(embedding_ls)  # shape (n_chunks, dim)
    row_norms = np.linalg.norm(emb_matrix, axis=1) if embedding_ls else np.array([])

    def query(qurey_str: str, k: int = 2) -> List[str]:
        """Return the top-*k* stored chunks most cosine-similar to *qurey_str*."""
        if not ls_chunk:
            return []
        q = np.asarray(embd_infer(qurey_str))
        q_norm = np.linalg.norm(q)
        # Vectorized cosine similarity; zero-magnitude rows score 0,
        # matching the original per-row guard.
        denom = row_norms * q_norm
        safe = denom > 0
        scores = np.zeros(len(ls_chunk))
        scores[safe] = (emb_matrix @ q)[safe] / denom[safe]
        tinds = np.argsort(scores)[-k:][::-1]
        return [ls_chunk[i] for i in tinds]

    return query


# (stray module-level `pass` removed — it had no effect)


def test_get_rag_tool():
    """Smoke test: build the RAG tool and print the chunks retrieved for a sample query."""
    retrieve = get_rag_tool()
    for hit in retrieve('test fenglun的逻辑'):
        print(hit)



if __name__ == '__main__':
    # chunks = get_chunks()

    # Manual smoke test: build the RAG tool and run one sample query.
    test_get_rag_tool()
