# 加载与读取文档
import mimetypes
import os, configparser


def loadtext(path):
    """
    Load a plain-text document and return its content.

    Args:
        path: File path, possibly carrying a trailing newline
            (e.g. a line obtained from ``readlines()``).

    Returns:
        The file content as a ``str``, or ``None`` when the file does not
        exist or its MIME type is not ``text/plain``.
    """
    # Strip the trailing newline/whitespace left by readlines()-style callers.
    # (The original also did `replace(' \n', '')`, which is dead code for
    # single-line paths once rstrip() has run.)
    path = path.rstrip()

    # Resolve to an absolute path.
    filename = os.path.abspath(path)

    # Make sure the file exists and determine its MIME type.
    if os.path.isfile(filename):
        filetype = mimetypes.guess_type(filename)[0]
    else:
        # Fixed: the original f-string never interpolated the file name.
        print(f"File {filename} not found")
        return None

    # Only plain-text documents are supported.
    if filetype != 'text/plain':
        return None

    # Open in text mode instead of reading bytes and decoding by hand.
    with open(filename, encoding='utf-8') as f:
        return f.read()


# 这里配置了一个简单的配置器，用于读取模型名称的配置，后面要用
def getconfig():
    """Read ``config.ini`` and return every option of its [main] section as a dict."""
    parser = configparser.ConfigParser()
    parser.read('config.ini')
    return {option: value for option, value in parser.items("main")}


# 把文档分割成知识块
import jieba, re


def split_text_by_sentences(source_text: str,
                            sentences_per_chunk: int,
                            overlap: int) -> list[str]:
    """
    Split a document into chunks, each containing a fixed number of sentences.

    Args:
        source_text: The full document text.
        sentences_per_chunk: Number of sentences per chunk (must be >= 2).
        overlap: Number of trailing sentences of the previous chunk to
            prepend to each following chunk
            (``0 <= overlap < sentences_per_chunk - 1``).

    Returns:
        A list of chunk strings; an empty list when no sentences are found.

    Raises:
        ValueError: If ``sentences_per_chunk`` or ``overlap`` is out of range.
    """
    if sentences_per_chunk < 2:
        # Fixed: the original message had the relation backwards
        # ("a sentence needs at least 2 chunks").
        raise ValueError("每个知识块至少要包含2个句子！")
    if overlap < 0 or overlap >= sentences_per_chunk - 1:
        raise ValueError("overlap参数必须大于等于0，且小于sentences_per_chunk")

    # Split after Chinese sentence-ending punctuation. Fixed: `\s*` instead of
    # `\s+` — Chinese text normally has NO whitespace after 。！？, so the old
    # pattern never matched and the whole document came back as one "sentence".
    # Raw string also silences the invalid-escape warning for `\s`.
    sentences = re.split(r'(?<=[。！？])\s*', source_text)
    sentences = [sentence.strip() for sentence in sentences if sentence.strip() != '']

    if not sentences:
        print("Nothing to chunk")
        return []

    chunks = []
    i = 0
    while i < len(sentences):

        end = min(i + sentences_per_chunk, len(sentences))
        chunk = ' '.join(sentences[i:end])

        # From the second chunk onward, prepend the last `overlap` sentences
        # of the previous chunk so context is preserved across boundaries.
        # (`i > 0` replaces the original `i > 1`; they are equivalent here
        # because i only ever takes multiples of sentences_per_chunk >= 2.)
        if overlap > 0 and i > 0:
            overlap_start = max(0, i - overlap)
            overlap_chunk = ' '.join(sentences[overlap_start:i])
            chunk = overlap_chunk + ' ' + chunk

        chunks.append(chunk.strip())
        i += sentences_per_chunk

    return chunks


import ollama, chromadb

# # 引入自定义模块
# from load import loadtext, getconfig
# from splitter import split_text_by_sentences

# 向量模型
# embedmodel = getconfig()["embedmodel"]

# 向量库
# chroma = chromadb.HttpClient(host="localhost", port=8000)
# chroma.delete_collection(name="ragdb")
# collection = chroma.get_or_create_collection(name="ragdb")

# 读取文档列表，依次处理
# with open('first.txt',encoding='utf-8') as f:
#     lines = f.readlines()
#     for filename in lines:
#
#         # 加载文档内容
#         text = loadtext(filename)
#
#         # 把文档分割成知识块
#         chunks = split_text_by_sentences(source_text=text,
#                                          sentences_per_chunk=8,
#                                          overlap=0)

        # # 对知识块依次处理
        # for index, chunk in enumerate(chunks):
        #     # 借助基于Ollama部署的本地嵌入模型生成向量
        #     embed = ollama.embeddings(model=embedmodel, prompt=chunk)['embedding']
        #
        #     # 存储到向量库Chroma中，注意这里的参数
        #     collection.add([filename + str(index)], [embed], documents=[chunk], metadatas={"source": filename})

# Ingestion script: load one document, split it into sentence chunks, embed
# each chunk with a local Ollama model, and insert the vectors into Milvus.
txt = loadtext('first.txt')
splitted = split_text_by_sentences(source_text=txt, sentences_per_chunk=10, overlap=0)

from pymilvus import MilvusClient

# Ollama endpoint (default local port 11434).
host = "localhost"
port = "11434"

# Milvus connection.
# NOTE(review): the uri host "app1" is hard-coded and differs from `host`
# above — confirm this is the intended Milvus server.
cl = MilvusClient(
    uri="http://app1:19530",
    enable_dynamic_field=True,
    db_name='first',
)

# Fixed: create the Ollama client ONCE, outside the loop — it is
# loop-invariant, and the original built a new HTTP client per chunk.
client = ollama.Client(host=f"http://{host}:{port}")

# Embed each chunk and store it in the "knowledge" collection.
for index, chunk in enumerate(splitted):
    print(chunk)

    # Vector from the locally served Chinese embedding model.
    ems = client.embeddings(model='milkey/dmeta-embedding-zh:f16', prompt=chunk)['embedding']
    print(ems)

    # NOTE(review): only the embedding is persisted under "content" — the
    # chunk text itself and a source reference are dropped. Verify against
    # the "knowledge" collection schema whether the text should be stored too.
    data = [
        {"content": ems}
    ]
    res = cl.insert(
        collection_name="knowledge",
        data=data,
    )