import os

import chromadb
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Root directory of the pre-extracted PDF text files to ingest.
data_root = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/pdf_txt_file/'
# Local path of the Tongyi-Finance model. Not used in this script yet;
# kept for the embedding step that is currently commented out below.
model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B'

# Split documents into ~1000-char chunks with 100-char overlap so that
# adjacent chunks share some context.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

# Persistent on-disk Chroma client. get_or_create_collection (rather than
# create_collection) makes the script re-runnable: create_collection raises
# if the "pdf" collection already exists.
client = chromadb.PersistentClient(path="../db/pdf")
collection = client.get_or_create_collection(name="pdf")

documents_list = []
ids_list = []
# embeddings_list = []  # re-enable together with the embedding model below

# Walk every extracted-text file, split it into chunks, and accumulate the
# chunks (with stable per-file ids) for a single bulk insert at the end.
for file_name in os.listdir(data_root):
    file_path = os.path.join(data_root, file_name)
    if not os.path.isfile(file_path):
        continue  # skip stray subdirectories / non-file entries

    # Read the whole file in one call; pin the encoding so the result does
    # not depend on the platform's default locale.
    with open(file_path, 'r', encoding='utf-8') as file:
        text = file.read()
    # Collapse newlines so the splitter sees continuous prose.
    text = text.replace('\n', ' ')

    for i, chunk in enumerate(text_splitter.split_text(text)):
        # embeddings_list.append(model.encode(chunk).tolist())
        documents_list.append(chunk)
        # Id format "<source file>_<chunk index>" lets a chunk be traced
        # back to its originating document.
        ids_list.append(f"{file_name}_{i}")

# Bulk add in one call; Chroma rejects an empty ids list, so guard the
# case where the data directory yielded no readable files.
if documents_list:
    collection.add(
        # embeddings=embeddings_list,
        documents=documents_list,
        ids=ids_list,
    )

