import os

import chromadb
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from modelscope import AutoModelForCausalLM, AutoTokenizer, snapshot_download
import os
from sentence_transformers import SentenceTransformer
import sqlite3
# Paths to the competition dataset: plain-text PDF extractions, the fund
# SQLite database, and the local LLM snapshot directory.
data_root = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/pdf_txt_file/'
fund_db_path = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/dataset/博金杯比赛数据.db'
model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B'

# Split long documents into ~1000-char chunks with 100 chars of overlap so
# neighbouring chunks share context across the boundary.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
# m3e-base sentence-embedding model used to vectorise every row/chunk below.
model = SentenceTransformer('/datasets/fengjiahao/nlp/m3e-base/')

client = chromadb.PersistentClient(path="./db_fund")
# get_or_create_collection instead of create_collection: the original call
# raises ("collection already exists") on any re-run against the same
# persistent ./db_fund directory, making the script single-use.
collection = client.get_or_create_collection(name="fund_db")

# Accumulators shared with the text-file ingestion pass below; everything is
# flushed into Chroma with a single collection.add() call at the end.
documents_list = []
embeddings_list = []
ids_list = []

# Pass 1: serialise every row of every table in the fund database into a
# natural-language prompt ("<table>: <col>是<val>,...") and embed it.
conn = sqlite3.connect(fund_db_path)
try:
    cursor = conn.cursor()
    cursor.execute("select name from sqlite_master where type='table' order by name;")
    table_names = cursor.fetchall()
    for table_name, in table_names:
        # PRAGMA table_info rows are (cid, name, type, ...); index 1 is the
        # column name.
        cursor.execute("PRAGMA table_info ( [%s] )" % table_name)
        titles = cursor.fetchall()
        title_list = [t[1] for t in titles]
        # Bracket-quote the table name here as well (the original quoted it
        # only in the PRAGMA), so names containing spaces or SQL keywords
        # don't break the SELECT.
        cursor.execute("select * from [%s]" % table_name)
        rows = cursor.fetchall()
        for i, row in enumerate(rows):
            # Build the prompt in one join instead of repeated string
            # concatenation inside the column loop.
            db_prompt = '%s: ' % table_name + ''.join(
                '%s是%s,' % (title, str(col))
                for title, col in zip(title_list, row)
            )
            embeddings = model.encode(db_prompt).tolist()
            print(db_prompt)
            documents_list.append(db_prompt)
            embeddings_list.append(embeddings)
            ids_list.append(f"{table_name}_{i}")
finally:
    # The original never closed the connection; release it even if a query
    # or the embedding model raises.
    conn.close()

# Pass 2: embed every chunk of every plain-text file under data_root.
file_list = os.listdir(data_root)
for file_name in file_list:
    file_path = os.path.join(data_root, file_name)
    # Read the whole file at once instead of accumulating line-by-line with
    # `+=`. Explicit utf-8: these appear to be Chinese financial texts, and
    # relying on the platform default encoding is not portable — TODO confirm
    # the dataset's actual encoding.
    with open(file_path, 'r', encoding='utf-8') as file:
        text = file.read()
    # Collapse newlines so the splitter sees one continuous document.
    text = text.replace('\n', ' ')
    chunks = text_splitter.split_text(text)

    # Convert chunks to vector representations and queue them for Chroma,
    # with ids of the form "<file_name>_<chunk_index>".
    for i, chunk in enumerate(chunks):
        embeddings = model.encode(chunk).tolist()

        documents_list.append(chunk)
        embeddings_list.append(embeddings)
        ids_list.append(f"{file_name}_{i}")

# Single bulk insert of both the DB-row prompts and the text chunks.
collection.add(
    embeddings=embeddings_list,
    documents=documents_list,
    ids=ids_list
)

