import os

import chromadb
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os

from modelscope import AutoModelForCausalLM, AutoTokenizer, snapshot_download
from modelscope import GenerationConfig
from langchain.prompts import ChatPromptTemplate
import sqlite3

# Directory of prospectus text files (one UTF-8 .txt per company).
data_root = r'/public/tmp/fengjiahao/bs_challenge_financial_14b_dataset/pdf_txt_file/'
# model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B'
model_dir = '/datasets/fengjiahao/nlp/qwen/Qwen-7B-Chat/'

# Split each document into ~1000-char chunks with 100 chars of overlap.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

# Persistent local Chroma store. Use get_or_create_collection so the script
# can be re-run without crashing: create_collection raises if the collection
# named "company_1500" already exists.
client = chromadb.PersistentClient(path="../db/pdf")
collection = client.get_or_create_collection(name="company_1500")

# Accumulators filled inside the per-file loop and flushed with one
# collection.add(...) call at the end of the script.
documents_list = []
embeddings_list = []
ids_list = []
metadatas_list = []

# Load the chat model used as a fallback to extract the company name when
# the labelled-line scan below fails.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_dir, trust_remote_code=True)  # generation hyper-parameters (max length, top_p, ...) can be overridden here

# Prompt asking the model to name the company a prospectus belongs to
# (runtime strings kept verbatim; the template expects Chinese input text).
get_company_template = ChatPromptTemplate.from_template(
    "你是一个能精准提取信息的AI。"
    "我会给你一篇招股说明书，请输出此招股说明书的主体是哪家公司，若无法查询到，则输出无。\n"
    "{q}\n\n"
    "请指出以上招股说明书属于哪家公司，请只输出公司名。"
)

file_list = os.listdir(data_root)
for file_name in file_list:
    file_path = os.path.join(data_root, file_name)
    # Accumulate raw lines and join once at the end — repeated `text += line`
    # is quadratic in the document length.
    raw_lines = []
    filename = None
    # The corpus is Chinese text; decode explicitly as UTF-8 instead of
    # relying on the platform default encoding.
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            raw_lines.append(line)
            # Scan for labelled name lines such as "中文名称：X" /
            # "发行人名称：X" / "公司名称：X" / "发行人：X" and take the value.
            if '中文名称：' in line or '发行人名称：' in line or '公司名称：' in line or '发行人：' in line:
                filename = line[line.index('：') + 1:].strip()
                # Some lines carry a second "label：value" pair on the same
                # line; keep the value after the last-found colon.
                if '：' in filename:
                    filename = filename[filename.index('：') + 1:].strip()
                # Drop any trailing text after a Chinese semicolon.
                filename = filename.split('；')[0]
    text = ''.join(raw_lines)

    if filename is None or filename == '':
        # Fallback: ask the LLM, feeding it the first 1500 characters of the
        # prospectus, and strip a leading "label：" prefix from its answer.
        prompt = get_company_template.format_messages(q=text[:1500])
        filename, history = model.chat(tokenizer, prompt[0].content, history=None)
        if '：' in filename:
            filename = filename[filename.index('：') + 1:]
    # Progress log: cleaned company name + source file. NOTE(review): this
    # cleaned form is only printed; the raw `filename` is what gets stored in
    # the metadata below — confirm that is intended.
    print(filename.replace('公司名：', '').split(' ')[0].replace('。', '').replace('.', ''), file_name)

    # Flatten newlines before chunking so splits are driven by chunk_size,
    # not line breaks.
    text = text.replace('\n', ' ')
    chunks = text_splitter.split_text(text)

    # Queue chunks for one batched insert into Chroma after the loop.
    for i, chunk in enumerate(chunks):
        documents_list.append(chunk)
        # BUG FIX: ids must be unique across the whole collection. The old
        # constant prefix produced colliding ids ("..._0", "..._1", ...) for
        # every file; namespace each id by its source file instead.
        ids_list.append(f"{file_name}_{i}")
        metadatas_list.append(dict(company_name=filename, line=i))
# Flush every queued chunk to the persistent Chroma collection in a single
# batched call; embeddings are computed by the collection's default embedding
# function since none are supplied explicitly.
# NOTE(review): Chroma enforces a maximum batch size per add() call — with a
# large corpus this single call may exceed it; confirm and batch if needed.
collection.add(
    # embeddings=embeddings_list,
    documents=documents_list,
    ids=ids_list,
    metadatas=metadatas_list
)
