# coding=utf-8

from db.mysql.text_chunk_dao import TextChunkDAO
from db.milvus.text_chunk_dao import TextChunkDAO as TextChunkMilvusDAO

# # Step 1: split the text into chunks and store them in the database
# separator = "。"
# text_splitter = CharacterTextSplitter(
#     separator=separator,
#     chunk_size=250,
#     chunk_overlap=60,
#     length_function=len
# )
#
#
# video_dao = VideoDAO()
# text_chunk_dao = TextChunkDAO()
#
# for i in range(1, 200+1):
#     video_id = i
#     video_content = video_dao.get_video_content(id=video_id)
#
#     metadatas = [{"video_id": video_content.get("video_id")}, ]
#     text_chunks = text_splitter.create_documents(
#         texts=[video_content.get("content")],
#         metadatas=metadatas
#     )
#
#     for text_chunk in text_chunks:
#         content = text_chunk.page_content
#         text_chunk_dao.insert_text_chunks(video_id=video_id, content=content)


# # Step 2: vectorize the text chunks and store the vectors in the database
# openai_bot = OpenAIBot()
# text_chunk_dao = TextChunkDAO()
# for i in range(1, 669+1):
#     chunk_id = i
#     text_chunk = text_chunk_dao.get_text_chunks(chunk_ids=[i, ])[0]
#     content = text_chunk.get("content")
#     content_vector = openai_bot.embeddings(text_list=[content, ])[0]
#
#     text_chunk_dao.update_content_vector(
#         chunk_id=chunk_id,
#         content_vector=json.dumps(content_vector, ensure_ascii=False)
#     )


# # Step 3: create the Milvus collection
# text_chunk_milvus_dao = TextChunkMilvusDAO()
# text_chunk_milvus_dao.create_collection(dim=1536)

# Deliberate no-op: the commented-out steps above are one-off migration
# scripts (chunking, embedding, Milvus collection setup) — presumably kept
# as a record of what was run, with the next step uncommented manually.
# NOTE(review): confirm with the author before deleting any of them.
pass
