from docx import Document
import os
import numpy as np
from methods import split_into_segments
from call_model import call_embedding_model
from database import save_faiss

# 如果是doc文件，需要将doc文件转为docx文件
# If the file is a .doc file, convert it to .docx
def trans_doc2docx(filepath):
    """Convert a legacy .doc file to .docx using LibreOffice (soffice).

    The converted file is written to the same directory as the input
    (same stem, ``.docx`` extension), via ``soffice --headless``.

    Args:
        filepath: Path to the .doc file to convert.

    Returns:
        True if the conversion command succeeded, False otherwise
        (soffice missing / not executable, or it exited non-zero).
    """
    import subprocess

    output_dir = os.path.dirname(filepath)

    # Command to run: headless LibreOffice batch conversion.
    command = [
        'soffice',
        '--headless',
        '--convert-to',
        'docx',
        '--outdir',
        output_dir,
        filepath,
    ]

    try:
        # check=True raises CalledProcessError on a non-zero exit code;
        # output is captured so soffice doesn't spam our stdout/stderr.
        subprocess.run(command, check=True, capture_output=True)
        return True
    except (subprocess.CalledProcessError, OSError):
        # OSError covers FileNotFoundError (soffice not installed) and
        # permission problems; narrow except so we don't swallow
        # KeyboardInterrupt/SystemExit like the old bare `except:` did.
        return False



# 从docx中提取文本内容
# Extract the text content from a .docx file
def extract_text_from_docx(file_path):
    """Return the text of every non-blank paragraph in *file_path*,
    joined with newlines. Whitespace-only paragraphs are skipped."""
    doc = Document(file_path)
    non_blank = (para.text for para in doc.paragraphs if para.text.strip())
    return "\n".join(non_blank)
    

# 将分块存储到向量数据库中
# Store the chunks into the vector database
def save_chunks(chunks, path, conn, cursor, faiss, faiss_path):
    """Persist text chunks to SQLite and index their embeddings in FAISS.

    Each chunk is inserted into the ``origin`` table (committed per chunk
    so rows survive a mid-run crash), embedded via the embedding model,
    and added to the FAISS index keyed by its SQLite rowid. The FAISS
    index is written to disk once at the end.

    Args:
        chunks: Iterable of text chunks to store.
        path: Source file path recorded with each chunk.
        conn / cursor: Open SQLite connection and cursor.
        faiss: FAISS index supporting ``add_with_ids``.
        faiss_path: Filesystem path where the index is saved.
    """
    added_any = False
    for chunk in chunks:
        cursor.execute('''
            INSERT INTO origin (path, content, tokens, time)
            VALUES (?, ?, ?, ?)
        ''', (path, chunk, 0, 0))

        # Commit per chunk so the DB row id is durable before indexing.
        conn.commit()
        last_id = cursor.lastrowid

        # Vectorize the chunk; embedding may be None if the model call fails.
        embedding = call_embedding_model(chunk)
        if embedding is not None:
            # Use the SQLite rowid as the FAISS id so the two stores link up.
            faiss.add_with_ids(embedding, np.array([last_id]))
            added_any = True

    # Save the index ONCE after the loop — the old code rewrote the whole
    # index file after every chunk, which is O(n^2) disk I/O.
    if added_any:
        save_faiss(faiss, faiss_path)


def insert_words(filepath, conn, cursor, faiss, faiss_path):
    """Ingest a Word document (.doc or .docx) into SQLite + FAISS.

    A ``.doc`` file is first converted to ``.docx`` via LibreOffice; the
    text is then extracted, split into segments, and stored.

    Args:
        filepath: Path to the Word document.
        conn / cursor: Open SQLite connection and cursor.
        faiss: FAISS index.
        faiss_path: Path where the FAISS index is saved.

    Returns:
        None on success, or an error-message string on failure.
    """
    # Guard clause: bail out early on a missing file.
    if not os.path.isfile(filepath):
        return "file is not exist!"

    if filepath.endswith('.doc'):
        # Old code ignored a failed conversion and then crashed trying to
        # parse the raw .doc — report the failure explicitly instead.
        if not trans_doc2docx(filepath):
            return "failed to convert .doc to .docx!"
        filepath = filepath + 'x'

    # Extract, segment, and persist the document contents.
    contents = extract_text_from_docx(filepath)
    chunks = split_into_segments(contents)

    # Store the chunks into the vector database
    save_chunks(chunks, filepath, conn, cursor, faiss, faiss_path)

    return None



