import os
import re
import time

import pandas as pd
import sqlalchemy
import faiss
import numpy as np
# Import SentenceModel after our compatibility fix
# from .adamw_compat import AdamW, get_linear_schedule_with_warmup
from text2vec import SentenceModel

# Name: faiss-cpu
# Version: 1.9.0.post1
# Name: numpy
# Version: 1.26.4
# Name: text2vec
# Version: 1.3.3

INDEX_FILE = "agent/tools/rag/policy_index.index"
TIME_STAMP_FILE = "agent/tools/rag/policy_index_timestamp.stamp"


def get_policy_list(engine):
    """Fetch every policy row ordered by publish time.

    Args:
        engine: SQLAlchemy engine used to open a connection.

    Returns:
        list: rows of (policy_name, content, publish_time) tuples.
    """
    query = sqlalchemy.text("""
                            SELECT policy_name, content, publish_time
                            FROM policy
                            ORDER BY publish_time
                    """)
    # Context manager returns the connection to the pool even on error,
    # fixing the leak in the original (conn was never closed). The query is
    # read-only, so the two commit() calls were unnecessary, and the stray
    # "uid" bind parameter (unused by the statement) has been dropped.
    with engine.connect() as conn:
        return conn.execute(query).fetchall()


def chunk_policy_list(policy_list):
    """Split each policy's content into sentence-level chunks.

    Chunks are delimited by common English and Chinese terminal punctuation.

    Args:
        policy_list: iterable of (policy_name, content, publish_time) rows.

    Returns:
        tuple: (chunked_dict, chunked_policy_list) where chunked_dict maps
            policy_name -> list of (policy_name, chunk, publish_time, index)
            tuples and chunked_policy_list is the same tuples flattened in
            encounter order.
    """
    # Terminal punctuation (English and Chinese) marks chunk boundaries.
    sentence_splitter = re.compile(r'[.?!;。！？；]')

    chunked_dict = {}
    chunked_policy_list = []
    for policy_name, content, publish_time in policy_list:
        if not content:
            # Policies with empty/NULL content produce no chunks at all.
            continue
        # Split on punctuation and drop the empty strings produced by
        # consecutive delimiters or a trailing delimiter.
        pieces = [piece for piece in sentence_splitter.split(content) if piece]
        entries = [
            (policy_name, piece, publish_time, position)
            for position, piece in enumerate(pieces)
        ]
        chunked_dict[policy_name] = entries
        chunked_policy_list.extend(entries)
    return chunked_dict, chunked_policy_list


def aggregate_policy_chunks(retrieved_policies, policy_dict):
    """Expand retrieved chunks with their neighbors and merge them per policy.

    For each retrieved chunk, the chunk itself plus its immediate predecessor
    and successor (when they exist) are collected; the collected chunks of a
    policy are then concatenated in index order.

    Args:
        retrieved_policies: iterable of (policy_name, content, publish_time,
            index) tuples as produced by chunk_policy_list.
        policy_dict: mapping policy_name -> list of chunk tuples, used to look
            up neighbor chunks and bound the valid index range.

    Returns:
        dict: policy_name -> (policy_name, merged_content, publish_time).
    """
    visited_indices = {}
    for policy_name, _content, _publish_time, index in retrieved_policies:
        # setdefault replaces the original fragile "if not index_set:" check,
        # which conflated "missing" with "empty set".
        index_set = visited_indices.setdefault(policy_name, set())
        last = len(policy_dict[policy_name]) - 1
        index_set.add(index)
        # Pull in adjacent chunks so the merged text reads coherently.
        if index < last:
            index_set.add(index + 1)
        if index > 0:
            index_set.add(index - 1)

    aggregated_policies = {}
    for policy_name, index_set in visited_indices.items():
        chunks = policy_dict[policy_name]
        ordered = sorted(index_set)
        # "".join is linear; the original repeated "content +=" is quadratic.
        content = "".join(chunks[i][1] for i in ordered)
        publish_time = chunks[ordered[-1]][2] if ordered else None
        aggregated_policies[policy_name] = (policy_name, content, publish_time)
    return aggregated_policies


def save_index_with_timestamp(index):
    """Persist the FAISS index to disk and record the write time.

    The stamp written here is what load_index_with_timestamp() later uses to
    decide whether the cached index is still fresh.

    Args:
        index: the FAISS index to serialize to INDEX_FILE.
    """
    faiss.write_index(index, INDEX_FILE)
    stamp = str(time.time())
    with open(TIME_STAMP_FILE, 'w') as stamp_file:
        stamp_file.write(stamp)


def load_index_with_timestamp(max_age_seconds=60):
    """Load the cached FAISS index if it is recent enough.

    Args:
        max_age_seconds: maximum allowed age of the cached index, in seconds
            (default 60, matching the original hard-coded freshness window).

    Returns:
        The deserialized FAISS index when both cache files exist, the stamp
        parses as a float, and the index is younger than max_age_seconds;
        otherwise None (caller rebuilds the index).
    """
    # Both files must exist: the original only checked the stamp, so a stamp
    # left behind without its index crashed faiss.read_index.
    if not (os.path.exists(TIME_STAMP_FILE) and os.path.exists(INDEX_FILE)):
        return None
    try:
        with open(TIME_STAMP_FILE, 'r') as f:
            timestamp = float(f.read())
    except ValueError:
        # Empty or corrupt stamp file: treat the cache as invalid.
        return None
    if time.time() - timestamp < max_age_seconds:
        return faiss.read_index(INDEX_FILE)
    return None


def get_from_vector_db(question_list: list[str], num: int, model_path: str, engine):
    """Retrieve the policies most relevant to the given questions.

    Policies are loaded from the database, split into sentence chunks,
    embedded with a text2vec SentenceModel, and indexed with FAISS (the index
    is cached on disk for a short time). Each question's nearest chunks are
    expanded with their neighbors and merged per policy.

    Args:
        question_list: natural-language queries to search for.
        num: number of nearest chunks to retrieve per question.
        model_path: path or name of the SentenceModel to load.
        engine: SQLAlchemy engine used to read the policy table.

    Returns:
        dict mapping policy_name -> (policy_name, merged_content,
        publish_time), or None when there are no policies to index.
    """
    policy_list = get_policy_list(engine)
    chunked_policy_dict, chunked_policy_list = chunk_policy_list(policy_list)

    model = SentenceModel(model_name_or_path=model_path)

    index = load_index_with_timestamp()
    if index is None:
        # Cache miss/expired: re-embed every chunk and rebuild the index.
        sentences = [chunk[1] for chunk in chunked_policy_list]
        embeddings = model.encode(sentences)
        try:
            index = faiss.IndexFlatL2(embeddings.shape[1])
            index.add(embeddings)
        except IndexError:
            # No policies -> embeddings has no second dimension.
            print("None base knowledge")
            return None
        save_index_with_timestamp(index)

    # One embedding row (and one result row below) per question.
    query_embeddings = model.encode(question_list)
    _distances, neighbor_ids = index.search(query_embeddings, num)

    # BUG FIX: the original consumed only I[0], silently discarding the
    # search results of every question after the first. Collect hits from
    # every result row; FAISS pads missing neighbors with -1.
    retrieved_chunks = set()
    for row in neighbor_ids:
        for chunk_id in row:
            if chunk_id != -1:
                retrieved_chunks.add(chunked_policy_list[chunk_id])
    return aggregate_policy_chunks(retrieved_chunks, chunked_policy_dict)
