from elasticsearch import Elasticsearch, helpers
from ..base.module import BaseANN
import numpy as np
from tenacity import retry, stop_after_attempt, wait_exponential

def get_conn():
    """Create an Elasticsearch connection and verify the knn plugin.

    Returns:
        Elasticsearch: a client bound to http://localhost:9200.

    Raises:
        RuntimeError: the plugin list was readable but no "knn" component
            was found. If the plugin list cannot be fetched at all (e.g.
            restricted _cat API), only a warning is printed and the client
            is returned anyway (best-effort check).
    """
    es = Elasticsearch([{'host': 'localhost', 'port': 9200, 'scheme': 'http'}])

    plugins = None
    try:
        plugins = es.cat.plugins(format="json")
    except Exception as e:
        # Best-effort only: some clusters restrict the _cat API.
        print(f"Warning: Could not verify knn plugin: {e}")

    # Raise OUTSIDE the try block. The original raised inside it, so its
    # own `except Exception` swallowed the RuntimeError and downgraded the
    # intended hard failure to a warning, defeating the check entirely.
    if plugins is not None and not any(plugin.get("component") == "knn" for plugin in plugins):
        raise RuntimeError("Elasticsearch knn plugin is not enabled. Please install and enable it.")

    return es

class ElasticsearchHnsw(BaseANN):
    """HNSW approximate-nearest-neighbour backend on Elasticsearch dense_vector.

    Stores vectors in a single-shard index and answers top-k queries through
    the Elasticsearch knn search API.
    """

    def __init__(self, metric, m, ef_construction):
        """
        Args:
            metric: distance metric name; "euclidean" maps to l2_norm,
                anything else to cosine similarity.
            m: HNSW graph out-degree parameter.
            ef_construction: HNSW build-time beam width.
        """
        super().__init__()
        self._metric = metric
        self._m = m
        self._ef_construction = ef_construction
        self.es = get_conn()
        self._ef_search = None
        self._query = None
        self._dataset = None  # reference to the dataset, set via set_dataset()

    @property
    def index_name(self):
        """Elasticsearch index name derived from the build parameters.

        NOTE(review): `self.tablename` is presumably provided by BaseANN —
        confirm against the base class.
        """
        return f"{self.tablename}_es_hnsw_{self._metric}_{self._m}_{self._ef_construction}"

    def _update_query(self):
        # Elasticsearch queries are built dynamically per call, not from a
        # fixed query string, so there is nothing to precompute here.
        pass

    def done(self):
        """Tear down: drop the index if it exists."""
        if self.es.indices.exists(index=self.index_name):
            self.es.indices.delete(index=self.index_name)

    def get_row_count(self) -> int:
        """Return the number of documents in the index (0 if absent)."""
        if not self.es.indices.exists(index=self.index_name):
            return 0
        self.es.indices.refresh(index=self.index_name)  # ensure the count is current
        return self.es.count(index=self.index_name).get('count', 0)

    def get_vector_by_id(self, vector_id):
        """Fetch a stored vector by document id; returns None on any failure."""
        try:
            res = self.es.get(index=self.index_name, id=vector_id)
            return np.array(res['_source']['embedding'])
        except Exception as e:
            print(f"获取向量时出错: {e}")
            return None

    def drop(self):
        """Drop the index if it exists."""
        if self.es.indices.exists(index=self.index_name):
            self.es.indices.delete(index=self.index_name)

    def set_dataset(self, dataset):
        """Remember the dataset; create_index() needs it for the dimension."""
        self._dataset = dataset

    def copy(self, dataset):
        """Bulk-load the training split of the stored dataset into the index.

        Creates the index first if it does not exist yet.
        """
        if not self.es.indices.exists(index=self.index_name):
            print("Index does not exist. Please run create_index first.")
            # Create the index on the fly so the copy can still proceed.
            self.create_index()

        train_data = self._dataset["train"]
        total_rows = train_data.shape[0]
        batch_size = 10000

        def generate_actions():
            # Stream index actions lazily so we never materialise the
            # whole payload in memory.
            for i in range(total_rows):
                yield {
                    "_index": self.index_name,
                    "_id": i,
                    "_source": {
                        "embedding": train_data[i].tolist(),
                        "id": i
                    }
                }

        print(f"Start copying {total_rows} rows...")
        helpers.bulk(self.es, generate_actions(), chunk_size=batch_size, request_timeout=3600)
        self.es.indices.refresh(index=self.index_name)
        print("Data copying finished.")

    def create_index(self):
        """(Re)create the index with an HNSW-indexed dense_vector field.

        Raises:
            RuntimeError: if set_dataset() has not been called (the vector
                dimension is taken from the training data).
        """
        if self._dataset is None:
            raise RuntimeError("Dataset not set, cannot create index without knowing dimension.")

        dim = self._dataset['train'].shape[1]

        mapping = {
            "properties": {
                "embedding": {
                    "type": "dense_vector",
                    "dims": dim,
                    "index": True,  # explicitly enable ANN indexing
                    "similarity": "l2_norm" if self._metric == "euclidean" else "cosine",
                    # On Elasticsearch 8.x the HNSW build parameters live in
                    # the field mapping. The previous `index.knn` /
                    # `knn.algo_param.*` settings are OpenSearch-only and
                    # are rejected by ES as unknown index settings, so the
                    # m / ef_construction values were never applied.
                    "index_options": {
                        "type": "hnsw",
                        "m": self._m,
                        "ef_construction": self._ef_construction
                    }
                },
                "id": {
                    "type": "integer"
                }
            }
        }

        settings = {
            "index": {
                "number_of_shards": 1,
                "number_of_replicas": 0
            }
        }

        if self.es.indices.exists(index=self.index_name):
            self.es.indices.delete(index=self.index_name)

        self.es.indices.create(
            index=self.index_name,
            mappings=mapping,
            settings=settings
        )
        print(f"Elasticsearch index '{self.index_name}' created with HNSW parameters.")

    def set_query_arguments(self, ef_search):
        """Set the search-time beam width (num_candidates for knn queries)."""
        self._ef_search = ef_search

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def safe_es_query(self, query_body):
        """Run a search with exponential-backoff retries (3 attempts)."""
        try:
            return self.es.search(index=self.index_name, body=query_body)
        except Exception as e:
            print(f"Elasticsearch query failed: {e}")
            raise

    def query(self, v, n):
        """Return the ids of the n nearest neighbours of vector v."""
        body = {
            "knn": {
                "field": "embedding",
                "query_vector": v.tolist(),
                "k": n,
                # Fall back to a candidate pool proportional to k when no
                # explicit ef_search has been configured.
                "num_candidates": self._ef_search if self._ef_search else n * 10
            },
            "size": n,
            # Request the "id" field explicitly: by default hits carry only
            # `_source`, so reading hit['fields'] below would raise KeyError.
            "fields": ["id"],
            "_source": False
        }
        res = self.safe_es_query(body)
        return [hit['fields']['id'][0] for hit in res['hits']['hits']]

    def hybrid_query(self, v, n, text_query=None):
        """Hybrid search: knn combined with an optional full-text match.

        NOTE(review): the "text_field" referenced here is not part of the
        mapping created by create_index() — confirm it exists before use.
        """
        body = {
            "knn": {
                "field": "embedding",
                "query_vector": v.tolist(),
                "k": n,
                "num_candidates": self._ef_search if self._ef_search else n * 10
            },
            # The original omitted "size", so ES returned its default of 10
            # hits regardless of n; it also never requested "fields".
            "size": n,
            "fields": ["id"],
            "_source": False
        }

        if text_query:
            # Use a separate local name; the original rebound the
            # `text_query` parameter itself, which obscured the logic.
            match_clause = {
                "match": {
                    "text_field": text_query
                }
            }
            body["query"] = match_clause

        res = self.safe_es_query(body)
        return [hit['fields']['id'][0] for hit in res['hits']['hits']]

    def insert(self, vector: np.ndarray, vector_id: int = None) -> bool:
        """Index a single vector under the given id; returns success flag."""
        try:
            self.es.index(index=self.index_name, id=vector_id, document={"embedding": vector.tolist(), "id": vector_id})
            return True
        except Exception as e:
            print(f"插入向量时出错: {e}")
            return False

    def delete(self, ids):
        """Delete the given document ids; returns True on success/empty input.

        helpers.bulk expects `_op_type`-style actions. The original passed
        raw bulk-API headers ({"delete": {...}}), which helpers.bulk treats
        as documents to *index* — no deletion ever happened.
        """
        if not ids:
            return True
        try:
            actions = (
                {"_op_type": "delete", "_index": self.index_name, "_id": str(doc_id)}
                for doc_id in ids
            )
            helpers.bulk(self.es, actions)
            return True
        except Exception as e:
            print(f"Elasticsearch delete failed: {e}")
            return False

    def delete_all_new_data(self, train_size):
        """Delete every document whose id is >= train_size (post-train inserts)."""
        try:
            query = {
                "query": {
                    "range": {
                        "id": {
                            "gte": train_size
                        }
                    }
                }
            }
            self.es.delete_by_query(index=self.index_name, body=query)
            return True
        except Exception as e:
            print(f"Elasticsearch delete_all_new_data failed: {e}")
            return False

    def get_memory_usage(self):
        """Return on-disk store size of the index in KiB (0 if absent)."""
        if not self.es.indices.exists(index=self.index_name):
            return 0
        stats = self.es.indices.stats(index=self.index_name, metric="store")
        return stats['indices'][self.index_name]['total']['store']['size_in_bytes'] / 1024

    def __str__(self):
        return f"es-hnsw(m={self._m}, ef_construction={self._ef_construction}, ef_search={self._ef_search})"