import logging
import json
import random
import hashlib
import time

import nltk
from openai import OpenAI
from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility

# Module-level logger; handlers/formatters are expected to be configured
# by the embedding application.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Fetch the Punkt sentence-tokenizer model so split_text() can call
# nltk.sent_tokenize without a lazy download at request time.
# NOTE(review): this performs network I/O on import — consider moving it
# behind an explicit initialization step.
nltk.download('punkt')

# Constants mirroring the Go implementation's `g` package.
MEM_SOURCE = "memory"      # source value marking "memory" records (paired text/result)
NONE_USER = "none_user"    # sentinel author meaning "no author filter"

def md5_hash(s):
    """Return the hexadecimal MD5 digest of *s* (UTF-8 encoded)."""
    digest = hashlib.md5(s.encode('utf-8'))
    return digest.hexdigest()

def i64_to_s(i):
    """Stringify a 64-bit integer (mirror of Go's strconv.FormatInt)."""
    return "{}".format(i)

def i_to_s(i):
    """Stringify an integer (mirror of Go's strconv.Itoa)."""
    return "{}".format(i)

class EmbeddingFunc:
    """Thin wrapper around an OpenAI-compatible embeddings endpoint.

    Falls back to random mock vectors when credentials are missing or the
    remote call fails, so callers can keep working offline. Successful
    embeddings are memoized per query string.
    """

    def __init__(self, api_key, base_url):
        """Create the client if credentials are available.

        Args:
            api_key: DashScope-compatible API key; falsy disables the client.
            base_url: Base URL of the OpenAI-compatible endpoint.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.client = None
        self._cache = {}  # query text -> embedding vector (unbounded; grows with distinct queries)
        if self.api_key and self.base_url:
            self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
        else:
            # Use the module logger instead of bare print for consistency
            # with the rest of the module's error reporting.
            logger.warning("DASHSCOPE_API_KEY or DASHSCOPE_API_BASE_URL not provided. Embedding will be mocked.")

    def _mock_embedding(self):
        """Return a random vector with the collection's dimensionality."""
        return [random.random() for _ in range(ZillizVectorDB.DEFAULT_DIMENSION)]

    def embed_query(self, query: str) -> list:
        """Return the embedding vector for *query*, using the instance cache.

        Returns:
            list[float]: A vector of length ZillizVectorDB.DEFAULT_DIMENSION.
        """
        if query in self._cache:
            return self._cache[query]

        if not self.client:
            # No credentials: cache the mock so repeated queries at least
            # return a stable vector within this process.
            embedding = self._mock_embedding()
            self._cache[query] = embedding
            return embedding
        try:
            completion = self.client.embeddings.create(
                model="text-embedding-v4",
                input=query,
                dimensions=ZillizVectorDB.DEFAULT_DIMENSION,
                encoding_format="float"
            )
        except Exception as e:
            # Bug fix: previously the random fallback was cached, so a single
            # transient API failure made this query return garbage forever.
            # Do NOT cache the fallback — a later call can still succeed.
            logger.error("Error during embedding generation: %s. Returning mock embedding.", e)
            return self._mock_embedding()

        embedding = completion.data[0].embedding
        self._cache[query] = embedding
        return embedding

def get_zhipu_embedding(texts, embedding_func_obj):
    """Embed each text via *embedding_func_obj* and wrap the vectors in the
    legacy ``{"Data": [{"Embedding": ...}, ...]}`` response shape."""
    vectors = [embedding_func_obj.embed_query(text) for text in texts]
    return {"Data": [{"Embedding": vec} for vec in vectors]}

def split_text(text, max_len, break_words):
    """Split *text* into chunks of fewer than *max_len* characters.

    Sentences are detected with NLTK's Punkt tokenizer and greedily packed
    into chunks joined by a single space. A single sentence longer than
    *max_len* still becomes its own (oversized) chunk.

    Args:
        text: The input text to split.
        max_len: Upper bound on chunk length, in characters.
        break_words: Unused; kept for signature compatibility with the Go
            implementation, which split on these characters manually.

    Returns:
        list[str]: The chunks, in original order.
    """
    sentences = nltk.sent_tokenize(text)
    parts = []
    current_part = []
    current_len = 0

    for sentence in sentences:
        # Bug fix: account for the " " separator that join() inserts between
        # sentences — the original under-counted by one character per join,
        # so chunks could slightly exceed max_len.
        sep = 1 if current_part else 0
        if current_len + sep + len(sentence) < max_len:
            current_part.append(sentence)
            current_len += sep + len(sentence)
        else:
            if current_part:
                parts.append(" ".join(current_part))
            current_part = [sentence]
            current_len = len(sentence)
    if current_part:
        parts.append(" ".join(current_part))
    return parts

class ZillizVectorDB:
    """Zilliz Cloud (Milvus) vector store mirroring the original Go service.

    Loads connection settings from a JSON config file, connects on
    construction, and exposes create/upsert/query/delete helpers over a
    single collection whose schema matches the Go implementation.
    """

    # Embedding dimensionality; must match both the collection schema and the
    # `dimensions` argument sent to the embedding API.
    DEFAULT_DIMENSION = 768

    def __init__(self, config_path="./settings.txt"):
        """Read config, connect to Zilliz Cloud, and build the embedding client.

        Args:
            config_path: Path to a JSON file containing Zilliz_Addr,
                Zilliz_api_key and optional DashScope credentials and
                collection name.
        """
        self.config_path = config_path
        self.alias = "default"
        self.host = None
        self.token = None
        self.dashscope_api_key = None
        self.dashscope_api_base_url = None
        self._load_config()
        self._connect()

        self.embedding_func = EmbeddingFunc(self.dashscope_api_key, self.dashscope_api_base_url)

    def _load_config(self):
        """Populate connection fields from the JSON config file.

        Exits the process (status 1) on a missing/invalid file or missing
        required keys, matching the original fail-fast behavior.
        """
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            self.host = config.get("Zilliz_Addr")
            self.token = config.get("Zilliz_api_key")
            self.dashscope_api_key = config.get("DASHSCOPE_API_KEY", "")
            self.dashscope_api_base_url = config.get("DASHSCOPE_API_BASE_URL", "")
            self.collection_name = config.get("Zilliz_Collection_Name", "zyinfoai")
            if not self.host or not self.token:
                raise ValueError("Zilliz_Addr or Zilliz_api_key not found in settings.txt")
        except FileNotFoundError:
            print(f"Error: settings.txt not found at {self.config_path}")
            exit(1)
        except json.JSONDecodeError:
            print(f"Error: Could not decode JSON from {self.config_path}")
            exit(1)
        except ValueError as e:
            print(f"Configuration error: {e}")
            exit(1)

    def _connect(self):
        """Open the pymilvus connection under ``self.alias``.

        Raises:
            ConnectionError: If the underlying connect call fails.
        """
        try:
            connections.connect(
                alias=self.alias,
                uri=self.host,
                token=self.token,
                secure=True
            )
            print(f"Successfully connected to Zilliz Cloud at {self.host}")
        except Exception as e:
            raise ConnectionError(f"Failed to connect to Zilliz Cloud: {e}")

    def create_collection(self, collection_name, dim, description="Vector collection"):
        """Create the collection (and its vector index) if it does not exist.

        Args:
            collection_name: Name of the collection.
            dim: Dimensionality of the "vector" field.
            description: Human-readable collection description.

        Returns:
            Collection: The existing or newly created collection handle.
        """
        if utility.has_collection(collection_name, using=self.alias):
            print(f"Collection '{collection_name}' already exists.")
            return Collection(collection_name, using=self.alias)

        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="author", dtype=DataType.VARCHAR, max_length=256),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=256),
            FieldSchema(name="document_id", dtype=DataType.VARCHAR, max_length=256),
            FieldSchema(name="source_id", dtype=DataType.VARCHAR, max_length=256),
            # "result" stores the paired save_text for memory records.
            FieldSchema(name="result", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="created_at", dtype=DataType.VARCHAR, max_length=256)
        ]
        schema = CollectionSchema(fields, description)
        collection = Collection(name=collection_name, schema=schema, using=self.alias)
        print(f"Collection '{collection_name}' created successfully.")

        # L2 IVF_FLAT index on the vector field; nprobe at query time pairs
        # with this nlist.
        index_params = {
            "metric_type": "L2",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 128}
        }
        collection.create_index(field_name="vector", index_params=index_params)
        print(f"Index created for collection '{collection_name}'.")
        return collection

    def insert_vectors(self, collection_name, data):
        """Insert *data* into the collection and flush.

        Args:
            data: Rows/columns in any format accepted by Collection.insert.

        Returns:
            MutationResult: The pymilvus insert result.
        """
        collection = Collection(collection_name, using=self.alias)
        mr = collection.insert(data)
        print(f"Vectors inserted into '{collection_name}'. Insert count: {mr.insert_count}")
        collection.flush()
        return mr

    def search_vectors(self, collection_name, query_vectors, top_k, search_params, output_fields=None, expr=None):
        """Run an ANN search against the "vector" field.

        Args:
            collection_name: Collection to search.
            query_vectors: List of query vectors.
            top_k: Max hits per query vector.
            search_params: Milvus search params (metric_type, nprobe, ...).
            output_fields: Scalar fields to return with each hit.
            expr: Optional boolean filter expression. New, backward-compatible
                parameter — the default None preserves the old unfiltered
                behavior.

        Returns:
            SearchResult: One hit list per query vector.
        """
        collection = Collection(collection_name, using=self.alias)
        collection.load()
        results = collection.search(
            data=query_vectors,
            anns_field="vector",
            param=search_params,
            limit=top_k,
            expr=expr,
            output_fields=output_fields
        )
        collection.release()
        return results

    def delete_collection(self, collection_name):
        """Drop the collection if it exists; print the outcome either way."""
        if utility.has_collection(collection_name, using=self.alias):
            utility.drop_collection(collection_name, using=self.alias)
            print(f"Collection '{collection_name}' deleted successfully.")
        else:
            print(f"Collection '{collection_name}' does not exist.")

    def upsert_text_zilliz(self, Id, textall0, Source, Author):
        """Embed and insert texts; mirrors Go's Upsert_text_zilliz.

        For MEM_SOURCE the input list is treated as pairs: the first half are
        the texts to embed, the second half the raw results stored alongside
        them in the "result" field. Other sources have long texts chunked.

        Args:
            Id: Source id grouping these rows; auto-generated when falsy.
            textall0: Input texts (see MEM_SOURCE note above).
            Source: Source label stored with each row.
            Author: Author label stored with each row.

        Returns:
            tuple: ("ok", None) on success.
        """
        max_len = 3000 * 2  # character budget per chunk for text splitting

        textall = []
        save_text = []
        if Source == MEM_SOURCE:
            half = len(textall0) // 2
            textall = textall0[:half]
            save_text = textall0[half:]
        else:
            for text in textall0:
                if len(text) < max_len:
                    textall.append(text)
                else:
                    parts = split_text(text, max_len, [",", ".", "!", ";", "\n", "?"])
                    for part in parts:
                        if len(part) > 0:
                            textall.append(part)

        if not Id:
            # Synthesize a unique source id: md5(author)-source-nanosecond-ts.
            Id = md5_hash(Author) + "-" + Source + "-" + i64_to_s(int(time.time() * 1e9))

        print(f"Upsert_text_zilliz: Id: {Id}, num_texts: {len(textall)}")

        # Build row dicts directly (pymilvus accepts a list of per-row dicts
        # keyed by field name). The previous column-list + transpose +
        # FieldSchema dance produced exactly this structure with far more
        # machinery — and its comment misdescribed the format.
        entities = []
        for idx, text in enumerate(textall):
            embedding_data = get_zhipu_embedding([text], self.embedding_func)
            if not embedding_data or not embedding_data["Data"]:
                print("Error: Embedding generation failed.")
                continue

            entities.append({
                "text": text,
                "author": Author,
                "source": Source,
                "document_id": i64_to_s(int(time.time() * 1e9)) + "-" + i_to_s(idx),
                "source_id": Id,
                # Paired save_text for memory records; empty otherwise.
                "result": save_text[idx] if idx < len(save_text) else "",
                "created_at": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                "vector": embedding_data["Data"][0]["Embedding"],
            })

        collection = Collection(self.collection_name, using=self.alias)
        mr = collection.insert(entities)
        collection.flush()
        print(f"Inserted {mr.insert_count} entities.")
        return "ok", None

    def query_vectordb_zilliz(self, query, doc_id, source, author, topk):
        """Semantic search with optional author/source/doc_id filters.

        Mirrors Go's Query_vectordb_zilliz. Results are sorted by ascending
        L2 distance (lower is better).

        Returns:
            tuple: (list of result dicts, status message).
        """
        embedding_data = get_zhipu_embedding([query], self.embedding_func)
        if not embedding_data or not embedding_data["Data"]:
            logger.error("Error: Embedding generation failed.")
            return [], "Error: Embedding generation failed."

        query_vector = embedding_data["Data"][0]["Embedding"]

        # Build the boolean filter expression from the provided constraints.
        clauses = []
        if author and author != NONE_USER:
            clauses.append(f'author=="{author}"')
        if doc_id:
            clauses.append(f'source_id=="{doc_id}"')
        if source:
            clauses.append(f'source=="{source}"')
        expr = " && ".join(clauses)

        print(f"Knowledge search expression: {expr}")

        output_fields = ["text", "author", "source", "document_id", "source_id", "vector"]
        if source == MEM_SOURCE:
            output_fields.extend(["created_at", "result"])

        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        # Bug fix: the expression was previously built and printed but never
        # passed to the search call, so all filters were silently ignored.
        results = self.search_vectors(
            self.collection_name, [query_vector], topk, search_params,
            output_fields, expr=expr or None
        )

        processed_results = []
        for hit in results[0]:  # single query vector -> single hit list
            result_dict = {
                "text": hit.entity.get("text"),
                "author": hit.entity.get("author"),
                "source": hit.entity.get("source"),
                "document_id": hit.entity.get("document_id"),
                "source_id": hit.entity.get("source_id"),
                "score": hit.distance,  # L2 distance: lower is better
                "embedding": hit.entity.get("vector")
            }
            if source == MEM_SOURCE:
                result_dict["created_at"] = hit.entity.get("created_at")
                result_dict["result"] = hit.entity.get("result")
            processed_results.append(result_dict)

        # Sort by distance so the best match comes first.
        processed_results.sort(key=lambda x: x['score'])

        return processed_results, "ok"

    def delete_vectordb_zilliz(self, ids, document_id, author, deleteall):
        """Delete entities matching the given filters.

        Mirrors Go's Delete_vectordb_zilliz.

        Args:
            ids: Comma-separated source_id values, or empty.
            document_id: Single document_id filter, or empty.
            author: Author filter (NONE_USER disables it).
            deleteall: Accepted for signature parity with the Go code but
                not used.

        Returns:
            str: "ok" on success, "error Delete" on failure.
        """
        clauses = []
        if ids:
            ids_list = [f'"{i}"' for i in ids.split(',') if i]
            clauses.append(f"source_id in [{','.join(ids_list)}]")
        if author and author != NONE_USER:
            clauses.append(f'author =="{author}"')
        if document_id:
            clauses.append(f'document_id =="{document_id}"')
        expr = " and ".join(clauses)

        print(f"Delete expression: {expr}")

        if not expr:
            # Guard: an empty expression is rejected by Milvus, and silently
            # proceeding would risk an unintended mass delete. Fail early
            # with the same error status callers already handle.
            print("Error deleting from Zilliz: empty delete expression")
            return "error Delete"

        collection = Collection(self.collection_name, using=self.alias)
        try:
            collection.load()
            collection.delete(expr)
            collection.flush()
            collection.release()
            return "ok"
        except Exception as e:
            print(f"Error deleting from Zilliz: {e}")
            return "error Delete"

if __name__ == "__main__":
    # Example usage / manual smoke test against a live Zilliz instance.
    # Make sure you have Zilliz_Addr and Zilliz_api_key set in your settings.txt

    db = ZillizVectorDB()

    collection_name = "zyinfoai"
    dimension = ZillizVectorDB.DEFAULT_DIMENSION

    # Ensure the collection is fresh for testing (drops any prior data!)
    db.delete_collection(collection_name)

    # 1. Create a collection
    collection = db.create_collection(collection_name, dimension)

    # 2. Test upsert_text_zilliz
    print("\n--- Testing upsert_text_zilliz ---")
    test_id = "test_doc_123"
    test_texts = ["This is the first sentence.", "This is the second sentence.", "This is a longer text that needs to be split into multiple parts. This part will be truncated. This is another sentence in the longer text."]
    test_source = "test_source"
    test_author = "test_author"
    status, err = db.upsert_text_zilliz(test_id, test_texts, test_source, test_author)
    print(f"Upsert status: {status}, Error: {err}")

    # 3. Test query_vectordb_zilliz (no doc_id filter; match on source+author)
    print("\n--- Testing query_vectordb_zilliz ---")
    query_text = "first sentence"
    results, status_msg = db.query_vectordb_zilliz(query_text, "", test_source, test_author, 5)
    print(f"Query status: {status_msg}")
    for res in results:
        print(f"  Text: {res.get('text')}, Score: {res.get('score')}")

    # 4. Test delete_vectordb_zilliz (delete all rows inserted under test_id)
    logger.info("\n--- Testing delete_vectordb_zilliz ---")
    delete_status = db.delete_vectordb_zilliz(test_id, "", "", False)
    print(f"Delete status: {delete_status}")

    # 5. Delete the collection (optional, for cleanup)
    # db.delete_collection(collection_name)
