import random
import hashlib
import json

class EmbeddingFunc:
    """Mock embedding provider: maps a query string to a deterministic vector.

    A real implementation would call an embedding model; this stand-in only
    guarantees that identical queries always yield identical vectors.
    """

    DIMENSION = 128  # length of every embedding vector

    def embed_query(self, query: str) -> list:
        """Return a deterministic DIMENSION-length vector of floats in [0.0, 1.0).

        Fixes two defects of the original `random.seed(hash(query))` approach:
        - `hash(str)` is salted per process (PYTHONHASHSEED), so embeddings
          were only consistent within a single run. SHA-256 of the query
          gives a seed that is stable across runs and platforms.
        - Seeding the module-level `random` mutated global RNG state as a
          side effect; a private `random.Random` instance avoids that.
        """
        digest = hashlib.sha256(query.encode("utf-8")).digest()
        rng = random.Random(int.from_bytes(digest[:8], "big"))
        return [rng.random() for _ in range(self.DIMENSION)]

class VectorDB:
    """Minimal in-memory vector store with brute-force cosine-similarity search."""

    DEFAULT_DIMENSION = 128
    # Keys managed by the store itself; caller metadata must never overwrite them.
    _RESERVED_KEYS = frozenset({'id', 'text', 'vector'})

    def __init__(self):
        # Each entry: {'id': str, 'text': str, 'vector': list, **metadata}
        self.collection = []
        self.embedding_func = EmbeddingFunc()
        self.next_id = 0  # monotonically increasing id counter

    def upsert_text(self, text: str, metadata: dict = None) -> str:
        """Embed *text*, store it with optional flat *metadata*, return the new id.

        Bug fix: the original `entry.update(metadata)` let metadata keys
        silently clobber 'id', 'text' or 'vector', corrupting the entry.
        Reserved keys in *metadata* are now ignored.
        """
        vector = self.embedding_func.embed_query(text)
        entry_id = str(self.next_id)
        self.next_id += 1

        entry = {'id': entry_id, 'text': text, 'vector': vector}
        if metadata:
            entry.update(
                (key, value)
                for key, value in metadata.items()
                if key not in self._RESERVED_KEYS
            )
        self.collection.append(entry)
        return entry_id

    @staticmethod
    def _cosine_similarity(vec1, vec2) -> float:
        """Cosine similarity of two equal-length vectors; 0.0 if either is zero."""
        dot_product = sum(v1 * v2 for v1, v2 in zip(vec1, vec2))
        magnitude1 = sum(v ** 2 for v in vec1) ** 0.5
        magnitude2 = sum(v ** 2 for v in vec2) ** 0.5
        if magnitude1 == 0 or magnitude2 == 0:
            return 0.0  # zero vector has no direction; define similarity as 0
        return dot_product / (magnitude1 * magnitude2)

    def search_vectors(self, query_text: str, top_k: int = 5) -> list:
        """Return up to *top_k* stored entries most similar to *query_text*.

        Each result is a copy of the stored entry with an added 'score' key,
        ordered by descending cosine similarity. An empty collection or a
        non-positive *top_k* yields [] without embedding the query.
        """
        if top_k <= 0 or not self.collection:
            return []
        query_vector = self.embedding_func.embed_query(query_text)

        # Score every entry once, then rank. Sorting on the score element
        # only (not the dict) avoids TypeError on tied scores.
        scored = [
            (self._cosine_similarity(query_vector, entry['vector']), entry)
            for entry in self.collection
        ]
        scored.sort(key=lambda pair: pair[0], reverse=True)

        results = []
        for score, entry in scored[:top_k]:
            hit = entry.copy()  # shallow copy so callers can't mutate the store
            hit['score'] = score
            results.append(hit)
        return results

    def delete_entry(self, entry_id: str):
        """Remove the entry with *entry_id*; silently a no-op for unknown ids."""
        self.collection = [e for e in self.collection if e['id'] != entry_id]

if __name__ == "__main__":
    db = VectorDB()

    def _show(hit, with_source=True):
        # Render one search hit in the demo's fixed line format.
        line = f"  ID: {hit['id']}, Score: {hit['score']:.4f}, Text: {hit['text']}"
        if with_source:
            line += f", Source: {hit.get('source')}"
        print(line)

    # Test upsert
    print("\n--- Testing upsert_text ---")
    samples = [
        ("The quick brown fox jumps over the lazy dog.", "proverb"),
        ("Artificial intelligence is rapidly advancing.", "tech_news"),
        ("Python is a versatile programming language.", "programming"),
    ]
    id1, id2, id3 = [db.upsert_text(text, {"source": src}) for text, src in samples]
    print(f"Upserted texts with IDs: {id1}, {id2}, {id3}")
    print(f"Current collection size: {len(db.collection)}")

    # Test search
    print("\n--- Testing search_vectors ---")
    query_text = "What is AI?"
    results = db.search_vectors(query_text, top_k=2)
    print(f"Search results for '{query_text}':")
    for hit in results:
        _show(hit)

    query_text_2 = "programming languages"
    results_2 = db.search_vectors(query_text_2, top_k=1)
    print(f"\nSearch results for '{query_text_2}':")
    for hit in results_2:
        _show(hit)

    # Test delete
    print("\n--- Testing delete_entry ---")
    db.delete_entry(id1)
    print(f"Deleted ID: {id1}. Current collection size: {len(db.collection)}")
    results_after_delete = db.search_vectors("fox", top_k=1)
    print(f"Search results for 'fox' after delete:")
    if not results_after_delete:
        print("  No results found.")
    else:
        for hit in results_after_delete:
            _show(hit, with_source=False)
