import json

import numpy as np
import pandas as pd
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from sentence_transformers import SentenceTransformer

# Embed every startup record and cache the resulting vectors on disk so the
# upload step below can run without re-encoding.
encoder = SentenceTransformer(
    "vectordb/model/all-MiniLM-L6-v2", device="cuda"
)

startups = pd.read_json("vectordb/data/startups_demo.json", lines=True)

# One text per startup: the name (stored in the "alt" column) followed by
# its description.
texts = [row.alt + ". " + row.description for row in startups.itertuples()]
embeddings = encoder.encode(texts, show_progress_bar=True)

np.save("vectordb/data/startup_vectors.npy", embeddings, allow_pickle=False)

# Connect to a local Qdrant instance and make sure the target collection
# exists before uploading.
# NOTE: VectorParams and Distance come from qdrant_client.models — the
# original script referenced them without importing them (NameError).
client = QdrantClient("http://localhost:6333")

if not client.collection_exists("startups"):
    # size=384 is the output dimensionality of all-MiniLM-L6-v2; cosine is
    # the standard similarity measure for sentence-transformer embeddings.
    client.create_collection(
        collection_name="startups",
        vectors_config=VectorParams(size=384, distance=Distance.COSINE),
    )

# Stream payloads straight from the JSONL file. upload_collection consumes
# the `payload` iterator lazily, so the file must remain open for the whole
# upload — the `with` block guarantees that, and closes the handle afterwards
# (the original `open()` call leaked the file descriptor).
with open("vectordb/data/startups_demo.json", encoding="utf-8") as fd:
    # payload is an iterator over startup dicts (one JSON object per line).
    payload = map(json.loads, fd)
    # Load all vectors into memory; a numpy array is iterable as-is.
    # Use np.load(..., mmap_mode="r") instead if RAM is a concern.
    vectors = np.load("vectordb/data/startup_vectors.npy")

    client.upload_collection(
        collection_name="startups",
        vectors=vectors,
        payload=payload,
        ids=None,  # Vector ids will be assigned automatically
        batch_size=256,  # How many vectors will be uploaded in a single request?
    )