import sys
import os
import torch
import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from retrivolve.rag.rag_embeddings import LocalEmbeddings

def test_local_embeddings():
    """Smoke-test LocalEmbeddings: embed sample queries and documents, then
    print the embedding shapes and a query/document similarity score.

    Requires the Qwen3-Embedding-4B checkpoint at the hard-coded model path;
    runs on CUDA when available, otherwise CPU.
    """
    run_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    embedder = LocalEmbeddings(
        model_name_or_path='/workspace/model_hub/Qwen3-Embedding-4B',
        device=run_device,
    )

    # Sample inputs to embed.
    queries = [
        "What is the capital of China?",
        "Explain gravity",
    ]
    documents = [
        "The capital of China is Beijing.",
        "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun.",
    ]

    # Embed both sides and report the resulting tensor shapes.
    q_emb = embedder.get_embedding(queries)
    d_emb = embedder.get_embedding(documents)
    print("Query Embeddings:", q_emb.shape)
    print("Document Embeddings:", d_emb.shape)

    # Score queries against documents via the embedder's own similarity helper.
    sim = embedder.cal_similarity(q_emb, d_emb)
    print("Similarity between first query and first document:", sim)

def data_analysis(data_path='/workspace/Pikachuchu/data/crag_task_1_and_2_dev_v4.jsonl'):
    """Load JSONL records from *data_path*, print the first one for inspection,
    and return the records read.

    Args:
        data_path: Path to a JSON-Lines file (one JSON object per line;
            blank lines are skipped). Defaults to the CRAG dev set path.

    Returns:
        list: The parsed records (currently only the first record, since the
        loop stops after printing it — drop the ``break`` to load everything).
    """
    records = []
    with open(data_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # skip blank lines between records
            records.append(json.loads(line))
            print(records[0])
            # Original code called exit(1) here, which killed the interpreter
            # with a failure status; break keeps the preview behavior while
            # letting callers continue.
            break
    return records




if __name__ == "__main__":
    data_analysis()
    # test_local_embeddings()