import sys
import os
import hydra
import json
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
import torch
from os.path import join, exists
import faiss
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from retrivolve.rag.rag_parser import CodeParser
from retrivolve.utils.utils import find_files_with_suffix
from retrivolve.rag.rag_embeddings import LocalEmbeddings
from retrivolve.dataset.faiss_dataset import FaissVecotrDB


def load_data_files(data_path: str, repos: list[str], file_suffix: list[str]) -> list[str]:
    """Collect all matching source files from the given repositories.

    Args:
        data_path: Root directory that contains the repository folders.
        repos: Repository folder names to scan under ``data_path``.
        file_suffix: Suffixes forwarded to ``find_files_with_suffix``.

    Returns:
        Paths of every matching file across the repositories that exist;
        missing repositories are skipped after printing a warning.
    """
    collected: list[str] = []
    for repo in repos:
        repo_path = os.path.join(data_path, repo)
        if not os.path.exists(repo_path):
            print(f"Warning: Repository path {repo_path} does not exist.")
            continue
        collected.extend(find_files_with_suffix(repo_path, file_suffix))
    return collected

def save_chunks_to_json(chunks: dict, output_path: str, embeddings_model: LocalEmbeddings, items_per_file: int = 1000):
    """Write chunks and their embeddings to batched JSON files.

    Splits ``chunks`` into batches of ``items_per_file`` entries. Each batch
    produces ``part_<k>.json`` (the chunk texts keyed by file path) and
    ``part_<k>_vectors.json`` (the corresponding embedding vectors) inside
    the ``output_path`` directory.

    Args:
        chunks: Mapping of file path -> list of code chunks.
        output_path: Directory the batch files are written into (created
            if missing).
        embeddings_model: Model whose ``get_embedding`` is called per chunk
            list; its result must support ``.tolist()``.
        items_per_file: Number of (file, chunks) entries per batch file.
    """
    # Bug fix: output_path is used as a directory in os.path.join() below,
    # so create output_path itself — makedirs(dirname(output_path)) only
    # created its parent and left the target directory missing.
    os.makedirs(output_path, exist_ok=True)
    chunk_items = list(chunks.items())
    for i in tqdm(range(0, len(chunk_items), items_per_file)):
        batch = chunk_items[i:i + items_per_file]
        batch_dict = dict(batch)
        batch_vector_dict = {}
        # Generate embeddings for each file's chunk list in this batch.
        for file_path, chunk_list in batch:
            embeddings = embeddings_model.get_embedding(chunk_list)
            batch_vector_dict[file_path] = embeddings.tolist()
        # 1-based part index shared by the text and vector files.
        part = i // items_per_file + 1
        batch_file = os.path.join(output_path, f"part_{part}.json")
        batch_vector_file = os.path.join(output_path, f"part_{part}_vectors.json")
        with open(batch_file, 'w', encoding='utf-8') as f:
            json.dump(batch_dict, f, ensure_ascii=False, indent=4)
        with open(batch_vector_file, 'w', encoding='utf-8') as f:
            json.dump(batch_vector_dict, f, ensure_ascii=False, indent=4)
        print(f"Saved {len(batch)} items to {batch_file}")
        print(f"Saved embeddings to {batch_vector_file}")
    

def save_chunks_to_vector_db(chunks: dict, db: FaissVecotrDB, save_path: str):
    """Add every file's chunks to the vector DB, then persist it.

    Args:
        chunks: Mapping of file path -> list of code chunks.
        db: Vector database that embeds and indexes the chunks.
        save_path: Location the populated index is saved to.
    """
    for file_path, chunk_list in tqdm(chunks.items()):
        # One mark per chunk, derived from the source file path.
        marks = [f"{file_path}_chunk_{i}" for i in range(len(chunk_list))]
        db.add_vector(chunk_list, marks=marks)
    db.save(save_path)

@hydra.main(version_base=None, config_path="../config", config_name="config")
def run(cfg: DictConfig):
    """Build a FAISS vector DB from code chunks of the configured repositories.

    Pipeline: discover source files from the configured repos, split each
    file into chunks, embed the chunks, and persist the vectors to the
    configured database path. Unreadable/unparseable files are skipped
    with an error message rather than aborting the run.
    """
    print(OmegaConf.to_yaml(cfg))
    code_parser = CodeParser()
    # Load data files based on config
    all_files = load_data_files(cfg.data.data_path, cfg.data.repos, cfg.data.file_suffix)
    print(f"Found {len(all_files)} files to process.")
    print(f"Sample files: {all_files[:5]}")
    # Process each file and extract code chunks
    all_chunks = {}
    for file_path in all_files:
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                code = f.read()
            # NOTE(review): lang is hard-coded to "python" although
            # cfg.data.file_suffix is configurable — confirm this is intended.
            chunks = code_parser.get_chunk(code, lang="python", max_chunk_size=cfg.data.max_chunk_size, overlap_size=cfg.data.overlap_size)
            all_chunks[file_path] = chunks
            print(f"Processed {file_path}, extracted {len(chunks)} chunks.")
        except Exception as e:
            # Best-effort: report and continue with the remaining files.
            print(f"Error processing {file_path}: {e}")
    # Bug fix: len(all_chunks) is the number of processed FILES; sum the
    # per-file chunk counts to report the actual chunk total.
    total_chunks = sum(len(chunk_list) for chunk_list in all_chunks.values())
    print(f"Total chunks extracted: {total_chunks}")
    # Initialize embeddings model
    device = 'cuda' if cfg.model.use_cuda and torch.cuda.is_available() else 'cpu'
    embeddings_model = LocalEmbeddings(model_name_or_path=cfg.model.model_name_or_path, device=device)
    # Save the chunks to a file for further processing
    # save_chunks_to_json(all_chunks, cfg.data.save_path, embeddings_model, items_per_file=cfg.data.items_per_json)

    # Optionally, save to a vector database
    db = FaissVecotrDB(dim=cfg.model.embed_dim, index_type=cfg.database.index_type, embeddings_model=embeddings_model, device='cpu')
    save_chunks_to_vector_db(all_chunks, db, cfg.database.file_path)




# Entry point: Hydra composes the config (including CLI overrides) and
# invokes the decorated run() function.
if __name__ == "__main__":
    run()
