import hydra
from omegaconf import DictConfig, OmegaConf
from retrivolve.dataset.faiss_dataset import FaissVecotrDB
from retrivolve.rag.rag_embeddings import LocalEmbeddings
import torch


@hydra.main(version_base=None, config_path="./config", config_name="config")
def run(cfg: DictConfig):
    """Load a FAISS vector database and run a small retrieval demo.

    Hydra resolves *cfg* from ``./config/config.yaml`` (plus CLI
    overrides). The embedding model is placed on CUDA only when both
    requested by the config and actually available; the FAISS index is
    kept on CPU.
    """
    # Select the embedding device: CUDA must be requested AND present.
    if cfg.model.use_cuda and torch.cuda.is_available():
        embed_device = 'cuda'
    else:
        embed_device = 'cpu'

    encoder = LocalEmbeddings(
        model_name_or_path=cfg.model.model_name_or_path,
        device=embed_device,
    )

    # NOTE(review): the FAISS index is pinned to CPU regardless of the
    # embedding device — presumably intentional (CPU index, GPU encoder);
    # confirm if a GPU index is ever wanted.
    vector_db = FaissVecotrDB(
        dim=cfg.model.embed_dim,
        index_type=cfg.database.index_type,
        device='cpu',
        embeddings_model=encoder,
    )
    vector_db.load(cfg.database.file_path)

    # Demo queries: one natural-language request plus three code snippets
    # (two Python variants and one Java port of the same tester class).
    queries = [
        r'''Implementation of rotary encoding in llama''',
        r'''class MinistralModelTester(CausalLMModelTester):
    config_class = MinistralConfig

    if is_torch_available():
        base_model_class = MinistralModel
        causal_lm_class = MinistralForCausalLM
        sequence_class = MinistralForSequenceClassification
        token_class = MinistralForTokenClassification
        question_answering_class = MinistralForQuestionAnswering
''',
        r'''public class MinistralModelTester extends CausalLMModelTester {
    private static Class<?> configClass;
    private static Class<?> baseModelClass;
    private static Class<?> causalLmClass;
    private static Class<?> sequenceClass;
    private static Class<?> tokenClass;
    private static Class<?> questionAnsweringClass;

    static {
        configClass = MinistralConfig.class;

        if (TorchUtils.isTorchAvailable()) {  // 模拟 is_torch_available()
            baseModelClass = MinistralModel.class;
            causalLmClass = MinistralForCausalLM.class;
            sequenceClass = MinistralForSequenceClassification.class;
            tokenClass = MinistralForTokenClassification.class;
            questionAnsweringClass = MinistralForQuestionAnswering.class;
        }
    }

    public static Class<?> getBaseModelClass() {
        return baseModelClass;
    }
}
''',
        r'''class MinistralModelTester(CausalLMModelTester):
    config_class = MinistralConfig

    if is_torch_available():
        _model_classes = {
            "base_model_class": MinistralModel,
            "causal_lm_class": MinistralForCausalLM,
            "sequence_class": MinistralForSequenceClassification,
            "token_class": MinistralForTokenClassification,
            "question_answering_class": MinistralForQuestionAnswering,
        }
        locals().update(_model_classes)
''']
    print(vector_db.search(queries=queries, k=4))


if __name__ == "__main__":
    # Hydra parses CLI overrides and injects the resolved config into run().
    run()