#!/bin/bash
#
# Run RAG-augmented code-generation inference on a MultiPL-E prompt set,
# then execute the generated programs and compute pass@k.
#
# Usage: script [--name MODEL] [--lang LANG] [--root-dataset DS]
#               [--temperature T] [--num-gpus N] [--template-type TYPE]
#               [--knowledge-model-name M] [--code-model-name M] ...

# Fail fast: abort on command errors, unset variables, and pipeline
# failures, so evaluation never runs on a half-finished generation step.
set -euo pipefail

# ---- Generation defaults -------------------------------------------------
MODEL_NAME="unsloth/Meta-Llama-3.1-8B-Instruct"
# NOTE(review): LANG shadows the standard locale environment variable; it is
# only read by this script, but consider renaming to TARGET_LANG to be safe.
LANG="rkt"
ROOT_DATASET="humaneval"
TEMPERATURE="0.2"
NUM_GPUS="2"
BATCH_SIZE="20"
COMPLETION_LIMIT="20"
OUTPUT_DIR_PREFIX="multiple_results"
TEMPLATE_TYPE="cot"

# ---- Knowledge RAG config ------------------------------------------------
KNOWLEDGE_RETRIEVAL="true"
KNOWLEDGE_PATH="documents/knowledge/racket/racket_knowledge_v2.jsonl"
KNOWLEDGE_INDEX_CACHE_PATH="rag/index/"
KNOWLEDGE_MODEL_NAME="sentence-transformers/all-MiniLM-L6-v2"  # "jinaai/jina-embeddings-v2-base-code"

# ---- Code RAG config -----------------------------------------------------
CODE_RETRIEVAL="false"
CODE_INDEX_CACHE_PATH="rag/index/"
CODE_MODEL_NAME="sentence-transformers/all-MiniLM-L6-v2"

TAG="rag"  # output dir tag, used to distinguish different RAG experiments

# Parse command line arguments into the configuration globals.
# Every recognized flag takes exactly one value; an unknown flag or a
# flag given without a value terminates the script with status 1.
parse_args() {
    while [[ $# -gt 0 ]]; do
        local opt=$1 val=${2-}  # ${2-} keeps set -u happy when value is missing
        case $opt in
            --name)                 MODEL_NAME=$val ;;
            --lang)                 LANG=$val ;;
            --root-dataset)         ROOT_DATASET=$val ;;
            --temperature)          TEMPERATURE=$val ;;
            --num-gpus)             NUM_GPUS=$val ;;
            --batch-size)           BATCH_SIZE=$val ;;
            --completion-limit)     COMPLETION_LIMIT=$val ;;
            --template-type)        TEMPLATE_TYPE=$val ;;
            --knowledge-retrieval)  KNOWLEDGE_RETRIEVAL=$val ;;
            --knowledge-path)       KNOWLEDGE_PATH=$val ;;
            --knowledge-model-name) KNOWLEDGE_MODEL_NAME=$val ;;
            --code-retrieval)       CODE_RETRIEVAL=$val ;;
            --code-model-name)      CODE_MODEL_NAME=$val ;;
            --tag)                  TAG=$val ;;
            *)
                echo "Unknown option: $opt" >&2
                exit 1
                ;;
        esac
        # Recognized option but no value supplied (e.g. trailing "--lang").
        if [[ $# -lt 2 ]]; then
            echo "Option $opt requires a value" >&2
            exit 1
        fi
        shift 2
    done
}

# Apply any overrides supplied on the command line.
parse_args "$@"

# ---------------------------------------------------------------------------
# Derive the experiment tag from which retrieval sources are enabled.
# ---------------------------------------------------------------------------
# Knowledge file name without directory or extension,
# e.g. .../racket_knowledge_v2.jsonl -> racket_knowledge_v2
kb_name=$(basename "${KNOWLEDGE_PATH%.*}")
case "${KNOWLEDGE_RETRIEVAL}:${CODE_RETRIEVAL}" in
    true:false) TAG="${TAG}-${kb_name}" ;;       # knowledge retrieval only
    false:true) TAG="${TAG}-code" ;;             # code retrieval only
    true:true)  TAG="${TAG}-${kb_name}-code" ;;  # both sources enabled
    *)          TAG="${TAG}-none" ;;             # neither source enabled
esac

echo "TAG: $TAG"

# Prompt template name follows the `<type>_<lang>` convention.
TEMPLATE_NAME="${TEMPLATE_TYPE}_${LANG}"

# Index cache files: slashes in model names are replaced by underscores so
# each (corpus, embedding model) pair maps to a single flat file name.
KNOWLEDGE_INDEX_CACHE_PATH="${KNOWLEDGE_INDEX_CACHE_PATH}/${kb_name}_${KNOWLEDGE_MODEL_NAME//\//_}.index"
CODE_INDEX_CACHE_PATH="${CODE_INDEX_CACHE_PATH}/code_${LANG}_${CODE_MODEL_NAME//\//_}.index"

# Per-experiment output directory.
OUTPUT_DIR="${OUTPUT_DIR_PREFIX}/${ROOT_DATASET}/${MODEL_NAME//\//_}/${LANG}_${TEMPLATE_NAME}_${TAG}"
mkdir -p "$OUTPUT_DIR"

# MultiPL-E prompt dataset for the chosen dataset/language pair.
PROMPT_DATASET_PATH="/home/sjw/ljb/lr_rag/multipl_e/prompts/${ROOT_DATASET}-${LANG}-reworded.jsonl"

# ---------------------------------------------------------------------------
# Step 1: Generation — skipped when the output directory already holds any
# entry, so a finished run is never re-generated. `find -mindepth 1` is used
# instead of parsing `ls` output; it also behaves sanely if the directory
# is missing (stderr suppressed, empty result -> inference runs).
# ---------------------------------------------------------------------------
if [ -n "$(find "$OUTPUT_DIR" -mindepth 1 -print -quit 2>/dev/null)" ]; then
    echo "Output directory $OUTPUT_DIR already contains results. Skipping inference step."
else
    echo "Running inference..."
    python inference_cot_rag.py \
        --name "$MODEL_NAME" \
        --lang "$LANG" \
        --root-dataset "$ROOT_DATASET" \
        --temperature "$TEMPERATURE" \
        --output-dir "$OUTPUT_DIR" \
        --batch-size "$BATCH_SIZE" \
        --completion-limit "$COMPLETION_LIMIT" \
        --num_gpus "$NUM_GPUS" \
        --template-name "$TEMPLATE_NAME" \
        --use-local \
        --dataset "$PROMPT_DATASET_PATH" \
        --knowledge-retrieval "$KNOWLEDGE_RETRIEVAL" \
        --knowledge-path "$KNOWLEDGE_PATH" \
        --knowledge-index-cache-path "$KNOWLEDGE_INDEX_CACHE_PATH" \
        --knowledge-model-name "$KNOWLEDGE_MODEL_NAME" \
        --code-retrieval "$CODE_RETRIEVAL" \
        --code-index-cache-path "$CODE_INDEX_CACHE_PATH" \
        --code-model-name "$CODE_MODEL_NAME" || {
            echo "Inference failed; skipping evaluation and pass@k." >&2
            exit 1
        }
fi

# ---------------------------------------------------------------------------
# Step 2: Execute the generated programs. Do not compute pass@k on a failed
# or partial execution run.
# ---------------------------------------------------------------------------
python multipl_e/evaluation/src/main.py --dir "$OUTPUT_DIR" --output-dir "$OUTPUT_DIR" --full-function || {
    echo "Evaluation failed; skipping pass@k." >&2
    exit 1
}

# ---------------------------------------------------------------------------
# Step 3: pass@k metrics over the executed results.
# ---------------------------------------------------------------------------
python multipl_e/pass_k.py "$OUTPUT_DIR"