# KB-VQA-E — my_model/config/evaluation_config.py
# Provenance: authored by m7mdal7aj, commit 6d833cc (verified), 536 bytes.
"""Configuration constants for the KB-VQA evaluation pipeline.

Defines fuzzy-matching settings, the path to the evaluation results
workbook, the model/configuration matrix covered by the ablation study,
and GPT-4 API parameters used for automated answer grading.
"""
import os

# Fuzzy string-matching threshold (0-100, rapidfuzz/thefuzz scale).
FUZZY_SCORE = 80
# Fuzzy matching was used only at the initial stage of the evaluation,
# then answers were manually reviewed as detailed in the report.
USE_FUZZY = False

# Excel workbook holding the per-sample evaluation results.
EVALUATION_DATA_PATH = 'my_model/results/evaluation_results.xlsx'

# LLaMA-2 variants used.
MODEL_NAMES = ['13b', '7b']
# Ablation study on multiple model configurations.
MODEL_CONFIGURATIONS = ['caption+detic', 'caption+yolov5', 'only_caption', 'only_detic', 'only_yolov5']

# GPT-4 grading parameters; API key is read from the environment
# (None if the OPENAI_API_KEY variable is unset).
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
GPT4_MAX_TOKENS = 100   # was `GPT4_MAX_TOKENS=100`; PEP 8 spacing fixed
GPT4_TEMPERATURE = 0.1  # low temperature for near-deterministic grading
GPT4_SEED = 123         # fixed seed for reproducible GPT-4 responses