import logging
from concurrent.futures import ThreadPoolExecutor

from OpenAIUtils.OpenAIUtils import get_completion, get_logprobs, get_content
from OpenAIUtils.OutputParser import get_parse_fn

logger = logging.getLogger(__name__)

class Evaluator:
    """Runs a prompt template over a dataset with an LLM and collects
    parsed labels plus their summed token log-probabilities.

    Requests are fanned out over a thread pool (I/O-bound API calls),
    `self.batch_size` items at a time.
    """

    def __init__(self, args):
        """Configure the evaluator from a parsed-args namespace.

        Expects `args` to carry: `input_keys` (fields pulled from each data
        item), `label_type` (selects the output parser), `model`, `threads`
        (thread-pool width, stored as `batch_size`), and `seed`.
        """
        self.input_keys = args.input_keys
        # self.label_key = args.label_key
        self.parser = get_parse_fn(args.label_type)
        self.model = args.model
        self.batch_size = args.threads
        self.seed = args.seed

    def batch_run(self, batch_data, batch_size, prompt_template):
        """Evaluate one batch of items concurrently.

        Args:
            batch_data: iterable of dict-like items containing `self.input_keys`.
            batch_size: max worker threads for this batch.
            prompt_template: object exposing `get_prompt(**kwargs)`.

        Returns:
            list of (label, log_prob) tuples, in input order
            (`executor.map` preserves ordering). Items whose prompt cannot
            be built yield the sentinel ('', -1e9) instead of raising, so
            one bad row does not abort the batch.
        """
        def helper(item):
            input_dict = {key: item[key] for key in self.input_keys}
            try:
                prompt = prompt_template.get_prompt(**input_dict)
            except Exception:
                # logger.exception records the traceback; lazy %s args avoid
                # formatting cost unless the record is actually emitted.
                logger.exception("Failed to generate prompt: %s", prompt_template)
                return '', -1e9
            response = get_completion(self.model, prompt, seed=self.seed)
            text = get_content(response)
            label = self.parser(text)
            # Sum per-token log-probs into a single sequence score.
            log_probs = get_logprobs(response)
            log_prob = sum(prob for _, prob in log_probs)
            return label, log_prob

        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            return list(executor.map(helper, batch_data))

    def run(self, data, prompt_template):
        """Evaluate the full dataset in batches of `self.batch_size`.

        Returns:
            (pred_labels, pred_logprobs): two parallel lists, one entry per
            item in `data`, in input order. Both are empty for empty `data`.
        """
        pred_labels = []
        pred_logprobs = []
        for start in range(0, len(data), self.batch_size):
            # Slicing already clamps at len(data); no min() needed.
            batch = data[start:start + self.batch_size]
            for label, log_prob in self.batch_run(batch, self.batch_size, prompt_template):
                pred_labels.append(label)
                pred_logprobs.append(log_prob)
        return pred_labels, pred_logprobs

