from transformers import AutoTokenizer

from tqdm import tqdm
import os

from utils import FileUtils


def prompt_path(base_dir: str, save_name: str, *, ext: str = "jsonl"):
    """Return the save path ``<base_dir>/<save_name>.<ext>`` for a prompt file."""
    filename = ".".join((save_name, ext))
    return os.path.join(base_dir, filename)


class PromptBuilder:
    def __init__(self, *,
                 model_path: str,
                 max_retrieval_length: int = 4096,
                 test_dataset_path: str,
                 retrieved_func_block_path: str,
                 retrieved_code_block_path: str,
                 retrieved_api_path: str,
                 save_base: str):
        self.save_base = save_base
        self.retrieved_api_path = retrieved_api_path
        self.retrieved_code_block_path = retrieved_code_block_path
        self.retrieved_func_block_path = retrieved_func_block_path
        self.test_dataset_path = test_dataset_path
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
        self.max_retrieval_length = max_retrieval_length

    def build_prompt(self, test_item: dict, *,
                     function_blocks: list[dict],
                     code_blocks: list[dict],
                     apis: list[dict],
                     collect_last_pos: bool = False,
                     max_context: int | None = None,
                     max_rel: int | None = None) -> str | tuple[str, tuple[int, int, int]]:
        all_len = 0
        all_context = ""
        flag: bool = True

        max_api_pos = -1
        max_func_block_pos = -1
        max_code_block_pos = -1

        if len(apis) > 0 and flag:
            prepend_context = "# Here are some relevant apis in the repo:\n"
            tokens = self.tokenizer(prepend_context)
            token_len = len(tokens)
            all_len += token_len
            all_context += prepend_context
            for i, api in enumerate(apis):
                api_sig = api["signature"]
                tokens = self.tokenizer(api_sig)
                token_len = len(tokens)
                if all_len + token_len <= self.max_retrieval_length:
                    all_len += token_len
                    all_context += api_sig + "\n"
                    max_api_pos = i
                else:
                    flag = False
                    break

        if len(function_blocks) > 0 and flag:
            prepend_context = "# Here are some relevant functions:\n"
            tokens = self.tokenizer.encode(prepend_context)
            token_len = len(tokens)
            all_len += token_len
            all_context += prepend_context
            for i, func_block in enumerate(function_blocks):
                context = func_block["context"]
                tokens = self.tokenizer.encode(context)
                token_len = len(tokens)
                if all_len + token_len <= self.max_retrieval_length:
                    all_len += token_len
                    all_context += context + "\n"
                    max_func_block_pos = i
                else:
                    flag = False
                    break

        if len(code_blocks) > 0 and flag:
            prepend_context = "# Here are some relevant code fragments from other files of the repo:\n"
            tokens = self.tokenizer.encode(prepend_context)
            token_len = len(tokens)
            all_len += token_len
            all_context += prepend_context
            for i, code_block in enumerate(code_blocks):
                context = code_block["context"]
                tokens = self.tokenizer.encode(context)
                token_len = len(tokens)

                if all_len + token_len <= self.max_retrieval_length:
                    all_len += token_len
                    all_context += context + "\n"
                    max_code_block_pos = i
                else:
                    flag = False
                    break

        all_context += ("=" * 20 + "\n")
        all_context += "# Please complete the code snippet:\n"
        test_prompt = test_item["prompt"]
        if max_context is not None:
            tokens = self.tokenizer.encode(test_prompt)
            tokens = tokens[-max_context:]
            test_prompt = self.tokenizer.decode(tokens)
        test_prompt = all_context + test_prompt

        if not collect_last_pos:
            return test_prompt
        else:
            return test_prompt, (max_api_pos, max_func_block_pos, max_code_block_pos)

    def build_for_dataset(self, *,
                          save_name: str,
                          remove_api: bool = False,
                          remove_func_block: bool = False,
                          remove_code_block: bool = False,
                          max_pos_path: str | None = None,
                          max_context: int | None = None):
        test_dataset: list[dict] = FileUtils.load_jsonl(self.test_dataset_path)
        retrieved_function_blocks: dict[str, list[dict]] = FileUtils.load_pickle(self.retrieved_func_block_path)
        retrieved_code_blocks: dict[str, list[dict]] = FileUtils.load_pickle(self.retrieved_code_block_path)
        retrieved_apis: dict[str, list[dict]] = FileUtils.load_pickle(self.retrieved_api_path)

        pos_flag: bool = max_pos_path is not None
        max_pos_info = FileUtils.load_pickle(max_pos_path) if pos_flag else None

        prompt_all: list[dict[str, str]] = []
        for data in tqdm(test_dataset):
            metadata = data["metadata"]
            task_id = metadata["task_id"]
            function_blocks = retrieved_function_blocks.get(task_id, [])
            code_blocks = retrieved_code_blocks.get(task_id, [])
            apis = retrieved_apis.get(task_id, [])
            apis = apis[:5]

            if pos_flag:
                ma, mf, mc = max_pos_info[task_id]
                apis = apis[:ma + 1]
                function_blocks = function_blocks[:mf + 1]
                code_blocks = code_blocks[:mc + 1]

            if remove_api:
                apis = []
            if remove_func_block:
                function_blocks = []
            if remove_code_block:
                code_blocks = []

            prompt = self.build_prompt(data, function_blocks=function_blocks, code_blocks=code_blocks, apis=apis, max_context=max_context)
            prompt_all.append(
                {
                    "task_id": task_id,
                    "prompt": prompt
                }
            )

        FileUtils.save_jsonl(prompt_all, prompt_path(self.save_base, save_name, ext="jsonl"))

