"""
This script produces completions for roughly any AutoModelForCausalLM.
Use Instruct Mode, will generate full function code, including the function signature.
"""
import torch
from typing import List
from multipl_e.multipl_e.completions import make_main, partial_arg_parser
from vllm import LLM, SamplingParams
from rag.retriever import KnowledgeRetriever, CodeRetriever
from prompts.gen_prompts import t_problem, t_cot, t_relevant, t_knowledge, t_code
from utils.process_utils import split_cots, extract_content, dict2str, load_shot


class VLLM:
    """Two-stage CoT + RAG completion engine built on a vLLM model.

    Stage 1 prompts the model for a chain of thought plus an initial code
    draft; stage 2 retrieves supporting knowledge and/or similar code from
    the configured retrievers (depending on `args.knowledge_retrieval` /
    `args.code_retrieval`) and re-prompts the model for the final code.
    """

    def __init__(self, name, revision, tokenizer_name=None, tokenizer_revision=None, num_gpus=1, knowledge_retriever=None, code_retriever=None, args=None):
        # Prefer bfloat16 on GPUs that support it; otherwise fall back to float16.
        dtype = "float16"
        if torch.cuda.is_bf16_supported():
            dtype = "bfloat16"
        self.model = LLM(
            model=name,
            tokenizer=tokenizer_name,
            dtype=dtype,
            revision=revision,
            max_model_len=16384,
            tokenizer_revision=tokenizer_revision,
            trust_remote_code=True,
            tensor_parallel_size=num_gpus,
            gpu_memory_utilization=0.85,
        )
        # Retrievers may be None; completions() only dereferences them when the
        # matching args.*_retrieval flag is set.
        self.knowledge_retriever = knowledge_retriever
        self.code_retriever = code_retriever
        self.args = args
    def completions(
        self, prompts: List[str], max_tokens: int, temperature: float, top_p, stop, template_name: str
    ):
        """Run the two-stage pipeline and return (final_codes, full).

        `final_codes` are the extracted <code> sections; `full` are the
        stage-2 prompt + completion strings they were extracted from.

        NOTE(review): `max_tokens`, `stop` and `template_name` are accepted
        for interface compatibility but never used — generation is pinned to
        max_tokens=2048 and stop='</code>' below. Confirm this is intended.
        """
        params = SamplingParams(temperature=temperature, top_p=top_p, max_tokens=2048, min_tokens=8, stop=['</code>'], include_stop_str_in_output=True)
        prompts = [prompt.strip() for prompt in prompts]

        # stage 1: generate cot and initial code
        e_p, e_cot, e_rel, e_k, e_res = load_shot(self.args.lang) # load one shot example
        prompts_1 = [t_problem(e_p) + t_cot(e_cot) + t_code(e_res) + t_problem(prompt) for prompt in prompts]
        outputs_1 = self.model.generate(prompts_1, params, use_tqdm=False)
        completions_1 = [o.outputs[0].text for o in outputs_1]

        # stage 2: RAG
        if self.args.knowledge_retrieval and not self.args.code_retrieval:
            # Extract the CoT sections and split each one into thinking steps.
            cots = extract_content(completions_1, "thinking")
            cots_steps = split_cots(cots)

            # Retrieve knowledge using each list of thinking steps
            # (keep the top 10 hits per completion).
            knowledges = []
            for cot_steps in cots_steps:
                # cot_steps is a list of thinking steps for one completion
                knowledge = self.knowledge_retriever.retrieve(cot_steps)[:10]
                knowledges.append(knowledge)

            # Flatten each completion's knowledge hits into a single string.
            knowledges = ['\n\n'.join([dict2str(knowledge) for knowledge in knowledge_list]) for knowledge_list in knowledges]

            # Build the stage-2 prompts from the retrieved knowledge.
            # NOTE(review): this branch passes the *split* step lists
            # (cots_steps) to t_cot, while the combined branch below passes
            # the raw cot strings — confirm t_cot accepts both forms.
            prompts_2 = []
            for problem, cot, knowledge in zip(prompts, cots_steps, knowledges):
                # prompt = t_problem(e_p) + t_cot(e_cot) + t_knowledge(e_k) + t_code(e_res) + \
                #          t_problem(problem) + t_cot(cot) + t_knowledge(knowledge) + "\n### Code Implementation: \n<code>\n" + problem
                prompt = t_knowledge(knowledge) + t_problem(problem) + t_cot(cot) + "\n### Code Implementation: \n<code>\n" + problem
                prompts_2.append(prompt)
        elif self.args.code_retrieval and not self.args.knowledge_retrieval:
            # Extract the CoT and the generated draft code.
            cots = extract_content(completions_1, "thinking")
            codes = extract_content(completions_1, "code")
            # Retrieve similar code for each draft.
            relevant_codes = self.code_retriever.retrieve(codes, top_k=10)
            # Merge each sub-list into one string, split by \n
            relevant_codes = ['\n\n'.join(code_list) for code_list in relevant_codes]
            # Build the stage-2 prompts (one-shot example + retrieved code).
            prompts_2 = []
            for problem, cot, rel in zip(prompts, cots, relevant_codes):
                prompt = t_problem(e_p) + t_cot(e_cot) + t_relevant(e_rel) + t_code(e_res) + \
                         t_problem(problem) + t_cot(cot) + t_relevant(rel) + "\n### Code Implementation: \n<code>\n" + problem
                prompts_2.append(prompt)

        elif self.args.knowledge_retrieval and self.args.code_retrieval:
            # Extract the CoT sections, split them into steps, and extract the draft code.
            cots = extract_content(completions_1, "thinking")
            cots_steps = split_cots(cots)
            codes = extract_content(completions_1, "code")

            # Knowledge retrieval (top 10 hits per completion).
            knowledges = []
            for cot_steps in cots_steps:
                knowledge = self.knowledge_retriever.retrieve(cot_steps)[:10]
                knowledges.append(knowledge)

            # Flatten each completion's knowledge hits into a single string.
            knowledges = ['\n\n'.join([dict2str(knowledge) for knowledge in knowledge_list]) for knowledge_list in knowledges]

            # Code retrieval, merged into one string per completion.
            relevant_codes = self.code_retriever.retrieve(codes, top_k=10)
            relevant_codes = ['\n\n'.join(code_list) for code_list in relevant_codes]

            # Build the stage-2 prompts (one-shot example + knowledge + retrieved code).
            prompts_2 = []
            for problem, cot, knowledge, rel in zip(prompts, cots, knowledges, relevant_codes):
                prompt = t_problem(e_p) + t_cot(e_cot) + t_knowledge(e_k) + t_relevant(e_rel) + t_code(e_res) + \
                         t_problem(problem) + t_cot(cot) + t_knowledge(knowledge) + t_relevant(rel) + \
                         "\n### Code Implementation: \n<code>\n" + problem
                prompts_2.append(prompt)

        else:
            raise ValueError("Invalid combination of retrieval options")

        outputs_2 = self.model.generate(prompts_2, params, use_tqdm=False)

        completions_2 = [o.outputs[0].text for o in outputs_2]
        # Extract the final code from prompt+completion so the <code> opener
        # (which lives in the prompt) pairs with the generated </code>.
        full = [p + c for p, c in zip(prompts_2, completions_2)]
        final_codes = extract_content(full, "code")

        return final_codes, full


def automodel_partial_arg_parser():
    """Extend the shared MultiPL-E parser with model and RAG options, then parse argv.

    The string-valued retrieval flags are coerced to booleans on the returned
    namespace before it is handed back to the caller.
    """
    p = partial_arg_parser()
    p.add_argument("--name", type=str, required=True)
    p.add_argument("--revision", type=str)
    p.add_argument("--tokenizer_name", type=str)
    p.add_argument("--tokenizer_revision", type=str)
    p.add_argument("--name-override", type=str)
    p.add_argument("--num_gpus", type=int, default=1)
    p.add_argument("--template-name", type=str, default="cot_py")
    p.add_argument("--knowledge-retrieval", choices=['true', 'false'], help="Use knowledge retrieval")
    p.add_argument("--knowledge-path", type=str, help="Path to the knowledge dataset")
    p.add_argument("--knowledge-index-cache-path", type=str, help="Path to cache the knowledge index")
    p.add_argument("--knowledge-model-name", type=str, help="Name of the knowledge embedding model")
    p.add_argument("--code-retrieval", choices=['true', 'false'], help="Use code retrieval")
    p.add_argument("--code-index-cache-path", type=str, help="Path to cache the code index")
    p.add_argument("--code-model-name", type=str, help="Name of the code embedding model")

    args = p.parse_args()

    # Coerce the 'true'/'false' string flags into proper booleans.
    for flag in ("knowledge_retrieval", "code_retrieval"):
        setattr(args, flag, getattr(args, flag) in ("true", "True"))

    print("args of inference_cot_rag.py:", args)
    return args

def do_name_override(args):
    """Return the model name to report downstream.

    Uses --name-override verbatim when given; otherwise sanitizes args.name,
    replacing '/' and '-' (which the rest of the toolchain rejects) with '_'.
    """
    override = args.name_override
    if override:
        return override
    return args.name.replace("/", "_").replace("-", "_")


def main():
    """Entry point: build the retrievers and model wrapper, then hand off to MultiPL-E."""
    args = automodel_partial_arg_parser()

    kb_retriever = KnowledgeRetriever(
        model_name=args.knowledge_model_name,
        index_path=args.knowledge_index_cache_path,
        knowledge_path=args.knowledge_path,
        lang=args.lang,
    )
    src_retriever = CodeRetriever(
        model_name=args.code_model_name,
        index_path=args.code_index_cache_path,
        lang=args.lang,
    )

    wrapper = VLLM(
        args.name,
        args.revision,
        args.tokenizer_name,
        args.tokenizer_revision,
        args.num_gpus,
        kb_retriever,
        src_retriever,
        args,
    )
    make_main(args, do_name_override(args), wrapper.completions)


# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
