# -*-coding:utf-8 -*-
import argparse
import os 
# Pin this process to GPU index 5. Set before torch/vllm are imported below so
# CUDA device enumeration sees only this device.
os.environ["CUDA_VISIBLE_DEVICES"] = "5"

import torch
from vllm import LLM, SamplingParams
from transformers import LlamaTokenizer
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer
from peft import PeftModel
import process_file as pf
import re
import tree_sitter_related as ts

sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=1024)

'''
0: 调用FUT的函数, dict_info["CoT_FUT_be_call_ex"]
1: FUT调用的函数, dict_info["CoT_FUT_call_ex"]
2: FUT的父模块, dict_info["CoT_FUT_parent"]
'''

# 固定变量
folder_path = '../dataset/test/'


def _total_token_len(list_info_important, model_path):
    """Return the total token count across all context groups.

    Each group (a list of code-snippet strings) is joined with newlines before
    counting, mirroring exactly how it is later embedded into the prompt.
    """
    return sum(
        pf.token_cal("\n".join(group), model_path)
        for group in list_info_important
    )


def main():
    """Build context-rich prompts for each focal method, generate Rust test
    functions with vLLM, and write the generations back into the JSON files
    under ``folder_path``.
    """
    # Command-line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_token', type=int, default=2048, help="Maximum token value")
    parser.add_argument('--order', type=str, default="021", help="Order string, e.g., '012'")
    # BUG FIX: the next two arguments used the misspelled keyword 'defalut=',
    # which makes argparse raise TypeError before the script can run.
    parser.add_argument('--model_path', type=str, default="../../model_finetune/result/model_finetune", help="Path to the LLM model")
    parser.add_argument('--dict_key_name', type=str, default="model_finetune", help="Name of the saved key")
    args = parser.parse_args()

    # NOTE(review): the LlamaTokenizer previously loaded here was never used —
    # token counting goes through pf.token_cal(model_path) — so it was removed.
    llm = LLM(model=args.model_path)

    # Default importance ordering of the context kinds (most important first).
    list_key_important = [
        "CoT_FUT_be_call_ex",
        "CoT_FUT_call_ex",
        "CoT_FUT_parent"
    ]
    list_comment_important = [
        "Methods to call the Focal Method",
        "Internal methods used by the Focal Method",
        "Parent module of the Focal Method"
    ]

    # Reorder both lists in lockstep according to --order (e.g. "021").
    list_key_important = pf.reorder_list(list_key_important, args.order)
    list_comment_important = pf.reorder_list(list_comment_important, args.order)

    for filename in os.listdir(folder_path):
        list_prompt = []
        if filename.endswith('.json'):
            file_path = os.path.join(folder_path, filename)
            list_dict_info = pf.read_json(file_path)

            for dict_info in list_dict_info:
                # Collect the context groups in importance order.
                list_info_important = [dict_info[key] for key in list_key_important]

                len_token = _total_token_len(list_info_important, args.model_path)

                # First pass: compress groups one at a time, least-important
                # (index 2) first, until the prompt fits within --max_token.
                id_awaiting_compress = 2
                while id_awaiting_compress >= 0 and len_token > args.max_token:
                    # BUG FIX: the original rebound the loop variable
                    # ('item = ts.str_compress(item)'), which never stored the
                    # compressed string back into the list, so this loop had no
                    # effect. Rebuild the group so compression actually sticks.
                    list_info_important[id_awaiting_compress] = [
                        ts.str_compress(item)
                        for item in list_info_important[id_awaiting_compress]
                    ]
                    len_token = _total_token_len(list_info_important, args.model_path)
                    id_awaiting_compress -= 1

                # Second pass: still over budget — drop whole groups entirely,
                # again least-important first.
                id_awaiting_compress = 2
                while id_awaiting_compress >= 0 and len_token > args.max_token:
                    list_info_important[id_awaiting_compress] = []
                    len_token = _total_token_len(list_info_important, args.model_path)
                    id_awaiting_compress -= 1

                # Flatten each group into one newline-joined string for the prompt.
                list_str_prompt_important = [
                    "\n".join(group) for group in list_info_important
                ]

                prompt = f'/*Your task is to generate a Test Function for the Rust Focal Method, ' \
                    f'and you may need to use the following information.*/\n' \
                    f'// Rust Focal Method\n' \
                    f'{dict_info["FUT_full"]}\n' \
                    f'// The Intention of the Focal Method\n' \
                    f'/*{dict_info["FUT_intention_GPT"]}*/\n' \
                    f'// {list_comment_important[0]}\n' \
                    f'{list_str_prompt_important[0]}\n' \
                    f'// {list_comment_important[1]}\n' \
                    f'{list_str_prompt_important[1]}\n' \
                    f'// {list_comment_important[2]}\n' \
                    f'{list_str_prompt_important[2]}\n' \
                    f'// Input and Output Examples for the Focal Method\n' \
                    f'/*{dict_info["FUT_case_GPT"]}*/\n' \
                    f'// Test Function'

                list_prompt.append(prompt)
            outputs = llm.generate(list_prompt, sampling_params)

            # Match each generation back to its source record by locating the
            # unique prompt fragments that were built from that record.
            for output in outputs:
                prompt = output.prompt
                for dict_info in list_dict_info:
                    # BUG FIX: the original looked up "0223_FUT_intention_GPT"
                    # and "0223_FUT_case_GPT" here, but the prompts above are
                    # built from "FUT_intention_GPT" / "FUT_case_GPT" — so the
                    # substrings could never match (or a KeyError was raised)
                    # and no generation was ever saved. Use the same keys.
                    func_prompt_sub_1 = f'{dict_info["FUT_full"]}\n' \
                                        f'// The Intention of the Focal Method\n' \
                                        f'/*{dict_info["FUT_intention_GPT"]}*/\n'
                    func_prompt_sub_2 = f'/*{dict_info["FUT_case_GPT"]}*/\n'
                    if func_prompt_sub_1 in prompt and func_prompt_sub_2 in prompt:
                        dict_info[args.dict_key_name] = output.outputs[0].text
                        break
            pf.save_data_to_json(list_dict_info, file_path)
            # BUG FIX: the f-string had no placeholder ("(unknown) success!!!");
            # report which file was actually processed.
            print(f"{filename} success!!!")





# Script entry point: run the generation pipeline only when executed directly.
if __name__ == "__main__":
    main()


