import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import pandas as pd
from tqdm import tqdm
import os
from util import load_search_result, truncate_inputs_list, load_rag_results, get_prompt, get_prompt_with_snippets,load_data,get_most_relvant_rag_lines, get_first_line




def load_last_result(input_filepath="Q_B_with_answer.jsonl"):
    """Reload a previous run's checkpoint file.

    Args:
        input_filepath: JSONL checkpoint written by save().

    Returns:
        Tuple of (responses, mids) as plain Python lists.
    """
    checkpoint = pd.read_json(input_filepath, lines=True)
    return checkpoint["responses"].tolist(), checkpoint["mids"].tolist()

def load_mids(path="mids.json"):
    """Read back the list of mids persisted by save_mids().

    Args:
        path: JSON file holding a single-column table (orient="values").

    Returns:
        List of mid strings.
    """
    table = pd.read_json(path, orient="values")
    return table[0].tolist()
def save_mids(mids, path="mids.json"):
    """Persist mids as a single-column JSON table (round-trips via load_mids).

    Args:
        mids: List of mid strings (None entries allowed).
        path: Destination JSON file.
    """
    column = pd.DataFrame([[entry] for entry in mids])
    column.to_json(path, orient="values")
def extract_mid_to_eot(code, suffix):
    """Extract the infilled middle segment from a generated completion.

    The decoded model output is expected to contain the infill between a
    "<MID>" marker and an "<EOT>" terminator.  When "<EOT>" is absent, the
    first 32 characters of *suffix* are used as a fallback terminator (the
    model may continue by echoing the suffix).

    Fixes over the original:
      * `find("<MID>") + len("<MID>")` could never be -1, so the
        missing-marker guard was dead and garbage was returned when "<MID>"
        was absent; now an explicit check returns None.
      * When no terminator was found, `code[start:-1]` silently dropped the
        last character; now the full tail after "<MID>" is returned.
      * An empty *suffix* made `find("")` match immediately, always yielding
        ""; an empty probe is now ignored.

    Args:
        code: Full decoded model output, or None.
        suffix: Original suffix text used to build the fallback terminator.

    Returns:
        The extracted middle string, or None when *code* is None or contains
        no "<MID>" marker.
    """
    if code is None:
        return None
    mid_pos = code.find("<MID>")
    if mid_pos == -1:
        return None
    start = mid_pos + len("<MID>")
    end = code.find("<EOT>", start)
    if end != -1:
        return code[start:end]
    # Fallback: cut where the model starts repeating the suffix.
    probe = suffix[:32]
    if probe:
        end = code.find(probe, start)
        if end != -1:
            return code[start:end]
    # No terminator at all: return everything after the marker.
    return code[start:]

def load_model(base_model_path):
    """Load a causal-LM checkpoint and its tokenizer.

    The model is loaded in fp16 and sharded automatically across available
    devices via device_map="auto".

    Args:
        base_model_path: HuggingFace model path or local checkpoint dir.

    Returns:
        Tuple of (model, tokenizer).
    """
    tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=True)
    model = AutoModelForCausalLM.from_pretrained(
        base_model_path,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return model, tokenizer


def save(prefixs, suffixs, mids, responses):
    """Checkpoint current progress to disk.

    Writes two artifacts:
      * "output.json"           — extracted mids only (orient="values")
      * "Q_B_with_answer.jsonl" — full records: prefix, suffix, response, mid

    Args:
        prefixs: List of prompt prefixes processed so far.
        suffixs: List of prompt suffixes processed so far.
        mids: Extracted middle segments (may contain None).
        responses: Raw decoded model outputs.
    """
    mids_column = pd.DataFrame([[entry] for entry in mids])
    mids_column.to_json("output.json", orient="values")

    records = pd.DataFrame({
        "prefix": prefixs,
        "fim_suffix": suffixs,
        "responses": responses,
        "mids": mids,
    })
    records.to_json("Q_B_with_answer.jsonl", orient="records", lines=True)

def get_max_length(buffer_length=20, model_path="your/model/path", input_filepath="Q_A_without_answer.jsonl"):
    """Compute a generation length budget covering the longest prompt.

    Tokenizes every prompt built from the input file and returns the longest
    token count plus *buffer_length* slack tokens.

    Generalized over the original: the tokenizer checkpoint and the input
    file were hard-coded; they are now keyword parameters with the original
    values as backward-compatible defaults.

    Args:
        buffer_length: Extra tokens added on top of the longest prompt.
        model_path: Tokenizer checkpoint path.
        input_filepath: JSONL file providing prefix/suffix pairs.

    Returns:
        int: max prompt token length + buffer_length.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    prefixs, suffixs = load_data(input_filepath)
    prompts = [get_prompt(prefix, suffix) for prefix, suffix in zip(prefixs, suffixs)]
    max_input_length = max(len(tokenizer.encode(text, return_tensors="pt")[0]) for text in prompts)
    print(max_input_length)
    return max_input_length + buffer_length

def infill_code(model, tokenizer, prompt_prefix: str, prompt_suffix: str, snippets = None, filenames=None,max_new_tokens: int = 512, temperature: float = 0.7, top_k: int = 50, top_p: float = 0.95, skip_special_tokens = False):
    """Run one fill-in-the-middle completion with a code LLM.

    Builds an infilling prompt from the prefix/suffix (optionally augmented
    with retrieved snippets), samples a single completion, and returns the
    full decoded text — the prompt itself is included in the return value.

    Args:
        model: Loaded causal LM (HuggingFace-style, with .generate()).
        tokenizer: Matching tokenizer.
        prompt_prefix: Code before the hole to fill.
        prompt_suffix: Code after the hole to fill.
        snippets: Optional retrieved snippets to prepend to the prompt.
        filenames: Optional filenames paired with *snippets*.
        max_new_tokens: Sampling budget for the completion.
        temperature / top_k / top_p: Sampling hyperparameters.
        skip_special_tokens: Whether decode strips special tokens.

    Returns:
        str: decoded model output (prompt + generated continuation).
    """
    if snippets is None:
        prompt = get_prompt(prompt_prefix, prompt_suffix)
    else:
        prompt = get_prompt_with_snippets(prompt_prefix, prompt_suffix, snippets, filenames=filenames)

    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        sampled_ids = model.generate(
            encoded['input_ids'],
            max_new_tokens=max_new_tokens,
            num_return_sequences=1,
            do_sample=True,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
        )

    decoded = tokenizer.decode(sampled_ids[0], skip_special_tokens=skip_special_tokens)
    # Free cached activations between examples to keep peak GPU memory down.
    torch.cuda.empty_cache()
    return decoded
 

def main(input_filepath, base_model_path, search_result_filepath = None, do_truncate = False, internet_search = False):
    """Run FIM inference over a JSONL dataset, checkpointing after every item.

    Args:
        input_filepath: JSONL file providing prefix/suffix pairs.
        base_model_path: HF checkpoint path for the causal LM + tokenizer.
        search_result_filepath: Optional retrieval results to augment prompts.
        do_truncate: Truncate inputs (accounting for snippets) before inference.
        internet_search: If True, search results are RAG records with
            "content"/"file_name" fields; otherwise plain snippet lists.
    """

    prefixs, suffixs = load_data(input_filepath)
    model, tokenizer = load_model(base_model_path)
    size = len(prefixs)

    # Optionally load retrieval context to augment each prompt.
    if search_result_filepath:
        if internet_search:
            # RAG-style records: keep only the lines most relevant to each example.
            search_results = load_rag_results(search_result_filepath)
            filenames = [[r["file_name"]] for r in search_results]
            search_results = [[get_most_relvant_rag_lines(search_results[i]["content"],prefixs[i],suffixs[i])] for i in range(size)]

        else:
            search_results = load_search_result(search_result_filepath)
            filenames = [None]*len(search_results)
    else:
        search_results = None
    if do_truncate:
        prefixs, suffixs = truncate_inputs_list(prefixs, suffixs, tokenizer,search_results=search_results)
    # Resume from an existing checkpoint file if present.
    if os.path.exists("Q_B_with_answer.jsonl"):
        responses, mids = load_last_result()
        print(len(responses))
        print(size)
        if len(responses)==size:
            # Everything was already processed in a previous run.
            print("return")
            return
        # NOTE(review): appending a None placeholder and then taking
        # start_index = len(responses) permanently records the example at
        # index len(responses)-1 as None and never generates it.  This looks
        # like a deliberate skip of the example that crashed the previous
        # run — confirm the skip is intended.
        responses.append(None)
        mids.append(None)
        start_index = len(responses)
        save(prefixs[0:len(mids)],suffixs[0:len(mids)],mids,responses)
    else:
        responses = []
        mids = []
        start_index = 0
    for i in tqdm(range(start_index, size), desc="Processing"):
        if search_result_filepath is None:
            code = infill_code(model, tokenizer, prefixs[i], suffixs[i])
        else:
            code = infill_code(model, tokenizer, prefixs[i], suffixs[i] ,snippets=search_results[i], filenames=filenames[i])
        responses.append(code)
        # Pull out just the infilled middle between <MID> and <EOT>.
        mid = extract_mid_to_eot(code,suffixs[i])
        mids.append(mid)
        # Checkpoint after every example so a crash can be resumed.
        save(prefixs[0:len(mids)],suffixs[0:len(mids)],mids,responses)





if __name__ == "__main__":
    # Entry point: run FIM inference with RAG retrieval augmentation and
    # input truncation enabled.  Replace "your/model/path" with a real
    # checkpoint before running.
    main("Q_B_without_answer.jsonl","your/model/path",search_result_filepath="retrival/rag_results.jsonl",internet_search=True,do_truncate=True)

