import pandas as pd
from language_detector import format_snippet

def load_search_result(filepath="search_result.jsonl"):
    """Load a JSON-lines search-result file as a list of per-row value lists.

    Returns None when *filepath* is None (caller signals "no search results").
    """
    if filepath is None:
        return None
    frame = pd.read_json(filepath, lines=True)
    return frame.values.tolist()
def load_data(input_filepath):
    """Read a JSON-lines dataset and return its FIM columns.

    Returns a (prefixes, suffixes) pair taken from the "prefix" and
    "fim_suffix" columns respectively.
    """
    frame = pd.read_json(input_filepath, lines=True)
    return frame["prefix"].tolist(), frame["fim_suffix"].tolist()

def truncate_inputs(prefix, suffix, tokenizer, max_length=1024):
    """Token-truncate a FIM pair: keep the tail of the prefix and the head of the suffix.

    Each side is independently limited to *max_length* tokens; the truncated
    texts are returned as a (prefix, suffix) pair of decoded strings.
    """
    # Keep the tokens closest to the cursor: last for prefix, first for suffix.
    kept_prefix = tokenizer.encode(prefix, return_tensors="pt")[0][-max_length:]
    kept_suffix = tokenizer.encode(suffix, return_tensors="pt")[0][:max_length]
    return tokenizer.decode(kept_prefix), tokenizer.decode(kept_suffix)
def truncate_inputs_by_line(prefix, suffix, tokenizer, max_length, prefix_ratio=0.8):
    """Truncate a FIM pair at line granularity within a shared token budget.

    The budget *max_length* is split into int(max_length * prefix_ratio)
    tokens for the prefix and the remainder for the suffix.  Lines closest to
    the cursor are kept whole where possible: the prefix is filled from its
    last line backwards, the suffix from its first line forwards; the line
    that overflows the budget is token-truncated to fit.

    Returns a (prefix, suffix) pair of decoded strings.
    """
    max_prefix_length = int(max_length * prefix_ratio)
    max_suffix_length = max_length - max_prefix_length

    # Fill the prefix budget from the last line backwards.
    prefix_tokens = []
    for line in reversed(prefix.splitlines()):
        # Re-append the newline stripped by splitlines before tokenizing.
        line_tokens = tokenizer.encode(line + "\n", return_tensors="pt")[0].tolist()
        remaining = max_prefix_length - len(prefix_tokens)
        if len(line_tokens) > remaining:
            # Bug fix: when remaining == 0, the old slice [-(0):] returned the
            # WHOLE line and overshot the budget; guard so we only take a
            # partial line when there is room left.
            if remaining > 0:
                prefix_tokens = line_tokens[-remaining:] + prefix_tokens
            break
        prefix_tokens = line_tokens + prefix_tokens

    # Fill the suffix budget from the first line forwards.
    suffix_tokens = []
    for line in suffix.splitlines():
        line_tokens = tokenizer.encode(line + "\n", return_tensors="pt")[0].tolist()
        remaining = max_suffix_length - len(suffix_tokens)
        if len(line_tokens) > remaining:
            suffix_tokens += line_tokens[:remaining]  # [:0] is safely empty
            break
        suffix_tokens += line_tokens

    truncated_prefix = tokenizer.decode(prefix_tokens, skip_special_tokens=True)
    truncated_suffix = tokenizer.decode(suffix_tokens, skip_special_tokens=True)

    return truncated_prefix, truncated_suffix



def truncate_inputs_list(prefixs, suffixs, tokenizer, search_results=None, max_length=512, prefix_ratio=0.8):
    """Truncate each FIM pair that has an associated search result.

    Pairs whose search result is absent (first element is None) are passed
    through untouched; the rest are line-truncated to *max_length* tokens via
    truncate_inputs_by_line.  Returns (prefixes, suffixes) lists.
    """
    if search_results is None:
        # No search results at all: every pair takes the pass-through branch.
        search_results = [[None]] * len(prefixs)

    out_prefixes = []
    out_suffixes = []
    for prefix, suffix, result in zip(prefixs, suffixs, search_results):
        if result[0] is None:
            out_prefixes.append(prefix)
            out_suffixes.append(suffix)
        else:
            new_prefix, new_suffix = truncate_inputs_by_line(
                prefix, suffix, tokenizer, max_length=max_length, prefix_ratio=prefix_ratio
            )
            out_prefixes.append(new_prefix)
            out_suffixes.append(new_suffix)

    return out_prefixes, out_suffixes
def get_first_line(text: str) -> str:
    """Return the first non-blank line of *text* (unstripped), or "" if none."""
    return next((line for line in text.splitlines() if line.strip()), "")
def load_rag_results(path="rag_results.jsonl"):
    """Load RAG results from a JSON-lines file (one single-element array per line).

    Missing entries (JSON null) are replaced by a placeholder dict with None
    "file_name" and "content" fields so downstream code can index uniformly.
    """
    frame = pd.read_json(path, lines=True)
    # Each row's payload lives in positional column 0.
    return [
        entry if entry is not None else {"file_name": None, "content": None}
        for entry in frame[0].tolist()
    ]

def get_most_relvant_rag_lines(rag_content, prefix, suffix, min_return_lines=10):
    """Extract the slice of *rag_content* bracketed by lines it shares with
    the prefix and suffix.

    Anchor lines are "distinctive" lines (longer than 32 chars) that occur
    verbatim in rag_content.  The prefix is scanned from its last line
    backwards, the suffix from its first line forwards; scanning keeps
    updating the anchor until at least *min_return_lines* lines have been
    examined, then stops at the next match.

    Returns the substring of rag_content from the prefix anchor up to and
    including the suffix anchor, rag_content[start:] when the suffix is
    empty, or None when rag_content is None or a non-empty suffix has no
    anchor line.
    """
    if rag_content is None:
        return None

    def _locate(lines):
        # Last matching anchor within the first min_return_lines lines, or
        # the first match after that point.
        found = ""
        for idx, line in enumerate(lines, start=1):
            if len(line) > 32 and line in rag_content:
                found = line
                if idx > min_return_lines:
                    break
        return found

    prefix_locate_line = _locate(reversed(prefix.splitlines()))
    suffix_locate_line = _locate(suffix.splitlines())

    # Bug fix: only require a suffix anchor when a suffix actually exists.
    # Previously an empty suffix always left suffix_locate_line == "" and
    # returned None here, making the rag_content[start:] branch unreachable.
    if suffix != "" and suffix_locate_line == "":
        return None

    # find("") == 0, so a missing prefix anchor starts the slice at 0.
    start = rag_content.find(prefix_locate_line)
    if suffix != "":
        end = rag_content.find(suffix_locate_line, start) + len(suffix_locate_line)
        return rag_content[start:end]
    return rag_content[start:]


def get_prompt(prefix, suffix):
    """Assemble a CodeLlama-style fill-in-the-middle prompt."""
    return f"<PRE> {prefix} <SUF>{suffix} <MID>"

def get_prompt_with_snippets(prefix, suffix, snippets, filenames=None):
    """Build a FIM prompt with formatted retrieval snippets prepended.

    None entries in *snippets* are skipped; *filenames* (optional, parallel
    to snippets) are forwarded to format_snippet.
    """
    if filenames is None:
        filenames = [None] * len(snippets)
    rendered = [
        format_snippet(snippet, filename=name)
        for snippet, name in zip(snippets, filenames)
        if snippet is not None
    ]
    joined = "\n".join(rendered)
    return "<PRE> " + joined + "\n\n" + prefix + " <SUF>" + suffix + " <MID>"
if __name__ == "__main__":
    # No CLI entry point: the module is meant to be imported for its helpers.
    pass