import requests
import pandas as pd
import os
import time
from urllib.parse import quote
from tqdm import tqdm
from util import extract_code_info, extract_code_lines

# GitHub personal access token placeholder — must be replaced before running.
# NOTE(review): consider loading this from an environment variable instead of
# keeping a credential in source control.
token = "your github token"
def search_github_code(query, per_page=5, page=1, token=None, max_retries = 3,retry_delay=5,max_length=256):
    """Search the GitHub code-search REST API for a snippet or list of snippets.

    Args:
        query: A code snippet (str) or a list of snippets. Each snippet is
            escaped and quoted via prepare_search_string(); for a list, the
            quoted snippets are joined with spaces while the combined string
            stays under ``max_length``.
        per_page: Results per page requested from the API.
        page: Result page number.
        token: Optional GitHub token, sent as a Bearer Authorization header.
        max_retries: Number of request attempts before giving up.
        retry_delay: Seconds to sleep between failed attempts.
        max_length: Length budget for the combined search string.

    Returns:
        The list of result items (possibly empty) on success, or None when
        the query is empty/unsupported or every attempt fails.
    """
    url = "https://api.github.com/search/code"
    headers = {
        "Accept": "application/vnd.github+json",
    }
    if token:
        headers["Authorization"] = f"Bearer {token}"

    if isinstance(query, str):
        search_query = prepare_search_string(query)
    elif isinstance(query, list):
        escaped = [prepare_search_string(q) for q in query]
        if not escaped:
            return None
        search_query = escaped[0]
        # Greedily append further snippets while staying under max_length.
        for part in escaped[1:]:
            if len(search_query) + 1 + len(part) < max_length:
                search_query = search_query + " " + part
            else:
                break
    else:
        # Bug fix: the original left `search_query` unbound for any other
        # query type, raising NameError below. Fail explicitly instead.
        return None

    params = {
        "q": search_query,
        # NOTE(review): "type" is not a documented REST search parameter;
        # kept for compatibility — the API presumably ignores it.
        "type": "code",
        "per_page": per_page,
        "page": page
    }

    for attempt in range(max_retries):
        try:
            # verify=False kept from the original (disables TLS certificate
            # verification — presumably needed for a proxied environment;
            # TODO confirm and prefer verify=True where possible).
            response = requests.get(url, headers=headers, params=params, verify=False)
            response.raise_for_status()
            return response.json().get("items", [])
        except requests.exceptions.RequestException:
            # ProxyError and ConnectionError are RequestException subclasses,
            # so one clause covers the original redundant tuple.
            if attempt < max_retries - 1:
                time.sleep(retry_delay)
            else:
                return None

def extract(prefix,suffix,code):
    """Return the substring of *code* found between *prefix* and *suffix*.

    The *suffix* search begins after the end of *prefix*. An empty *suffix*
    means "everything after *prefix*". Returns None when a marker is absent.
    """
    begin = code.find(prefix)
    if begin < 0:
        return None
    begin += len(prefix)
    if suffix == "":
        return code[begin:]
    stop = code.find(suffix, begin)
    return None if stop < 0 else code[begin:stop]
def get_file_content(download_url, token=None,verify=False,max_retries = 3,retry_delay=5):
    """Download a file (e.g. a raw.githubusercontent.com URL) and return its text.

    Args:
        download_url: Direct URL of the file to fetch.
        token: Optional GitHub token, sent as a Bearer Authorization header.
        verify: TLS certificate verification flag passed through to requests.
            Default False kept for backward compatibility — NOTE(review):
            prefer True unless a proxy genuinely requires otherwise.
        max_retries: Number of request attempts before giving up.
        retry_delay: Seconds to sleep between failed attempts.

    Returns:
        The response body as text, or None when every attempt fails.
    """
    headers = {}
    if token:
        headers["Authorization"] = f"Bearer {token}"

    for attempt in range(max_retries):
        try:
            response = requests.get(download_url, headers=headers, verify=verify)
            response.raise_for_status()
            return response.text
        except requests.exceptions.RequestException:
            # ProxyError and ConnectionError are RequestException subclasses,
            # so one clause covers the original redundant tuple.
            if attempt < max_retries - 1:
                time.sleep(retry_delay)
            else:
                return None

def load_data(input_filepath = "Q_B_without_answer.jsonl"):
    """Read the benchmark JSONL file and return (prefix list, suffix list)."""
    frame = pd.read_json(input_filepath, lines=True)
    prefix_list = frame["prefix"].tolist()
    suffix_list = frame["fim_suffix"].tolist()
    return prefix_list, suffix_list

def convert_to_raw_url(github_url):
    """Convert a github.com "blob" page URL to its raw-content URL.

    Example:
        https://github.com/u/repo/blob/main/a.py
        -> https://raw.githubusercontent.com/u/repo/main/a.py
    """
    # Bug fix: replace only the FIRST occurrence of each marker. The original
    # stripped every "/blob" substring, which mangled paths whose segments
    # merely contain "blob" (e.g. ".../main/blobby/a.py" -> ".../main/by/a.py").
    raw_url = github_url.replace("github.com", "raw.githubusercontent.com", 1)
    raw_url = raw_url.replace("/blob/", "/", 1)
    return raw_url

def prepare_search_string(input_str):
    """Escape backslashes and double quotes in *input_str*, then quote it.

    Produces a quoted phrase suitable for the GitHub code-search "q" field.
    """
    escaped = input_str.replace('\\', '\\\\').replace('"', '\\"')
    return '"' + escaped + '"'
def get_last_k_chars(text, k = 256):
    """Return the trailing *k* characters of *text* (the whole text if shorter)."""
    if len(text) > k:
        return text[-k:]
    return text
def get_first_k_chars(text, k = 256):
    """Return the leading *k* characters of *text* (the whole text if shorter)."""
    if len(text) <= k:
        return text
    return text[:k]

def save_rag_results(rag_results,path="rag_results.jsonl"):
    """Persist *rag_results* to *path* as JSON-lines (one record per row)."""
    pd.DataFrame(rag_results).to_json(path, orient='records', lines=True)


def load_rag_results(path="rag_results.jsonl"):
    """Load a JSON-lines results file and return its rows as plain lists."""
    frame = pd.read_json(path, lines=True)
    return frame.values.tolist()

def main(rag_path = None):
    """First retrieval pass: query GitHub with each example's prefix tail.

    Args:
        rag_path: Optional path to an existing results JSONL; when given,
            the run resumes after the rows already present in that file.

    Side effects: rewrites "rag_results.jsonl" after every example and
    sleeps 6 s per iteration (presumably to stay within the code-search
    rate limit — TODO confirm).
    """
    prefixs, suffixs= load_data()
    size = len(prefixs)
    if rag_path is None:
        rag_results = []
    else:
        rag_results = load_rag_results(rag_path)

    # Resume point: rows already loaded are treated as complete.
    start_index = len(rag_results)
    for i in tqdm(range(start_index,size)):
        # i = 5
        # Only the last 256 characters of the prefix are used as the query.
        prefix = get_last_k_chars(prefixs[i],256)
        results = search_github_code(query=prefix, per_page=10, token=token)
        one_piece_rag_results = []
        if results:
            for item in results:
                file_path = item['path']
                file_name = os.path.basename(file_path)
                # Fetch the raw file content rather than the HTML page.
                raw_url = convert_to_raw_url(item['html_url'])
                content = get_file_content(raw_url,token=token)

                rag_result = {
                    "file_name": file_name,
                    "content": content,
                }
                one_piece_rag_results.append(rag_result)

        # NOTE(review): a failed search appends [] here (never None), yet
        # search_again()/third_search() only retry rows loaded back as None —
        # verify the JSONL round-trip actually yields None for empty rows.
        rag_results.append(one_piece_rag_results)
        # Checkpoint after every example so an interrupted run can resume.
        save_rag_results(rag_results)
        time.sleep(6)
def search_again(rag_path = "rag_results.jsonl"):
    """Second retrieval pass: re-query only rows whose stored result is None.

    Builds the query from function/class/comment features extracted from
    the prefix and suffix (via util.extract_code_info) and repeats the
    GitHub search, checkpointing the results file after each retried row.
    """
    prefixs, suffixs= load_data()
    size = len(prefixs)
    rag_results = load_rag_results(rag_path)
    for i in tqdm(range(size)):
        # Only retry rows that have no stored result.
        if rag_results[i] is None:
            prefix_feature = extract_code_info(prefixs[i])
            suffix_feature = extract_code_info(suffixs[i])
            # Query terms: function/class names plus comments from both sides.
            features = prefix_feature["functions"] + prefix_feature["classes"] +  suffix_feature["functions"] + suffix_feature["classes"] + prefix_feature["comments"] +suffix_feature["comments"]
            results = search_github_code(features,per_page=10,token=token)
            one_piece_rag_results = []
            if results:
                for item in results:
                    file_path = item['path']
                    file_name = os.path.basename(file_path)
                    # Fetch the raw file content rather than the HTML page.
                    raw_url = convert_to_raw_url(item['html_url'])
                    content = get_file_content(raw_url,token=token)


                    rag_result = {
                        "file_name": file_name,
                        "content": content,
                    }
                    one_piece_rag_results.append(rag_result)

            # Checkpoint after every retried row.
            rag_results[i] = one_piece_rag_results
            save_rag_results(rag_results)

def third_search(rag_path = "rag_results.jsonl"):
    """Third retrieval pass: like search_again() but without comment features.

    Uses util.extract_code_lines and queries with only function/class
    features, for rows whose stored result is still None; checkpoints the
    results file after each retried row.
    """
    prefixs, suffixs= load_data()
    size = len(prefixs)
    rag_results = load_rag_results(rag_path)   

    for i in tqdm(range(size)):
        if rag_results[i] is None:
            prefix_feature = extract_code_lines(prefixs[i])
            suffix_feature = extract_code_lines(suffixs[i])
            # Drop the comment features (they were included in search_again).
            features = prefix_feature["functions"] + prefix_feature["classes"] +  suffix_feature["functions"] + suffix_feature["classes"]
            results = search_github_code(features,per_page=10,token=token)
            one_piece_rag_results = []
            if results:
                for item in results:
                    file_path = item['path']
                    file_name = os.path.basename(file_path)
                    # Fetch the raw file content rather than the HTML page.
                    raw_url = convert_to_raw_url(item['html_url'])
                    content = get_file_content(raw_url,token=token)
                    rag_result = {
                        "file_name": file_name,
                        "content": content,
                    }
                    one_piece_rag_results.append(rag_result)
            rag_results[i] = one_piece_rag_results
            save_rag_results(rag_results)

if __name__ == "__main__":
    # Entry point currently runs only the second retrieval pass.
    search_again()