import os
import csv
import time
import json
import shutil
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed

# Fetch the repositories of an organization via the Gitee OpenAPI:
# https://gitee.com/api/v5/swagger#/getV5OrgsOrgRepos
# NOTE(review): hardcoded API credential committed in source — move this to an
# environment variable and rotate the token.
access_token = '4eef940356d19836f649a011da5f92ad'
# Bodies of matching review comments, collected across all crawled repos.
global_review_list = []
# URLs of requests that failed; dumped to error_list.txt at the end of main().
global_error_list = []
# Repo paths loaded from the cached '<org>_repo_list.csv'.
global_repo_list = []
def make_directory(directory_path):
    """Ensure *directory_path* exists and is empty.

    Any pre-existing directory at that path (with all of its contents) is
    deleted first, then a fresh empty directory is created in its place.
    """
    already_there = os.path.exists(directory_path)
    if already_there:
        shutil.rmtree(directory_path)
    os.makedirs(directory_path)

def fetch_repo_from_org(org):
    """Fetch the full repository list of *org* from the Gitee API, cache it to
    '<org>_repo_list.csv', then crawl every repo's PRs in thread batches.

    Side effects: populates ``global_repo_list`` and, via ``process_repo``,
    ``global_review_list`` / ``global_error_list``; writes CSV/JSON files
    under the current working directory.
    """
    repo_type = 'all'  # renamed from ``type`` to avoid shadowing the builtin
    page = 1
    per_page = 50
    num = 1
    max_retries = 3
    repo_list_path = os.path.join(os.getcwd(), org + '_repo_list.csv')

    # Only hit the API when no cached repo list exists yet.
    if not os.path.exists(repo_list_path):
        with open(repo_list_path, 'a', newline='') as f:
            writer = csv.writer(f)
            while True:
                url = (f'https://gitee.com/api/v5/orgs/{org}/repos'
                       f'?access_token={access_token}&type={repo_type}'
                       f'&page={page}&per_page={per_page}')
                # BUG FIX: the original bare ``except`` printed
                # ``r.status_code`` while ``r`` could be unbound (NameError on
                # the first failed request) and retried forever; retry a
                # bounded number of times instead.
                r = None
                for _ in range(max_retries):
                    try:
                        r = requests.get(url, timeout=5)
                        break
                    except requests.exceptions.RequestException as err:
                        print("Error:", err)
                if r is None:
                    global_error_list.append(url)
                    break
                r.encoding = 'utf-8'
                if r.status_code != 200:
                    print("Error:", r.status_code)
                    break
                json_data = json.loads(r.text)
                if not json_data:
                    break  # an empty page marks the end of the listing
                for repo_data in json_data:
                    owner = repo_data['namespace']['path']
                    repo = repo_data['path']
                    print(num, owner, repo)
                    writer.writerow([num, owner, repo])
                    num += 1
                page += 1
                print("page:", page)
        print("finish fetch_repo_from_org, saved：", repo_list_path)

    # Load the cached list; the last column of each row is the repo path.
    with open(repo_list_path, 'r') as f:
        for row in csv.reader(f):
            global_repo_list.append(row[-1])

    # Crawl the repos in fixed-size batches of worker threads, sleeping
    # between batches to stay under the API rate limit.
    batch_size = 32
    len_repo = len(global_repo_list)
    # BUG FIX: ``len_repo // batch_size + 1`` produced an empty trailing batch
    # (and a pointless 30 s sleep) whenever len_repo was a multiple of the
    # batch size; use ceiling division instead.
    for batch_cur in range((len_repo + batch_size - 1) // batch_size):
        start_index = batch_size * batch_cur
        end_index = min(start_index + batch_size, len_repo)
        print("###########################################################################")
        print("############################# batch ", str(batch_cur), " begin #############################")
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(process_repo, idx, 0)
                       for idx in range(start_index, end_index)]
            for future in as_completed(futures):
                future.result()  # re-raise any worker exception
        print("########################### batch ", str(batch_cur), " finished ############################")
        print("###########################################################################")
        time.sleep(30)


def fetch_repo_from_org_bak(org):
    """Legacy variant of ``fetch_repo_from_org`` kept for reference.

    Writes 'repo_num, owner, repo' rows to '<org>/<org>_repo_list.csv'.

    BUG FIX: the original submitted ``process_repo(repo_data, writer,
    repo_num)`` to the thread pool — a stale three-argument call that no
    longer matches ``process_repo(idx, repo_num)`` and raised ``TypeError``
    in every future.  The per-repo CSV row is now written directly on this
    thread (``csv.writer`` is not documented thread-safe anyway).  A request
    timeout is also added so a hung connection cannot block forever.
    """
    repo_type = 'all'  # avoid shadowing the builtin ``type``
    page = 1
    per_page = 100
    repo_num = 1

    make_directory(org)
    with open(os.path.join(os.getcwd(), org, org + '_repo_list.csv'), 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["repo_num", "owner", "repo"])
        while True:
            url = (f'https://gitee.com/api/v5/orgs/{org}/repos'
                   f'?access_token={access_token}&type={repo_type}'
                   f'&page={page}&per_page={per_page}')
            r = requests.get(url, timeout=5)
            r.encoding = 'utf-8'
            if r.status_code != 200:
                break
            json_data = json.loads(r.text)
            if not json_data:
                break  # an empty page marks the end of the listing
            for repo_data in json_data:
                writer.writerow([repo_num,
                                 repo_data['namespace']['path'],
                                 repo_data['path']])
                repo_num += 1
            page += 1

def process_repo(idx, repo_num):
    """Crawl all merged-PR reviews for the repo at ``global_repo_list[idx]``.

    The owner is fixed to 'openharmony'.  Returns ``repo_num + 1`` so the
    caller can keep a running count of processed repos.
    """
    owner = "openharmony"
    repo_name = global_repo_list[idx]
    print("##############", owner, repo_name, "##############")
    fetch_pr_from_repo(owner, repo_name)
    return repo_num + 1

def fetch_pr_from_repo(owner, repo):
    """Page through all merged PRs of *owner*/*repo*, record each one in
    '<owner>/<repo>/<repo>_pr_list.csv', and crawl its review comments in a
    thread pool (via ``process_pr``).
    """
    page = 1
    per_page = 50
    state = 'merged'
    direction = 'asc'
    num = 1
    max_retries = 3
    make_directory(os.path.join(os.getcwd(), owner, repo))

    with open(os.path.join(os.getcwd(), owner, repo, repo + '_pr_list.csv'), 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["num", "pr_number", "pr_url", "pr_diff"])
        while True:
            url = (f'https://gitee.com/api/v5/repos/{owner}/{repo}/pulls'
                   f'?access_token={access_token}&state={state}'
                   f'&direction={direction}&page={page}&per_page={per_page}')
            # BUG FIX: the original ``continue``d on every request error with
            # no cap, looping forever on a dead endpoint.  Its four except
            # branches were byte-identical and HTTPError / ConnectionError /
            # Timeout are all subclasses of RequestException, so one handler
            # with a bounded retry replaces them.
            r = None
            for _ in range(max_retries):
                try:
                    r = requests.get(url, timeout=5)
                    break
                except requests.exceptions.RequestException as err:
                    print(f'An error occurred: {err}')
                    print("error url:", url)
                    global_error_list.append(url)
            if r is None:
                return  # give up on this repo after max_retries failures
            r.encoding = 'utf-8'
            if r.status_code != 200:
                print(r.status_code)
                break
            json_data = json.loads(r.text)
            if not json_data:
                break  # an empty page marks the end of the listing
            # BUG FIX: every future used to be submitted with the same ``num``
            # (the comprehension captured its current value), so all PRs on a
            # page got the same row number; offset each submission instead.
            # NOTE(review): ``writer`` is shared by 8 worker threads inside
            # process_pr; csv writers are not documented thread-safe — confirm
            # or serialize those writes.
            with ThreadPoolExecutor(max_workers=8) as executor:
                futures = [
                    executor.submit(process_pr, pr_data, writer, num + offset, owner, repo)
                    for offset, pr_data in enumerate(json_data)
                ]
                for future in as_completed(futures):
                    future.result()  # re-raise any worker exception
            num += len(json_data)
            page += 1

def process_pr(pr_data, writer, num, owner, repo):
    """Record one PR as a CSV row, then crawl its diff-review comments.

    Returns ``num + 1`` so the caller can keep a running row counter.
    """
    number = pr_data['number']
    writer.writerow([num, number, pr_data['html_url'], pr_data['diff_url']])
    fetch_review_from_pr(owner, repo, number)
    return num + 1

def fetch_review_from_pr(owner, repo, pr_number):
    """Fetch all diff comments of one PR and keep those attached to C/C++
    files.

    Matching comment bodies are appended to ``global_review_list`` and each
    full comment object is dumped as JSON under
    '<owner>/<repo>/<pr_number>_pr_review.json'.
    """
    page = 1
    per_page = 100
    direction = 'asc'
    comment_type = 'diff_comment'
    max_retries = 3
    cxx_extensions = ('.c', '.cpp', '.cc', '.h', '.hh', '.hpp', '.cxx', '.hxx', '.c++')
    while True:
        url = (f'https://gitee.com/api/v5/repos/{owner}/{repo}/pulls/{pr_number}/comments'
               f'?access_token={access_token}&page={page}&per_page={per_page}'
               f'&direction={direction}&comment_type={comment_type}')
        # BUG FIX: the original retried failed requests forever (unbounded
        # ``continue``) and had four byte-identical except branches, all
        # subclasses of RequestException.  A timeout is also added so a hung
        # connection cannot block the crawl indefinitely.
        r = None
        for _ in range(max_retries):
            try:
                r = requests.get(url, timeout=5)
                break
            except requests.exceptions.RequestException as err:
                print(f'An error occurred: {err}')
                print("error url:", url)
                global_error_list.append(url)
        if r is None:
            return  # give up on this PR after max_retries failures
        r.encoding = 'utf-8'
        if r.status_code != 200:
            print(r.status_code)
            return
        json_data = json.loads(r.text)
        if not json_data:
            break  # an empty page marks the end of the listing
        for review in json_data:
            review_filepath = review['path']
            if review_filepath is None:  # BUG FIX: was ``== None``
                continue
            extension = os.path.splitext(review_filepath)[-1].lower()
            if extension in cxx_extensions:
                review_newline = review['new_line']
                review_body = review['body']
                print('-----------------------------------------------------------')
                print(repo, pr_number, review_body, review_filepath, review_newline)
                global_review_list.append(review_body)
                # NOTE(review): appending several json.dump payloads to one
                # file yields concatenated JSON documents, not one valid JSON
                # file — confirm the downstream reader expects that format.
                with open(os.path.join(os.getcwd(), owner, repo, str(pr_number) + '_pr_review.json'),
                          'a', newline='', encoding='utf-8') as f:
                    json.dump(review, f, ensure_ascii=False, indent=4)
                print('---------------------write success--------------------------')
        page += 1

def main():
    """Crawl every openharmony repo, then persist the collected review bodies
    and the URLs of failed requests into files in the working directory."""
    fetch_repo_from_org('openharmony')

    # Flatten each review body to a single line and number them from 1.
    with open(os.path.join(os.getcwd(), 'all_review_list.csv'), 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["num", "review"])
        for review_num, review in enumerate(global_review_list, start=1):
            writer.writerow([review_num, review.replace('\n', ' ')])

    # Persist the URLs that failed during the crawl, one per line.
    with open(os.path.join(os.getcwd(), 'error_list.txt'), 'a') as f:
        for bad_url in global_error_list:
            f.write("%s\n" % bad_url)

# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    main()
