import configparser
import gitlab
import pandas as pd
import pickle
from collections import defaultdict
from datetime import datetime
from tabulate import tabulate


def process_single_commit(commit, project, all_commit_count, all_commit_time, all_commit_file_type, all_added_lines, all_deleted_lines, all_net_lines):
    """Accumulate per-author statistics for one commit into the shared accumulators.

    Mutates and returns the six accumulators:
    - all_commit_count: {project_name: {author: commit count}}
    - all_commit_time: list of [project_name, author, date, count] rows
    - all_commit_file_type: list of [project_name, author, extension, count, pct] rows
    - all_added_lines / all_deleted_lines: {project_name: {author: line count}}
    - all_net_lines: {project_name: {author: added - deleted}}

    Any exception is logged and swallowed so a single bad commit does not
    abort the whole collection run; the accumulators are returned unchanged
    beyond whatever was applied before the failure.
    """
    try:
        author_name = commit.author_name

        # Commit count per author per project.
        commit_count = all_commit_count.setdefault(project.name, defaultdict(int))
        commit_count[author_name] += 1

        # Commit time: a single commit always contributes exactly one
        # (project, author, date, 1) row, so append it directly instead of
        # building a throwaway nested defaultdict and flattening it.
        commit_date = datetime.strptime(commit.committed_date, '%Y-%m-%dT%H:%M:%S.%f%z').date()
        all_commit_time.append([project.name, author_name, commit_date, 1])

        # Fetch the diff once and reuse it; the original called
        # commit_details.diff(get_all=True) twice (two API round-trips).
        commit_details = project.commits.get(commit.id)
        diff_entries = commit_details.diff(get_all=True)

        # File-type distribution within this commit. Percentages are relative
        # to this commit's file count only (a commit has a single author).
        file_type_count = defaultdict(int)
        total_files = 0
        for change in diff_entries:
            file_path = change['new_path']
            file_extension = file_path.split('.')[-1] if '.' in file_path else 'unknown'
            file_type_count[file_extension] += 1
            total_files += 1
        file_type_data = []
        for file_type, count in file_type_count.items():
            percentage = (count / total_files) * 100
            file_type_data.append([project.name, author_name, file_type, count, f"{percentage:.2f}%"])
        all_commit_file_type.extend(file_type_data)

        # Added/deleted line counts from the unified-diff text; the '+++'/'---'
        # file-header lines are excluded from the counts.
        added_lines = all_added_lines.setdefault(project.name, defaultdict(int))
        deleted_lines = all_deleted_lines.setdefault(project.name, defaultdict(int))
        for change in diff_entries:
            if 'diff' in change:
                for line in change['diff'].splitlines():
                    if line.startswith('+') and not line.startswith('+++'):
                        added_lines[author_name] += 1
                    elif line.startswith('-') and not line.startswith('---'):
                        deleted_lines[author_name] += 1

        net_lines = all_net_lines.setdefault(project.name, {})
        net_lines[author_name] = added_lines.get(author_name, 0) - deleted_lines.get(author_name, 0)

        return all_commit_count, all_commit_time, all_commit_file_type, all_added_lines, all_deleted_lines, all_net_lines
    except Exception as e:
        print(f"Error processing commit {commit.id} in project {project.name}: {e}")
        return all_commit_count, all_commit_time, all_commit_file_type, all_added_lines, all_deleted_lines, all_net_lines


def collect_commit_data(projects):
    """Walk every branch of every project and aggregate commit statistics.

    Returns a 6-tuple:
        (commit_count_dfs, commit_time_df, commit_file_type_df,
         all_added_lines_dfs, all_deleted_lines_dfs, all_net_lines_dfs)
    where the *_dfs values are dicts of {project_name: DataFrame}.

    NOTE(review): a commit reachable from several branches is processed once
    per branch, so its stats are counted multiple times — confirm intended.
    """
    all_commit_count = {}
    all_commit_time = []
    all_commit_file_type = []
    all_added_lines = {}
    all_deleted_lines = {}
    all_net_lines = {}

    for project in projects:
        try:
            branches = project.branches.list()
            for branch in branches:
                try:
                    commits = project.commits.list(all=True, ref_name=branch.name)
                    for commit in commits:
                        all_commit_count, all_commit_time, all_commit_file_type, all_added_lines, all_deleted_lines, all_net_lines = process_single_commit(
                            commit, project, all_commit_count, all_commit_time, all_commit_file_type, all_added_lines, all_deleted_lines, all_net_lines
                        )
                    print(f"Processed {len(commits)} commits in branch {branch.name} in project {project.name}.")

                except Exception as e:
                    print(f"Error getting commits for branch {branch.name} in project {project.name}: {e}")
        except Exception as e:
            print(f"Error getting branches for project {project.name}: {e}")

    # Per-project commit-count DataFrames (columns: 作者/author, 提交次数/count).
    commit_count_dfs = {}
    try:
        for project_name, count_dict in all_commit_count.items():
            commit_count_df = pd.DataFrame.from_dict(count_dict, orient='index', columns=['提交次数']).reset_index().rename(
                columns={'index': '作者'})
            commit_count_df['项目名称'] = project_name
            commit_count_dfs[project_name] = commit_count_df
        print(f"Created {len(commit_count_dfs)} commit count DataFrames.")
    except Exception as e:
        print(f"Error creating commit count DataFrames: {e}")

    try:
        commit_time_df = pd.DataFrame(all_commit_time, columns=['项目名称', '作者', '日期', '提交次数'])
    except Exception as e:
        print(f"Error creating commit time DataFrame: {e}")
        commit_time_df = pd.DataFrame()

    try:
        commit_file_type_df = pd.DataFrame(all_commit_file_type, columns=['项目名称', '作者', '文件类型', '文件数量', '占比'])
    except Exception as e:
        print(f"Error creating commit file type DataFrame: {e}")
        commit_file_type_df = pd.DataFrame()

    all_added_lines_dfs = {}
    all_deleted_lines_dfs = {}
    all_net_lines_dfs = {}
    try:
        for project_name in all_added_lines:
            # BUG FIX: the original labeled these frames with `project.name`,
            # the stale loop variable from the collection loop above, so every
            # project's rows carried the *last* project's name (and an empty
            # `projects` list raised NameError). Use `project_name` instead.
            added_lines_df = pd.DataFrame.from_dict(all_added_lines[project_name], orient='index', columns=['新增行数']).reset_index().rename(
                columns={'index': '作者'})
            added_lines_df['项目名称'] = project_name
            all_added_lines_dfs[project_name] = added_lines_df

            deleted_lines_df = pd.DataFrame.from_dict(all_deleted_lines[project_name], orient='index', columns=['删除行数']).reset_index().rename(
                columns={'index': '作者'})
            deleted_lines_df['项目名称'] = project_name
            all_deleted_lines_dfs[project_name] = deleted_lines_df

            net_lines_df = pd.DataFrame.from_dict(all_net_lines[project_name], orient='index', columns=['净代码行数变化']).reset_index().rename(
                columns={'index': '作者'})
            net_lines_df['项目名称'] = project_name
            all_net_lines_dfs[project_name] = net_lines_df
    except Exception as e:
        print(f"Error creating lines data DataFrames: {e}")

    return commit_count_dfs, commit_time_df, commit_file_type_df, all_added_lines_dfs, all_deleted_lines_dfs, all_net_lines_dfs


def collect_mr_data(projects):
    """Aggregate merge-request statistics per project.

    Returns:
        all_mr_initiation: {project_name: DataFrame of MRs initiated per author}
        mr_processing (DataFrame): average creation-to-merge time (hours) per author
        all_mr_comment: {project_name: DataFrame of discussion-note counts per author}
    """
    all_mr_initiation = {}
    all_mr_processing = []
    all_mr_comment = {}

    for project in projects:
        # MRs initiated per author.
        mr_count = defaultdict(int)
        merge_requests = project.mergerequests.list(all=True)
        for mr in merge_requests:
            author_name = mr.author['name']
            mr_count[author_name] += 1
        mr_initiation_df = pd.DataFrame.from_dict(mr_count, orient='index', columns=['合并请求发起数量']).reset_index().rename(
            columns={'index': '作者'})
        mr_initiation_df['项目名称'] = project.name
        all_mr_initiation[project.name] = mr_initiation_df

        # Average time from creation to merge, per author.
        # NOTE(review): this format expects a literal 'Z' suffix, while the
        # commit parsing above uses '%f%z' (offset form) — confirm which
        # timestamp shape the target GitLab server actually returns.
        mr_processing_time = defaultdict(list)
        merged_requests = project.mergerequests.list(all=True, state='merged')
        for mr in merged_requests:
            author_name = mr.author['name']
            created_at = datetime.strptime(mr.created_at, '%Y-%m-%dT%H:%M:%S.%fZ')
            merged_at = datetime.strptime(mr.merged_at, '%Y-%m-%dT%H:%M:%S.%fZ')
            processing_time = (merged_at - created_at).total_seconds()
            mr_processing_time[author_name].append(processing_time)
        mr_processing_data = []
        for author, times in mr_processing_time.items():
            average_time = sum(times) / len(times) if times else 0
            mr_processing_data.append([project.name, author, f"{average_time / 3600:.2f}"])
        mr_processing_df = pd.DataFrame(mr_processing_data, columns=['项目名称', '作者', '平均处理时间（小时）'])
        all_mr_processing.append(mr_processing_df)

        # Discussion notes counted against the MR author (not the commenter).
        mr_comment_count = defaultdict(int)
        for mr in merge_requests:
            author_name = mr.author['name']
            discussions = mr.discussions.list(all=True)
            for discussion in discussions:
                mr_comment_count[author_name] += len(discussion.attributes['notes'])
        mr_comment_df = pd.DataFrame.from_dict(mr_comment_count, orient='index', columns=['合并请求评论数量']).reset_index().rename(
            columns={'index': '作者'})
        mr_comment_df['项目名称'] = project.name
        all_mr_comment[project.name] = mr_comment_df

    # BUG FIX: pd.concat raises "No objects to concatenate" on an empty list;
    # return an empty frame with the expected columns when there are no projects.
    if all_mr_processing:
        mr_processing_combined = pd.concat(all_mr_processing, ignore_index=True)
    else:
        mr_processing_combined = pd.DataFrame(columns=['项目名称', '作者', '平均处理时间（小时）'])
    return all_mr_initiation, mr_processing_combined, all_mr_comment


def collect_issue_data(projects):
    """Aggregate issue statistics per project.

    Returns:
        all_issue_creation: {project_name: DataFrame of issues created per author}
        all_issue_resolution: {project_name: DataFrame of issues closed per resolver}
        issue_processing (DataFrame): average creation-to-close time (hours) per resolver
    """
    all_issue_creation = {}
    all_issue_resolution = {}
    all_issue_processing = []

    for project in projects:
        # Issues created per author.
        issue_creation_count = defaultdict(int)
        issues = project.issues.list(all=True)
        for issue in issues:
            author_name = issue.author['name']
            issue_creation_count[author_name] += 1
        issue_creation_df = pd.DataFrame.from_dict(issue_creation_count, orient='index', columns=['创建问题数量']).reset_index().rename(
            columns={'index': '作者'})
        issue_creation_df['项目名称'] = project.name
        all_issue_creation[project.name] = issue_creation_df

        # Closed issues per resolver (issues without closed_by are skipped).
        issue_resolution_count = defaultdict(int)
        closed_issues = project.issues.list(all=True, state='closed')
        for issue in closed_issues:
            if issue.closed_by:
                resolver_name = issue.closed_by['name']
                issue_resolution_count[resolver_name] += 1
        issue_resolution_df = pd.DataFrame.from_dict(issue_resolution_count, orient='index', columns=['解决问题数量']).reset_index().rename(
            columns={'index': '解决者'})
        issue_resolution_df['项目名称'] = project.name
        all_issue_resolution[project.name] = issue_resolution_df

        # Average creation-to-close time per resolver.
        # NOTE(review): '%fZ' expects a literal 'Z'; the commit parsing uses
        # '%f%z' (offset form) — confirm the server's timestamp shape.
        issue_processing_time = defaultdict(list)
        for issue in closed_issues:
            if issue.closed_by:
                resolver_name = issue.closed_by['name']
                created_at = datetime.strptime(issue.created_at, '%Y-%m-%dT%H:%M:%S.%fZ')
                closed_at = datetime.strptime(issue.closed_at, '%Y-%m-%dT%H:%M:%S.%fZ')
                processing_time = (closed_at - created_at).total_seconds()
                issue_processing_time[resolver_name].append(processing_time)
        issue_processing_data = []
        for resolver, times in issue_processing_time.items():
            average_time = sum(times) / len(times) if times else 0
            issue_processing_data.append([project.name, resolver, f"{average_time / 3600:.2f}"])
        issue_processing_df = pd.DataFrame(issue_processing_data, columns=['项目名称', '解决者', '平均处理时间（小时）'])
        all_issue_processing.append(issue_processing_df)

    # BUG FIX: pd.concat raises on an empty list; return an empty frame with
    # the expected columns when there are no projects.
    if all_issue_processing:
        issue_processing_combined = pd.concat(all_issue_processing, ignore_index=True)
    else:
        issue_processing_combined = pd.DataFrame(columns=['项目名称', '解决者', '平均处理时间（小时）'])
    return all_issue_creation, all_issue_resolution, issue_processing_combined

def save_dataset(projects):
    """Collect commit, MR, and issue statistics for *projects*, pickle the
    combined dataset to 'software_audit_dataset.pkl', and return it."""
    (commit_count_dfs, commit_time_df, commit_file_type_df,
     added_dfs, deleted_dfs, net_dfs) = collect_commit_data(projects)
    mr_initiation, mr_processing, mr_comment = collect_mr_data(projects)
    issue_creation, issue_resolution, issue_processing = collect_issue_data(projects)

    dataset = {
        'commit_count': commit_count_dfs,
        'commit_time': commit_time_df,
        'commit_file_type': commit_file_type_df,
        'added_lines': added_dfs,
        'deleted_lines': deleted_dfs,
        'net_lines': net_dfs,
        'mr_initiation': mr_initiation,
        'mr_processing': mr_processing,
        'mr_comment': mr_comment,
        'issue_creation': issue_creation,
        'issue_resolution': issue_resolution,
        'issue_processing': issue_processing,
    }

    with open('software_audit_dataset.pkl', 'wb') as pickle_file:
        pickle.dump(dataset, pickle_file)

    return dataset


def load_dataset():
    """Read back the dataset previously pickled by save_dataset()."""
    with open('software_audit_dataset.pkl', 'rb') as pickle_file:
        return pickle.load(pickle_file)

import hashlib

def calculate_sha256(file_path):
    """Return the hex SHA-256 digest of *file_path*, or None if the file
    does not exist (a message is printed in that case)."""
    digest = hashlib.sha256()
    try:
        # Binary mode; hash in fixed-size chunks so large files stay cheap.
        with open(file_path, "rb") as fh:
            while True:
                chunk = fh.read(4096)
                if not chunk:
                    break
                digest.update(chunk)
        return digest.hexdigest()
    except FileNotFoundError:
        print(f"文件 {file_path} 未找到。")
        return None

def print_sha256_checksum(file_path):
    """Print the SHA-256 checksum of *file_path* and append a
    'checksum  path' line to checksums.log. No-op if the file is missing."""
    checksum = calculate_sha256(file_path)
    if not checksum:
        return
    print(f"文件 {file_path} 的 SHA-256 校验和为: {checksum}")
    with open('checksums.log', 'a') as log_file:
        log_file.write(f"{checksum}  {file_path}\n")


# Generate the combined CSV reports
def generate_csv_report(commit_count_dfs, commit_time_df, commit_file_type_df, all_added_lines_dfs, all_deleted_lines_dfs, all_net_lines_dfs, all_mr_initiation, all_mr_processing, all_mr_comment, all_issue_creation, all_issue_resolution, all_issue_processing):
    """Merge the collected statistics into four CSV reports and log their checksums.

    Writes combined_commit_report.csv, combined_lines_report.csv,
    combined_mr_report.csv, and combined_issue_report.csv to the working
    directory, each followed by a SHA-256 checksum printed and appended to
    checksums.log.

    The dict-valued arguments are keyed by project name; their DataFrames are
    mutated in place (a '项目ID' column is added) as a side effect.
    """
    # Merge commit-related data: tag each per-project frame with its dict key
    # (the project name, used here as the "project ID") and stack them.
    all_commit_count_list = []
    for project_id, df in commit_count_dfs.items():
        df['项目ID'] = project_id
        all_commit_count_list.append(df)
    combined_commit_count = pd.concat(all_commit_count_list, ignore_index=True)

    # NOTE(review): these maps are identity mappings over the known project
    # names — known names map to themselves, unknown ones become NaN. Looks
    # like a leftover from a name->ID translation; confirm it is intentional.
    commit_time_df['项目ID'] = commit_time_df['项目名称'].map({project_id: project_id for project_id in commit_count_dfs.keys()})
    commit_file_type_df['项目ID'] = commit_file_type_df['项目名称'].map({project_id: project_id for project_id in commit_count_dfs.keys()})

    # Build the combined commit report via outer merges so authors missing
    # from either side are still kept.
    combined_commit_report = pd.merge(combined_commit_count, commit_time_df, on=['项目ID', '作者'], how='outer')
    combined_commit_report = pd.merge(combined_commit_report, commit_file_type_df, on=['项目ID', '作者'], how='outer')
    combined_commit_report = combined_commit_report.sort_values(by='项目ID')
    combined_commit_report.to_csv('combined_commit_report.csv', index=False)
    print_sha256_checksum('combined_commit_report.csv')

    # Merge line-count data (added / deleted / net) the same way.
    all_added_lines_list = []
    for project_id, df in all_added_lines_dfs.items():
        df['项目ID'] = project_id
        all_added_lines_list.append(df)
    combined_added_lines = pd.concat(all_added_lines_list, ignore_index=True)

    all_deleted_lines_list = []
    for project_id, df in all_deleted_lines_dfs.items():
        df['项目ID'] = project_id
        all_deleted_lines_list.append(df)
    combined_deleted_lines = pd.concat(all_deleted_lines_list, ignore_index=True)

    all_net_lines_list = []
    for project_id, df in all_net_lines_dfs.items():
        df['项目ID'] = project_id
        all_net_lines_list.append(df)
    combined_net_lines = pd.concat(all_net_lines_list, ignore_index=True)

    # Combined lines report.
    combined_lines_report = pd.merge(combined_added_lines, combined_deleted_lines, on=['项目ID', '作者'], how='outer')
    combined_lines_report = pd.merge(combined_lines_report, combined_net_lines, on=['项目ID', '作者'], how='outer')
    combined_lines_report = combined_lines_report.sort_values(by='项目ID')
    combined_lines_report.to_csv('combined_lines_report.csv', index=False)
    print_sha256_checksum('combined_lines_report.csv')

    # Merge merge-request data.
    all_mr_initiation_list = []
    for project_id, df in all_mr_initiation.items():
        df['项目ID'] = project_id
        all_mr_initiation_list.append(df)
    combined_mr_initiation = pd.concat(all_mr_initiation_list, ignore_index=True)

    all_mr_comment_list = []
    for project_id, df in all_mr_comment.items():
        df['项目ID'] = project_id
        all_mr_comment_list.append(df)
    combined_mr_comment = pd.concat(all_mr_comment_list, ignore_index=True)

    # Same identity-map pattern as above (see NOTE).
    all_mr_processing['项目ID'] = all_mr_processing['项目名称'].map({project_id: project_id for project_id in all_mr_initiation.keys()})

    # Combined MR report.
    combined_mr_report = pd.merge(combined_mr_initiation, all_mr_processing, on=['项目ID', '作者'], how='outer')
    combined_mr_report = pd.merge(combined_mr_report, combined_mr_comment, on=['项目ID', '作者'], how='outer')
    combined_mr_report = combined_mr_report.sort_values(by='项目ID')
    combined_mr_report.to_csv('combined_mr_report.csv', index=False)
    print_sha256_checksum('combined_mr_report.csv')

    # Merge issue data.
    all_issue_creation_list = []
    for project_id, df in all_issue_creation.items():
        df['项目ID'] = project_id
        all_issue_creation_list.append(df)
    combined_issue_creation = pd.concat(all_issue_creation_list, ignore_index=True)

    all_issue_resolution_list = []
    for project_id, df in all_issue_resolution.items():
        df['项目ID'] = project_id
        all_issue_resolution_list.append(df)
    combined_issue_resolution = pd.concat(all_issue_resolution_list, ignore_index=True)

    # Same identity-map pattern as above (see NOTE).
    all_issue_processing['项目ID'] = all_issue_processing['项目名称'].map({project_id: project_id for project_id in all_issue_creation.keys()})

    # Combined issue report: creators ('作者') are matched against
    # resolvers ('解决者') across the two frames.
    combined_issue_report = pd.merge(combined_issue_creation, all_issue_processing, left_on=['项目ID', '作者'], right_on=['项目ID', '解决者'], how='outer')
    combined_issue_report = pd.merge(combined_issue_report, combined_issue_resolution, on=['项目ID', '解决者'], how='outer')
    combined_issue_report = combined_issue_report.sort_values(by='项目ID')
    combined_issue_report.to_csv('combined_issue_report.csv', index=False)
    print_sha256_checksum('combined_issue_report.csv')

# dataset['commit_time'].to_csv('commit_time_report.csv', index=False)
#     print_sha256_checksum('commit_time_report.csv')

#     dataset['commit_file_type'].to_csv('commit_file_type_report.csv', index=False)
#     print_sha256_checksum('commit_file_type_report.csv')
    
#     # 代码行数相关报表
#     # dataset['added_lines'].to_csv('added_lines_report.csv', index=False)
#     for key,df in dataset['added_lines'].items():
#         df.to_csv(f'added_lines_report_{key}.csv', index=False)
#         print_sha256_checksum(f'added_lines_report_{key}.csv')
#     # dataset['deleted_lines'].to_csv('deleted_lines_report.csv', index=False)
#     for key,df in dataset['deleted_lines'].items():
#         df.to_csv(f'deleted_lines_report_{key}.csv', index=False)
#         print_sha256_checksum(f'deleted_lines_report_{key}.csv')

#     # dataset['net_lines'].to_csv('net_lines_report.csv', index=False)
#     for key,df in dataset['net_lines'].items():
#         df.to_csv(f'net_lines_report_{key}.csv', index=False)
#         print_sha256_checksum(f'net_lines_report_{key}.csv')
#     # 合并请求相关报表
#     # dataset['mr_initiation'].to_csv('mr_initiation_report.csv', index=False)
#     for key,df in dataset['mr_initiation'].items():
#         df.to_csv(f'mr_initiation_report_{key}.csv', index=False)
#         print_sha256_checksum(f'mr_initiation_report_{key}.csv')
#     # dataset['mr_processing'].to_csv('mr_processing_report.csv', index=False)
#     # for key,df in dataset['mr_processing'].items():
#     #     df.to_csv(f'mr_processing_report_{key}.csv', index=False)
    
#     # dataset['mr_comment'].to_csv('mr_comment_report.csv', index=False)
#     for key, df in dataset['mr_comment'].items():
#         df.to_csv(f'mr_comment_report_{key}.csv', index=False)
#         print_sha256_checksum(f'mr_comment_report_{key}.csv')

#     # 问题跟踪相关报表
#     # dataset['issue_creation'].to_csv('issue_creation_report.csv', index=False)
#     for key, df in dataset['issue_creation'].items():
#         df.to_csv(f'issue_creation_report_{key}.csv', index=False)
#         print_sha256_checksum(f'issue_creation_report_{key}.csv')

#     # dataset['issue_resolution'].to_csv('issue_resolution_report.csv', index=False)
#     for key,df in dataset['issue_resolution'].items():
#         df.to_csv(f'issue_resolution_report_{key}.csv', index=False)
#         print_sha256_checksum(f'issue_creation_report_{key}.csv')
    
#     dataset['issue_processing'].to_csv('issue_processing_report.csv', index=False)
#     print_sha256_checksum('issue_processing_report.csv')


def gitlab_project_report_csv(gl: gitlab.Gitlab, project_ids):
    """Fetch the given projects, build and pickle the dataset, and generate
    every combined CSV report. Errors are reported, not raised."""
    try:
        projects = [gl.projects.get(pid) for pid in project_ids]
        dataset = save_dataset(projects)
        # Unpack the dataset dict positionally, in the order the report
        # generator expects its arguments.
        report_keys = (
            'commit_count', 'commit_time', 'commit_file_type',
            'added_lines', 'deleted_lines', 'net_lines',
            'mr_initiation', 'mr_processing', 'mr_comment',
            'issue_creation', 'issue_resolution', 'issue_processing',
        )
        generate_csv_report(*(dataset[key] for key in report_keys))
        print("报告已生成...")
    except gitlab.exceptions.GitlabGetError:
        print(f"无法获取部分项目，请检查项目 ID 是否正确。")
    except Exception as e:
        print(f"发生错误: {e}")




if __name__ == "__main__":
    # Read connection settings from the INI configuration file.
    CONFIG_FILE = 'gitlab-report.ini'
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)

    # GitLab server URL, defaulting to gitlab.com.
    gitlab_url = config.get('gitlab', 'url', fallback='https://gitlab.com')
    # fallback='' prevents NoSectionError/NoOptionError when the token (or the
    # whole [gitlab] section) is missing from the config file.
    private_token = config.get('gitlab', 'private_token', fallback='')
    if not private_token:
        private_token = input("请输入你的 GitLab API Token: ")
        if not config.has_section('gitlab'):
            config.add_section('gitlab')
        config.set('gitlab', 'private_token', private_token)
        # BUG FIX: the original wrote the token to 'config.ini' while reading
        # from 'gitlab-report.ini', so the saved token was never found again
        # and the prompt recurred on every run. Write back to the same file.
        with open(CONFIG_FILE, 'w') as configfile:
            config.write(configfile)

    # Initialize and authenticate the GitLab client.
    gl = gitlab.Gitlab(gitlab_url, private_token=private_token)
    gl.auth()

    # Comma-separated project IDs from the [projects] section.
    project_ids_str = config.get('projects', 'ids', fallback='')
    project_ids = [int(pid.strip()) for pid in project_ids_str.split(',') if pid.strip()]
    if not project_ids:
        print(f"配置文件中未提供有效的项目 ID，请检查并更新 '{CONFIG_FILE}' 文件。")
    else:
        # Generate all reports for the configured projects.
        gitlab_project_report_csv(gl, project_ids)
