import re
import subprocess
import sys
import argparse
from collections import defaultdict
import datetime
import os

# Module-level state shared between main()/count_code() and process_repo().
f = None  # open file handle for the raw-diff dump (diff_result.txt), or None
branch_need = None  # target branch to restrict stats to; falsy means "all branches"
# Table column widths (tuned to content length so the summary rows line up)
col_width = 15  # fixed width per column (adjust to actual content)
col_repo = col_width * 4
col_num = col_width - 1
diff_path_temp = None  # directory for count_code()'s diff dump, or None


def get_commit_info(repo_path, commit_hash, format_str=None):
    """
    Look up metadata for a single Git commit.

    Args:
        repo_path (str): repository path.
        commit_hash (str): full or abbreviated commit hash.
        format_str (str): custom ``--pretty`` format. Defaults to the
            9-field format below; the parsing code assumes that layout.

    Returns:
        dict: commit info (hash, author, dates, message, parents),
              or None when the commit does not exist or parsing fails.
    """
    # Default format: hash, author name/e-mail/date, committer name/e-mail/date,
    # subject, parent hashes.
    default_format = (
        "%H|%an|%ae|%ad|%cn|%ce|%cd|%s|%p"
    )
    format_str = format_str or default_format

    try:
        # `git show --no-patch` prints only the commit metadata, no diff.
        cmd = [
            "git", "-C", repo_path,
            "show", "--pretty=format:" + format_str,
            "--no-patch",
            commit_hash
        ]
        output = subprocess.check_output(
            cmd, text=True, stderr=subprocess.PIPE
        ).strip()

        # The subject (%s) may itself contain "|", so instead of requiring
        # exactly 9 parts (which raised on such commits), take the first 7
        # and the last field positionally and rejoin the middle as the
        # subject.
        fields = output.split("|")
        if len(fields) < 9:
            raise ValueError("提交信息格式解析失败")

        commit_info = {
            "hash": fields[0],  # full commit hash
            "author_name": fields[1],
            "author_email": fields[2],
            "author_date": fields[3],  # git's default date format (%ad, not a unix timestamp)
            "committer_name": fields[4],
            "committer_email": fields[5],
            "committer_date": fields[6],  # git's default date format (%cd)
            "message": "|".join(fields[7:-1]),  # subject, with embedded "|" restored
            "parent_hashes": fields[-1].split()  # empty for root commits, 2+ for merges
        }

        return commit_info

    except subprocess.CalledProcessError as e:
        # Commit not found or git invocation failed.
        print(f"错误：查询提交 {commit_hash} 失败 - {e.stderr.strip()}")
        return None
    except Exception as e:
        print(f"错误：解析提交信息失败 - {str(e)}")
        return None


def run_git_command(repo_path, command):
    """Run a shell command with *repo_path* as the working directory.

    Returns stdout decoded as UTF-8 (undecodable bytes replaced), or
    None when the command exits non-zero or an exception occurs.
    """
    try:
        proc = subprocess.run(
            command,
            cwd=repo_path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=False,  # keep raw bytes; decode explicitly below
            shell=True  # commands are built internally -- trusted input only
        )
    except Exception as e:
        print(f"异常：处理仓库 '{repo_path}' 时发生错误")
        print(str(e))
        return None

    if proc.returncode != 0:
        return None

    # Assume UTF-8 output; replace anything that is not.
    return proc.stdout.decode("utf-8", errors="replace")


def classify_line(line):
    """Classify *line* as "code", "comment" or "blank".

    A line counts as a comment when, after stripping whitespace, it
    starts with "/" or "*" (C/Java-style comments); "#" is NOT treated
    as a comment marker.
    """
    text = line.strip()
    if not text:
        return "blank"
    return "comment" if text[0] in "/*" else "code"


def parse_diff(diff_text):
    """Parse unified ``git diff`` output and tally per-file line changes.

    Returns a dict mapping file path -> {"added": {...}, "removed": {...}},
    where the inner dicts count "code", "comment" and "blank" lines as
    classified by classify_line().
    """
    file_stats = defaultdict(lambda: {
        "added": {"code": 0, "comment": 0, "blank": 0},
        "removed": {"code": 0, "comment": 0, "blank": 0}
    })

    current_file = None
    in_binary = False

    for line in diff_text.splitlines():
        if line.startswith("Binary files"):
            in_binary = True
            continue
        if in_binary and (line.startswith("---") or line.startswith("+++")):
            in_binary = False

        # File header: "--- a/path" / "+++ b/path"; strip the 2-char
        # "a/" / "b/" prefix. NOTE: split() truncates paths that contain
        # spaces -- a pre-existing limitation kept as-is.
        if line.startswith(("+++ ", "--- ")) and not in_binary:
            filename = line.split()[1][2:]
            # "/dev/null"[2:] == "ev/null": created/deleted-file sentinel.
            if not filename or filename == "ev/null":
                continue
            current_file = filename
            # Materialize the entry so files with zero counted lines
            # still appear in the result (the defaultdict is lazy).
            _ = file_stats[current_file]

        if current_file is None:
            # Preamble before the first file header (e.g. "diff --git",
            # "index ..."): don't attribute changes to a None key.
            continue

        # Count changed lines (the "+++"/"---" headers are excluded above).
        if line.startswith("+") and not line.startswith("+++"):
            content = line[1:].strip()
            file_stats[current_file]["added"][classify_line(content)] += 1

        elif line.startswith("-") and not line.startswith("---"):
            content = line[1:].strip()
            file_stats[current_file]["removed"][classify_line(content)] += 1

    return dict(file_stats)


def format_stats(repo_name, stats, author):
    """Format the change statistics of one repository as a text report.

    Args:
        repo_name: repository path/name, used in the heading.
        stats: either one per-file stats dict (from parse_diff) or a list
            of such dicts (one per merge commit, branch-filtered mode).
        author: unused here; kept for interface compatibility.

    Returns:
        (text, totals): the formatted report and the aggregated totals.
    """
    output = [f"\n仓库：{repo_name}"]

    totals = {"added": {"code": 0, "comment": 0, "blank": 0}, "removed": {"code": 0, "comment": 0, "blank": 0}}

    # Normalize the two input shapes: accumulate totals, and obtain one
    # per-file dict for the detail listing. This replaces two previously
    # duplicated branches with identical output.
    if isinstance(stats, list):
        for item in stats:
            total(item, totals)
        per_file = merge_dicts(stats)
    else:
        total(stats, totals)
        per_file = stats

    # Summary row.
    output_totals(output, totals)

    # Per-file details, largest total change first.
    output.append("文件变更明细：")
    sorted_items = sorted(per_file.items(),
                          key=lambda x: sum(x[1]['added'].values()) + sum(x[1]['removed'].values()),
                          reverse=True)
    output_details(sorted_items, output)
    return "\n".join(output), totals


def merge_dicts(input_list):
    """Merge a list of per-file stats dicts by summing their counters.

    Each element maps file path -> {"added": {...}, "removed": {...}};
    the result accumulates every counter per path across all elements.
    """
    merged = {}

    for stats_dict in input_list:
        for path, counters in stats_dict.items():
            # First sighting of this path: start from all-zero counters.
            if path not in merged:
                merged[path] = {
                    "added": {"blank": 0, "code": 0, "comment": 0},
                    "removed": {"blank": 0, "code": 0, "comment": 0}
                }
            # Accumulate both directions and all three line categories.
            target = merged[path]
            for direction in ("added", "removed"):
                for kind in ("blank", "code", "comment"):
                    target[direction][kind] += counters[direction][kind]

    return merged


width = 7  # column width used by the output_details()/output_totals() rows


def output_details(items, output):
    """Append one formatted detail row per file to *output*.

    *items* is an iterable of (filename, stats) pairs; columns are padded
    to the module-level ``width``.
    """
    for filename, stats in items:
        added, removed = stats['added'], stats['removed']
        output.append(
            f" | 总变更行数：{sum(added.values()) + sum(removed.values()):<{width}}"
            f" | 新增代码行：{added['code']:<{width}}"
            f" | 新增注释行：{added['comment']:<{width}}"
            f" | 新增空行：{added['blank']:<{width}}"
            f" | 删除代码行：{removed['code']:<{width}}"
            f" | 删除注释行：{removed['comment']:<{width}}"
            f" | 删除空行：{removed['blank']:<{width}}"
            # Bug fix: the row previously printed the literal "(unknown)"
            # while the unpacked `filename` went unused.
            f" | 文件：{filename}")


def output_totals(output, totals):
    """Append the aggregated summary row to *output* (uses module ``width``)."""
    added, removed = totals['added'], totals['removed']
    grand_total = sum(added.values()) + sum(removed.values())
    output.append(
        f" | 总变更行数：{grand_total:<{width}}"
        f" | 新增代码行：{added['code']:<{width}}"
        f" | 新增注释行：{added['comment']:<{width}}"
        f" | 新增空行：{added['blank']:<{width}}"
        f" | 删除代码行：{removed['code']:<{width}}"
        f" | 删除注释行：{removed['comment']:<{width}}"
        f" | 删除空行：{removed['blank']}")


def total(item, totals):
    """Accumulate every per-file counter of *item* into *totals* (in place).

    *item* maps file path -> {"added": {...}, "removed": {...}}; *totals*
    has the same two-level structure and receives the sums.
    """
    for per_file in item.values():
        for direction in ("added", "removed"):
            bucket = per_file[direction]
            for kind in ("code", "comment", "blank"):
                totals[direction][kind] += bucket.get(kind, 0)


def process_repo(repo_path, since, until, author, is_pull=True):
    """Collect change statistics for one repository.

    Reads module globals: ``branch_need`` (when set, only merges into that
    branch are counted) and ``f`` (optional file the raw diff is dumped to).

    Returns a per-file stats dict (no branch filter), a list of such dicts
    (one per qualifying merge commit), or None on failure / no output.
    """
    if not os.path.isdir(repo_path) or ".git" not in os.listdir(repo_path):
        print(f"警告：'{repo_path}' 不是有效的Git仓库")
        return None

    # Optionally refresh remote refs before counting.
    if is_pull:
        fetch_code(repo_path)

    if not branch_need:
        # Plain `git log` over all local branches, excluding merge commits.
        diff_cmd = f"git log --all --no-merges --author={author} --pretty=format:'' --since='{since}' --until='{until}' -p"
        diff_output = run_git_command(repo_path, diff_cmd)
        if diff_output:
            if f:
                f.write(diff_output)
            # Parse the diff and tally line changes.
            return parse_diff(diff_output)
        return None
    else:
        # List merge commits that landed on the target branch.
        merge_cmd = f"git log origin/{branch_need} --pretty=format:'%H|%s|%ct' --merges --since='{since}' --until='{until}'"
        merge_output = run_git_command(repo_path, merge_cmd)
        if merge_output:
            stats_merged = []
            merge_commits = merge_output.strip().splitlines()
            for merge_line in merge_commits:
                # print(f"\n正在分析合并提交：{merge_commit}")

                # Each line is "hash|subject|unix-timestamp".
                # NOTE(review): a subject containing "|" spills into the
                # timestamp field; commit_time is currently unused, so no harm.
                parts = merge_line.split("|", 2)
                if len(parts) != 3:
                    continue  # malformed line, skip
                merge_commit, commit_msg, commit_time = parts

                # Only count "Merge branch '...' into '<target>'" merges.
                if not re.search(fr"Merge branch '.*?' into '{branch_need}'", commit_msg):
                    continue

                # First parent = tip of the target branch before the merge.
                parent1 = get_merge_parents(repo_path, merge_commit)
                # print()
                # print(get_commit_info(repo_path, merge_commit))
                # print(get_commit_info(repo_path, parent1))
                # print(get_commit_info(repo_path, parent2))
                if not parent1:
                    continue

                # Diff introduced by the merge (parent1...merge_commit), filtered by author.
                diff_output = get_merge_diff(repo_path, merge_commit, parent1, author)
                if not diff_output:
                    continue
                if f:
                    f.write(diff_output)
                # Parse the diff and tally line changes.
                stats = parse_diff(diff_output)
                # print(stats)
                stats_merged.append(stats)
            return stats_merged
        return None


def get_merge_parents(repo_path, merge_commit):
    """Return the first parent hash of *merge_commit*, or None on failure.

    The first parent of a merge commit is the tip of the branch that was
    merged into (the target branch).
    """
    try:
        # %P prints the space-separated list of parent hashes.
        parent_cmd = [
            "git", "-C", repo_path, "show", "--pretty=format:%P", merge_commit
        ]
        parent_output = subprocess.check_output(
            parent_cmd, text=True, stderr=subprocess.PIPE
        ).strip()

        # "parent1 parent2" for a merge commit; take the first.
        parents = parent_output.split()
        return parents[0]
    except Exception as e:
        print(f"错误：获取合并提交 {merge_commit} 父提交失败 - {str(e)}")
        # Bug fix: previously returned (None, None) -- a truthy tuple that
        # defeated the caller's `if not parent1` guard.
        return None


def get_merge_diff(repo_path, merge_commit, master_parent, author):
    """Concatenate the patches of *author*'s commits brought in by a merge.

    Lists the commits in the symmetric difference between *master_parent*
    and *merge_commit*, keeps the non-merge commits authored by *author*,
    and returns their ``git show`` patches joined oldest-first.

    Returns "" when no commit matches, or None on error.
    """
    try:
        # Step 1: commits reachable from only one side ("..." = symmetric
        # difference from the merge base). The subject (%s) is placed LAST
        # in the format: it may contain "|", which previously corrupted the
        # author/parents fields during splitting.
        log_cmd = [
            "git", "-C", repo_path,
            "log", "--pretty=format:%H|%ct|%an|%P|%s",
            f"{master_parent}...{merge_commit}"
        ]
        all_commits_with_time = subprocess.check_output(
            log_cmd, text=True, stderr=subprocess.PIPE
        ).strip().splitlines()

        # Step 2: keep non-merge commits by the requested author.
        filtered_commits = []  # list of (hash, unix timestamp) tuples
        for line in all_commits_with_time:
            commit_hash, commit_time, commit_author, parents, commit_msg = line.split("|", 4)
            # Skip merges: by subject pattern, or by having more than one parent.
            if is_merge(commit_msg) or len(parents.split(' ')) > 1:
                continue
            if commit_author == author:
                filtered_commits.append((commit_hash, int(commit_time)))

        # Step 3: nothing matched -> empty diff.
        if not filtered_commits:
            return ""

        # Step 4: sort by timestamp, oldest first.
        filtered_commits.sort(key=lambda x: x[1])

        # Step 5: collect each commit's patch and join with a blank line
        # between patches ("".join replaces the former quadratic `+=` loop;
        # the resulting string is identical).
        patches = []
        for commit_hash, _ in filtered_commits:
            diff_cmd = [
                "git", "-C", repo_path,
                "show", commit_hash, '-p'
            ]
            patch = subprocess.check_output(
                diff_cmd, text=True, stderr=subprocess.PIPE
            ).strip()
            patches.append(patch)
        return "\n\n".join(patches)
    except Exception as e:
        print(f"错误： {str(e)}")
        return None


def is_merge(commit_msg):
    """Return True when *commit_msg* looks like a merge-commit subject.

    Matches the "Merge branch '...' into '...'" pattern produced by
    GitLab/Git merges.
    """
    merge_pattern = re.compile(r"Merge branch '.*?' into '.*?'")
    return bool(merge_pattern.search(commit_msg))


def fetch_code(repo_path):
    """Refresh remote tracking refs via `git fetch origin` (best effort)."""
    # run_git_command already swallows most errors internally; this guard
    # is a last resort so a failed fetch never aborts the statistics run.
    try:
        run_git_command(repo_path, 'git fetch origin')
    except Exception as e:
        print(f"警告：仓库 '{repo_path}' 分支检查失败")
        print(str(e))


def main():
    """CLI entry point: parse arguments, gather stats, print the report.

    Sets the module global ``branch_need`` from --branch; relies on the
    module global ``f`` (opened by the __main__ guard) for the diff dump.
    """
    parser = argparse.ArgumentParser(description='多仓库精准代码变更统计工具')
    parser.add_argument('--repos', required=True, help='逗号分隔的仓库路径列表')
    parser.add_argument('--authors', required=True, help='逗号分隔的提交人列表')
    parser.add_argument('--since', required=True, help='起始日期 (YYYY-MM-DD)')
    parser.add_argument('--until', required=True, help='结束日期 (YYYY-MM-DD)')
    parser.add_argument('--branch', required=False)
    args = parser.parse_args()

    # Validate the date format up front.
    try:
        datetime.datetime.strptime(args.since, '%Y-%m-%d')
        datetime.datetime.strptime(args.until, '%Y-%m-%d')
    except ValueError:
        print("错误：日期格式应为YYYY-MM-DD")
        sys.exit(1)

    repos = [r.strip() for r in args.repos.split(",") if r.strip()]
    authors = [a.strip() for a in args.authors.split(",") if a.strip()]

    global branch_need
    branch_need = args.branch

    for author in authors:
        all_stats = []
        for repo in repos:
            stats = process_repo(repo, args.since + ' 00:00:00 +0800', args.until + ' 00:00:00 +0800', author)
            if stats:
                all_stats.append((repo, stats))
        # Print the aggregated report for this author.
        if not all_stats:
            continue
        print('=' * 300)
        gather_total = []
        sum_total = 0
        for repo, stats in all_stats:
            output, totals = format_stats(repo, stats, author)
            gather_total.append((repo, totals, output))
        for _, totals, _ in gather_total:
            sum_total += sum(totals["added"].values()) + sum(totals["removed"].values())
        print(f"提交人：{author} - 总行数：{sum_total}\n")
        for repo, totals, _ in gather_total:
            # One summary row per repository (left-aligned columns).
            print(
                f"| 仓库：{repo:<{col_repo}}|"  # repo column is 4x wide (long paths)
                f"总变更行数：{sum(totals['added'].values()) + sum(totals['removed'].values()):<{col_num}}|"
                f"新增代码行：{totals['added']['code']:<{col_num}}|"
                f"新增注释行：{totals['added']['comment']:<{col_num}}|"
                f"新增空行：{totals['added']['blank']:<{col_num}}|"
                f"删除代码行：{totals['removed']['code']:<{col_num}}|"
                f"删除注释行：{totals['removed']['comment']:<{col_num}}|"
                f"删除空行：{totals['removed']['blank']:<{col_num}}|"
            )
        for *_, output in gather_total:
            print(output)


if __name__ == "__main__":
    # Recreate the raw-diff dump from scratch for this run. Catch only
    # OSError (file missing / not removable) instead of the previous bare
    # `except:`, which also swallowed KeyboardInterrupt and SystemExit.
    try:
        os.remove('diff_result.txt')
    except OSError:
        pass
    f = open('diff_result.txt', 'a', encoding='utf-8')
    try:
        main()
    finally:
        # Close the dump even when main() raises (it previously leaked).
        f.close()


def count_code(repos, authors, since, until, branch, diff_path, is_pull):
    """Programmatic entry point: compute change stats per author per repo.

    Args:
        repos: iterable of repository paths.
        authors: iterable of author names to report on.
        since: start date, "YYYY-MM-DD" (counted from 00:00:00 +0800).
        until: end date, "YYYY-MM-DD" (counted up to 00:00:00 +0800).
        branch: target branch for merge-based counting, or falsy for all branches.
        diff_path: directory to dump the raw diff into, or falsy to skip dumping.
        is_pull: when True, `git fetch origin` runs before counting.

    Returns:
        list of (author, [(repo, stats), ...]) tuples, omitting authors
        with no stats.

    Raises:
        Exception: when a date string is not in YYYY-MM-DD format.
    """
    global diff_path_temp
    diff_path_temp = diff_path

    # Validate the date format up front.
    try:
        datetime.datetime.strptime(since, '%Y-%m-%d')
        datetime.datetime.strptime(until, '%Y-%m-%d')
    except ValueError:
        print("错误：日期格式应为YYYY-MM-DD")
        raise Exception("错误：日期格式应为YYYY-MM-DD")

    if diff_path_temp:
        diff_result_txt_path = diff_path_temp + '/diff_result.txt'
        try:
            os.remove(diff_result_txt_path)
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt; prefer OSError
            pass
        global f
        # NOTE(review): this handle is never closed here -- process_repo()
        # writes to it during the loop below, and it stays open afterwards.
        f = open(diff_result_txt_path, 'a', encoding='utf-8')

    global branch_need
    branch_need = branch
    result = []
    for author in authors:
        all_stats = []
        for repo in repos:
            stats = process_repo(repo, since + ' 00:00:00 +0800', until + ' 00:00:00 +0800', author, is_pull)
            if stats:
                all_stats.append((repo, stats))
        # Keep only authors that produced any stats.
        if not all_stats:
            continue
        result.append((author, all_stats))
    return result
