import random
import re


def get_diff_segments(diff_text):
    """Split a unified diff into hunk segments with their line-number ranges.

    :param diff_text: unified diff text (may contain several ``@@`` hunks)
    :return: list of dicts with ``old_start`` / ``old_end`` / ``new_start`` /
             ``new_end`` (1-based, inclusive) and ``is_commented`` (always
             False here; intended to be flipped later by the caller).
    """
    if not diff_text:
        return []

    # Same header shape as _parse_unified_diff_with_line_numbers uses.
    # Per the unified-diff format, an omitted count means 1 line
    # (the old code wrongly defaulted new_count to -1 in that case).
    header_regex = re.compile(r"@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

    segments = []
    for line in diff_text.split('\n'):
        if not line.startswith('@@'):
            continue
        m = header_regex.match(line)
        if m is None:
            # Malformed hunk header: skip it instead of crashing on int().
            continue

        old_start = int(m.group(1))
        old_count = int(m.group(2)) if m.group(2) else 1
        new_start = int(m.group(3))
        new_count = int(m.group(4)) if m.group(4) else 1

        # Inclusive end line; a zero count (pure insertion/deletion hunk)
        # keeps end == start.
        old_end = old_start + old_count - 1 if old_count > 0 else old_start
        new_end = new_start + new_count - 1 if new_count > 0 else new_start

        segments.append({
            'old_start': old_start,
            'old_end': old_end,
            'new_start': new_start,
            'new_end': new_end,
            'is_commented': False,  # caller marks this once a comment matches
        })

    return segments


def extract_diff_code_snippet(diff):
    """Split a single diff hunk into its pre-change and post-change code.

    :param diff: diff snippet whose first line is the ``@@`` hunk header
    :return: tuple(code before the change, code after the change)
    """
    if not diff:
        return "", ""

    old_side = []
    new_side = []

    # Everything after the leading @@ header carries a one-char prefix.
    for raw in diff.split('\n')[1:]:
        marker = raw[:1]
        if marker == '-':
            # Removed line: belongs only to the "before" version.
            old_side.append(raw[1:])
        elif marker == '+':
            # Added line: belongs only to the "after" version.
            new_side.append(raw[1:])
        else:
            # Context line: shared by both versions (strip the leading space).
            text = raw[1:] if marker == ' ' else raw
            old_side.append(text)
            new_side.append(text)

    return '\n'.join(old_side), '\n'.join(new_side)


def _find_matching_hunk(hunks, start_new_line, end_new_line,
                        start_old_line, end_old_line):
    """Return the full text (header + lines) of the first hunk containing a
    line inside the commented range, or None when no hunk overlaps.

    New-file line numbers are preferred; old-file numbers are the fallback
    when the new-side bounds are not both present.
    """
    for hunk in hunks:
        for line_obj in hunk["lines"]:
            # line_obj: {"text": "+ xxx", "old_lineno": ..., "new_lineno": ...}
            if start_new_line is not None and end_new_line is not None:
                lineno = line_obj["new_lineno"]
                low, high = start_new_line, end_new_line
            elif start_old_line is not None and end_old_line is not None:
                lineno = line_obj["old_lineno"]
                low, high = start_old_line, end_old_line
            else:
                return None  # no usable line range at all
            if lineno is not None and low <= lineno <= high:
                return "\n".join([hunk["header"]]
                                 + [lo["text"] for lo in hunk["lines"]])
    return None


def extract_refinement_code_snippet(temp_refinement):
    """Extract the code snippets corresponding to the diff_comment from
    temp_refinement (the small hunk only, not the whole patch).

    :param temp_refinement: dict with "diff_comment", "before_file" and
        "after_file" entries; the files carry a unified-diff "patch" string
    :return: tuple(original_code, commented_code, refined_code), each a plain
        code string, or (None, None, None) when the comment position cannot
        be located in either patch.
    """
    diff_comment = temp_refinement.get("diff_comment") or {}
    # 1. Get the line range the comment refers to.
    pos = diff_comment.get("diff_position") or diff_comment.get("position") or {}
    start_new_line = pos.get("start_new_line")
    end_new_line = pos.get("end_new_line")
    start_old_line = pos.get("start_old_line")
    end_old_line = pos.get("end_old_line")

    if not start_new_line and not start_old_line:
        return None, None, None  # no line numbers, cannot locate the snippet

    # 2. Get both patches; bail out gracefully when either is missing
    #    (the old code raised KeyError on before_file["patch"]).
    before_patch = (temp_refinement.get("before_file") or {}).get("patch")
    after_patch = (temp_refinement.get("after_file") or {}).get("patch")
    if not before_patch or not after_patch:
        return None, None, None

    # 3. Parse each unified diff into line-numbered hunks and find the first
    #    hunk that covers the commented range (the old code hit
    #    UnboundLocalError here whenever no hunk matched).
    before_diff = _find_matching_hunk(
        _parse_unified_diff_with_line_numbers(before_patch),
        start_new_line, end_new_line, start_old_line, end_old_line)
    after_diff = _find_matching_hunk(
        _parse_unified_diff_with_line_numbers(after_patch),
        start_new_line, end_new_line, start_old_line, end_old_line)

    if before_diff and after_diff:
        # before_diff: original -> commented version; after_diff: -> refined.
        original_code, before_code1 = extract_diff_code_snippet(before_diff)
        _before_code2, after_code = extract_diff_code_snippet(after_diff)
        return original_code, before_code1, after_code

    # Not found in one of the patches.
    return None, None, None


def extract_comment_diff_snippet(temp_refinement):
    """Extract the small diff hunk that the diff_comment refers to, instead
    of the whole patch.

    :param temp_refinement: dict with "diff_comment" and "before_file"
        entries; "before_file" carries a unified-diff "patch" string
    :return: str with the unified-diff hunk (header + lines), or None when
        the comment position cannot be located.
    """
    diff_comment = temp_refinement.get("diff_comment") or {}
    # 1. Get the line range the comment refers to.
    pos = diff_comment.get("diff_position") or diff_comment.get("position") or {}
    start_new_line = pos.get("start_new_line")
    end_new_line = pos.get("end_new_line")
    start_old_line = pos.get("start_old_line")
    end_old_line = pos.get("end_old_line")

    if not start_new_line and not start_old_line:
        return None  # no line numbers, cannot locate the snippet

    # 2. Get the file's patch (before_file, since comments are usually made
    #    against the pre-change file). The old code indexed ["patch"] and
    #    raised KeyError whenever the key was absent.
    before_file = temp_refinement.get("before_file") or {}
    patch = before_file.get("patch")
    if not patch:
        return None

    # 3. Parse the unified diff into line-numbered hunks.
    hunks = _parse_unified_diff_with_line_numbers(patch)

    # 4. Return the first hunk containing a line in the commented range;
    #    new-file line numbers win over old-file ones.
    for hunk in hunks:
        for line_obj in hunk["lines"]:
            # line_obj: {"text": "+ xxx", "old_lineno": ..., "new_lineno": ...}
            if start_new_line is not None and end_new_line is not None:
                lineno = line_obj["new_lineno"]
                low, high = start_new_line, end_new_line
            elif start_old_line is not None and end_old_line is not None:
                lineno = line_obj["old_lineno"]
                low, high = start_old_line, end_old_line
            else:
                break  # incomplete range on both sides: nothing to match
            if lineno is not None and low <= lineno <= high:
                return "\n".join([hunk["header"]]
                                 + [lo["text"] for lo in hunk["lines"]])

    # Nothing matched.
    return None


def _parse_unified_diff_with_line_numbers(patch_text):
    """
    把一个统一 diff（只有一个文件的那种）拆成多个 hunk，
    且给每一行标注 old/new 的行号，方便后面按 new_line 去查。
    返回值大概是这样的：
    [
        {
            "header": "@@ -458,10 +458,13 @@ public:",
            "lines": [
                {"text": " ...", "old_lineno": 458, "new_lineno": 458},
                {"text": "+ added", "old_lineno": None, "new_lineno": 459},
                ...
            ]
        },
        ...
    ]
    """
    lines = patch_text.splitlines()
    hunks = []
    i = 0
    current_hunk = None

    header_regex = re.compile(r"@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

    while i < len(lines):
        line = lines[i]
        if line.startswith("@@"):
            m = header_regex.match(line)
            if not m:
                i += 1
                continue
            old_start = int(m.group(1))
            old_len = int(m.group(2)) if m.group(2) else 1
            new_start = int(m.group(3))
            new_len = int(m.group(4)) if m.group(4) else 1

            current_hunk = {
                "header": line,
                "lines": [],
                "old_cur": old_start,
                "new_cur": new_start,
            }
            hunks.append(current_hunk)
            i += 1
            continue

        if current_hunk is None:
            # 还没遇到 @@ 的前面部分，直接跳过
            i += 1
            continue

        # 在 hunk 里解析每一行
        prefix = line[:1] if line else ""
        old_lineno = None
        new_lineno = None

        if prefix == " ":
            old_lineno = current_hunk["old_cur"]
            new_lineno = current_hunk["new_cur"]
            current_hunk["old_cur"] += 1
            current_hunk["new_cur"] += 1
        elif prefix == "-":
            old_lineno = current_hunk["old_cur"]
            current_hunk["old_cur"] += 1
        elif prefix == "+":
            new_lineno = current_hunk["new_cur"]
            current_hunk["new_cur"] += 1
        else:
            # 理论上不会到这，先当作上下文行
            old_lineno = current_hunk["old_cur"]
            new_lineno = current_hunk["new_cur"]
            current_hunk["old_cur"] += 1
            current_hunk["new_cur"] += 1

        current_hunk["lines"].append({
            "text": line,
            "old_lineno": old_lineno,
            "new_lineno": new_lineno
        })
        i += 1

    return hunks


def extract_random_diff_snippet(diff_text):
    """Pick one hunk at random from a full unified diff; used to build diff
    snippets for samples that need no manual review.

    :param diff_text: full unified diff text
    :return: the chosen hunk (header + body) as a string, or "" when the
        input is empty or contains no ``@@`` hunk.
    """
    if not diff_text:
        return ""

    # Group the diff into hunks; anything before the first @@ header
    # (index/---/+++ preamble) is ignored.
    hunks = []
    for line in diff_text.splitlines():
        if line.startswith("@@"):
            hunks.append({"header": line, "lines": []})
        elif hunks:
            hunks[-1]["lines"].append(line)

    # Not a standard diff at all: nothing to pick from.
    if not hunks:
        return ""

    # Choose one hunk uniformly at random and rebuild its text.
    chosen = random.choice(hunks)
    return "\n".join([chosen["header"], *chosen["lines"]])


def extract_random_code_snippet(diff_text):
    """Pick one hunk at random from a full unified diff and return its code
    on both sides; used to build samples that need no manual review.

    :param diff_text: full unified diff text
    :return: tuple(before_code, after_code); ("", "") when the input is
        empty or contains no ``@@`` hunk.
    """
    # The hunk-collection logic here used to be a line-for-line copy of
    # extract_random_diff_snippet; reuse it instead. It returns "" for
    # empty/non-diff input, and extract_diff_code_snippet maps "" to
    # ("", ""), which matches the old behavior exactly.
    diff_snippet = extract_random_diff_snippet(diff_text)
    before_code, after_code = extract_diff_code_snippet(diff_snippet)
    return before_code, after_code
