import requests
import re
import dotenv
import pprint
import os
import subprocess
import base64
from pathlib import Path
from llm_assistant import LLMAssistant
from loguru import logger
import difflib
from difflib import SequenceMatcher
from typing import List
import html

class PatchAdapter:
    def __init__(self, similarity_threshold=0.7):
        self.similarity_threshold = similarity_threshold
        # self.base_dir = input["basedir"]


    def parse_hunk(self, hunk_text):
        """Parse one unified-diff hunk into a structured form.

        Args:
            hunk_text: Text of a single hunk, whose first line is the
                ``@@ -a,b +c,d @@`` header.

        Returns:
            A one-element list containing a dict with:
              - 'line_numbers': old/new start lines and counts from the header
              - 'lines': ordered ``(kind, content)`` tuples, where kind is
                'remove', 'add' or 'context' and content has trailing
                whitespace stripped.

        Raises:
            ValueError: If the first line is not a valid hunk header.
        """
        lines = hunk_text.split('\n')
        # Unified-diff headers may omit a count when it equals 1
        # (e.g. "@@ -3 +3 @@"), so the ",<count>" parts are optional.
        header_match = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)', lines[0])
        if not header_match:
            raise ValueError("Invalid hunk header")

        parsed_lines = []
        for line in lines[1:]:
            if not line:
                continue
            # Skip stray "---"/"+++" file-header lines embedded in the hunk.
            if line.startswith('---') or line.startswith('+++'):
                continue
            # The leading character determines the line's role in the diff.
            if line.startswith('-'):
                parsed_lines.append(('remove', line[1:].rstrip()))
            elif line.startswith('+'):
                parsed_lines.append(('add', line[1:].rstrip()))
            elif line.startswith(' '):
                parsed_lines.append(('context', line[1:].rstrip()))

        hunk = {
            'line_numbers': {
                'old_start': int(header_match.group(1)),
                # A missing count means 1 per the unified-diff format.
                'old_count': int(header_match.group(2) or 1),
                'new_start': int(header_match.group(3)),
                'new_count': int(header_match.group(4) or 1)
            },
            'lines': parsed_lines
        }

        return [hunk]

    # def find_best_match_position(self, file_lines, hunk):
    #     """找到hunk最佳匹配位置"""
    #     best_position = -1
    #     best_score = 0
        
    #     # 使用删除行作为主要锚点
    #     anchor_text = '\n'.join(hunk['removed_lines'])
    #     context_before = '\n'.join(hunk['context_before'])
    #     context_after = '\n'.join(hunk['context_after'])
        
    #     # 在文件中滑动窗口寻找最佳匹配
    #     for i in range(len(file_lines)):
    #         # 检查删除行匹配
    #         potential_removed = '\n'.join(file_lines[i:i + len(hunk['removed_lines'])])
    #         removed_similarity = SequenceMatcher(None, anchor_text, potential_removed).ratio()
            
    #         if removed_similarity < self.similarity_threshold:
    #             continue
                
    #         # 检查前后上下文
    #         context_before_text = '\n'.join(file_lines[max(0, i - len(hunk['context_before'])):i])
    #         context_after_text = '\n'.join(file_lines[i + len(hunk['removed_lines']):
    #                                                 i + len(hunk['removed_lines']) + len(hunk['context_after'])])
            
    #         before_similarity = SequenceMatcher(None, context_before, context_before_text).ratio()
    #         after_similarity = SequenceMatcher(None, context_after, context_after_text).ratio()
            
    #         # 计算综合得分
    #         score = (removed_similarity * 0.65 +  # 删除行权重更高
    #                 before_similarity * 0.15 +
    #                 after_similarity * 0.2)
            
    #         if score > 0.5:  # 只打印相对高的分数
    #             logger.debug(f"Position {i} total score: {score}")
    #             logger.debug(f"Before similarity: {before_similarity}")
    #             logger.debug(f"After similarity: {after_similarity}")
                    
    #         if score > best_score:
    #             best_score = score
    #             best_position = i
                
    #     return best_position if best_score >= self.similarity_threshold else -1

    # def find_best_match_position(self, file_lines, hunk):
    #     """找到hunk最佳匹配位置"""
    #     best_position = -1
    #     best_score = 0
        
    #     # 从changes中提取删除的行
    #     removed_lines = [content for change_type, content in hunk['changes'] 
    #                     if change_type == 'remove']
    #     if not removed_lines:
    #         # 如果没有删除的行，尝试使用上下文
    #         return self.find_position_by_context(file_lines, hunk)
        
    #     # 清理行尾的空白字符
    #     file_lines = [line.rstrip() for line in file_lines]
        
    #     # 准备比较文本
    #     anchor_text = '\n'.join(removed_lines)
    #     context_before = '\n'.join(hunk['context_before'])
    #     context_after = '\n'.join(hunk['context_after'])
        
    #     # 在文件中滑动窗口寻找最佳匹配
    #     for i in range(len(file_lines)):
    #         # 检查删除行匹配
    #         potential_removed = '\n'.join(file_lines[i:i + len(removed_lines)])
    #         removed_similarity = SequenceMatcher(None, anchor_text, potential_removed).ratio()
            
    #         if removed_similarity < self.similarity_threshold:
    #             continue
                
    #         # 检查前后上下文
    #         context_before_text = '\n'.join(file_lines[max(0, i - len(hunk['context_before'])):i])
    #         context_after_text = '\n'.join(file_lines[i + len(removed_lines):
    #                                     i + len(removed_lines) + len(hunk['context_after'])])
            
    #         before_similarity = SequenceMatcher(None, context_before, context_before_text).ratio()
    #         after_similarity = SequenceMatcher(None, context_after, context_after_text).ratio()
            
    #         # 计算综合得分
    #         score = (removed_similarity * 0.7 +
    #                 before_similarity * 0.15 +
    #                 after_similarity * 0.15)
            
    #         if score > best_score:
    #             best_score = score
    #             best_position = i
                
    #     return best_position if best_score >= self.similarity_threshold else -1
    def find_best_match_position(self, file_lines, hunk):
        """找到hunk最佳匹配位置"""
        best_position = -1
        best_score = 0
        
        # 获取hunk中的上下文行
        context_lines = []
        for line_type, content in hunk['lines']:
            if line_type in ('context', 'remove'):
                context_lines.append(content)
        
        # 在文件中查找匹配
        for i in range(len(file_lines)):
            score = 0
            matches = 0
            
            # 比较每一行
            for j, context_line in enumerate(context_lines):
                if (i + j < len(file_lines) and 
                    file_lines[i + j].rstrip() == context_line):
                    matches += 1
                    
            if matches > 0:
                score = matches / len(context_lines)
                if score > best_score:
                    best_score = score
                    best_position = i
        
        return best_position if best_score >= self.similarity_threshold else -1

    def find_position_by_context(self, file_lines, hunk):
        """当没有删除行时，通过上下文查找位置"""
        context_before = '\n'.join(hunk['context_before'])
        context_after = '\n'.join(hunk['context_after'])
        
        for i in range(len(file_lines)):
            context_before_text = '\n'.join(file_lines[max(0, i - len(hunk['context_before'])):i])
            context_after_text = '\n'.join(file_lines[i:i + len(hunk['context_after'])])
            
            before_similarity = SequenceMatcher(None, context_before, context_before_text).ratio()
            after_similarity = SequenceMatcher(None, context_after, context_after_text).ratio()
            
            if before_similarity > self.similarity_threshold and after_similarity > self.similarity_threshold:
                return i
                
        return -1

    def apply_hunk(self, file_lines, position, hunk):
        """Apply one parsed hunk to the file content at a matched position.

        Args:
            file_lines: Original file lines (each normally ending in '\\n').
            position: Index in file_lines where the hunk's original side
                starts, or -1 when no match was found (the input is then
                returned unchanged).
            hunk: Parsed hunk dict whose 'lines' key is a list of
                ``(kind, content)`` tuples as produced by parse_hunk().

        Returns:
            A new list of lines with the hunk applied.
        """
        if position == -1:
            return file_lines

        new_lines = file_lines[:position]
        current_pos = position

        for line_type, content in hunk['lines']:
            if line_type == 'remove':
                # Removed line: skip the corresponding original line.
                current_pos += 1
            elif line_type == 'add':
                # Added line: insert the new content.
                new_lines.append(content + '\n')
            elif line_type == 'context':
                # Context line: keep the original file's line so its exact
                # whitespace and newline are preserved.
                if current_pos < len(file_lines):
                    new_lines.append(file_lines[current_pos])
                    current_pos += 1

        # Append everything after the hunk untouched.
        new_lines.extend(file_lines[current_pos:])
        return new_lines
        

    def validate_modification(self, original_lines, modified_lines):
        """
        验证修改的合理性
        
        Args:
            original_lines: 原始文件的行列表
            modified_lines: 修改后的行列表
            
        Returns:
            bool: 修改是否合理
        """
        try:
            # 1. 基本检查
            if not modified_lines:
                logger.error("修改后的文件为空")
                return False
            
            # 2. 检查修改幅度
            if len(modified_lines) < len(original_lines) * 0.5 or \
               len(modified_lines) > len(original_lines) * 1.5:
                logger.warning("文件大小变化过大")
                return False
            
            # 3. 括号匹配检查
            if not self._check_brackets(modified_lines):
                logger.error("括号匹配错误")
                return False
            
            # 4. 缩进一致性检查
            if not self._check_indentation(modified_lines):
                logger.error("缩进不一致")
                return False
            
            # 5. 关键结构检查
            if not self._check_key_structures(original_lines, modified_lines):
                logger.error("关键结构被破坏")
                return False
            
            # # 6. 语言特定检查
            # if not self._check_language_specific(modified_lines):
            #     logger.error("语言特定规则验证失败")
            #     return False
            
            return True
        
        except Exception as e:
            logger.error(f"验证过程出错: {str(e)}")
            return False
        
    def _check_brackets(self, lines: List[str]) -> bool:
        """检查括号匹配"""
        stack = []
        brackets = {')': '(', '}': '{', ']': '['}
        
        for line in lines:
            for char in line:
                if char in '({[':
                    stack.append(char)
                elif char in ')}]':
                    if not stack or stack.pop() != brackets[char]:
                        return False
                    
        return len(stack) == 0

    def _check_indentation(self, lines: List[str]) -> bool:
        """检查缩进一致性"""
        prev_indent = 0
        for line in lines:
            if not line.strip():  # 跳过空行
                continue
            
            # 计算当前行的缩进
            current_indent = len(line) - len(line.lstrip())
            
            # 缩进变化不应过大
            if abs(current_indent - prev_indent) > 8:  # 允许最大缩进变化
                return False
            
            prev_indent = current_indent
        return True

    def _check_key_structures(self, original_lines: List[str], modified_lines: List[str]) -> bool:
        """检查关键结构完整性"""
        # 提取关键标识符（函数名、类名等）
        def extract_identifiers(lines):
            identifiers = set()
            # 简单的函数/类定义模式
            patterns = [
                r'def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(',
                r'class\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*[:\(]'
            ]
            for line in lines:
                for pattern in patterns:
                    matches = re.finditer(pattern, line)
                    for match in matches:
                        identifiers.add(match.group(1))
            return identifiers
        
        original_ids = extract_identifiers(original_lines)
        modified_ids = extract_identifiers(modified_lines)
        
        # 检查重要标识符是否保留
        important_ids_preserved = all(id_ in modified_ids 
                                    for id_ in original_ids 
                                    if not id_.startswith('_'))
        
        # 检查修改是否引入了过多的新标识符
        new_ids = modified_ids - original_ids
        if len(new_ids) > len(original_ids) * 0.3:  # 允许30%的新标识符
            return False
        
        return important_ids_preserved

    def _check_language_specific(self, lines: List[str]) -> bool:
        """语言特定的检查"""
        # 这里可以根据文件扩展名添加特定语言的检查
        # 例如，对于Python文件：
        
        in_function = False
        in_class = False
        
        for line in lines:
            stripped_line = line.strip()
            
            # 检查基本语法结构
            if stripped_line.startswith('def '):
                in_function = True
            elif stripped_line.startswith('class '):
                in_class = True
            elif stripped_line and not line[0].isspace():  # 非空行且无缩进
                in_function = False
                in_class = False
            
            # 检查基本语法错误
            if ':' in stripped_line:
                if stripped_line.count(':') > 1 and \
                   not any(quote in stripped_line for quote in ['"', "'"]):
                    return False
                
            # 检查缩进一致性
            if in_function or in_class:
                if stripped_line and not line[0].isspace():
                    return False
                
        return True

    def _split_patch_by_file(self, llm_response):
        """Split a git-format patch into per-file diff bodies.

        Args:
            llm_response: full patch text, possibly covering several files.

        Returns:
            dict mapping each file path (the ``b/`` side of the
            ``diff --git`` header, prefix stripped) to that file's diff text.
        """
        current_file = None
        current_diff = []
        files_to_patch = {}

        for line in llm_response.splitlines():
            if line.startswith('diff --git'):
                # Flush the previous file before starting a new one.
                if current_file and current_diff:
                    files_to_patch[current_file] = '\n'.join(current_diff)
                # "diff --git a/path b/path": take the b/ path and drop "b/".
                current_file = line.split(' ')[-1][2:]
                current_diff = []
                continue
            if line.startswith('index '):
                # Blob index lines carry no patch content.
                continue
            if current_file:
                current_diff.append(line)

        # Flush the last file's diff.
        if current_file and current_diff:
            files_to_patch[current_file] = '\n'.join(current_diff)
        return files_to_patch

    def _apply_diff_to_lines(self, source_lines, diff_content):
        """Apply every hunk of *diff_content* to a copy of *source_lines*.

        A backup copy is taken before each sub-hunk is applied so that a
        failed validation rolls the lines back instead of silently keeping a
        half-applied modification (the previous implementation only logged
        the failure and kept the broken result).

        Args:
            source_lines: original file content as a list of lines.
            diff_content: unified-diff text for this one file.

        Returns:
            The modified list of lines (hunks that could not be placed or
            validated leave the lines unchanged).
        """
        # Split on hunk headers while keeping each header with its body.
        hunks_text = re.split(r'(?=@@ -\d+,\d+ \+\d+,\d+ @@)', diff_content)
        hunks_text = [h for h in hunks_text if h.strip()]

        modified_lines = source_lines[:]
        for hunk_text in hunks_text:
            try:
                logger.info(f"hunk_text is {hunk_text}")
                # parse_hunk returns a list of sub-hunks for one @@ section.
                hunks = self.parse_hunk(hunk_text)
                for hunk in hunks:
                    logger.info(f"hunk is {hunk}")
                    # Locate where this sub-hunk best fits in the current text.
                    position = self.find_best_match_position(modified_lines, hunk)
                    logger.info(f"position is {position}")
                    if position == -1:
                        logger.warning(f"在文件中未找到hunk的合适位置:\n{hunk}")
                        continue

                    # Keep a backup so a failed validation can be rolled back.
                    backup_lines = modified_lines[:]
                    modified_lines = self.apply_hunk(modified_lines, position, hunk)

                    if not self.validate_modification(source_lines, modified_lines):
                        logger.warning("修改验证失败")
                        modified_lines = backup_lines
                        continue
            except Exception as e:
                logger.error(f"处理hunk时出错: {str(e)}\n{hunk_text}")
                continue
        return modified_lines

    def generate_adapted_file(self, llm_response_path, source_dir, output_dir):
        """Apply an LLM-generated patch to the old-version source files.

        Args:
            llm_response_path: path of the file holding the LLM patch text.
            source_dir: directory containing the (old-version) source files.
            output_dir: directory the adapted files are written to.
        """
        # The stored response may contain HTML escape sequences; decode them
        # before parsing the diff.
        llm_response = html.unescape(
            Path(llm_response_path).read_text(encoding='utf-8')
        )
        logger.info(f"the first diff_content:\n{llm_response}")

        # Group the patch content by target file.
        files_to_patch = self._split_patch_by_file(llm_response)

        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        for file_path, diff_content in files_to_patch.items():
            source_file = Path(source_dir) / file_path
            target_file = output_dir / file_path
            logger.info(f"the diff_content:\n{diff_content}")

            if not source_file.exists():
                logger.error(f"源文件不存在: {source_file}, 已跳过")
                continue

            # Make sure the target's parent directory exists.
            target_file.parent.mkdir(parents=True, exist_ok=True)

            try:
                with open(source_file, 'r', encoding='utf-8') as f:
                    source_lines = f.readlines()

                modified_lines = self._apply_diff_to_lines(source_lines, diff_content)

                with open(target_file, 'w', encoding='utf-8') as f:
                    f.writelines(modified_lines)
                logger.info(f"成功应用修改到文件: {file_path}")
            except Exception as e:
                logger.error(f"处理文件 {file_path} 时出错: {str(e)}")
                continue

    # def apply_llm_patch(self, llm_response_path):
    #     """
    #     将 LLM 生成的 patch 应用到旧版本的源文件
        
    #     :param llm_response_path: LLM 生成的响应内容路径
    #     """
    #     llm_response = llm_response_path.read_text()
    #     logger.info("test--------------")
        
    #     base_dir = Path('patchfile') / f"{self.owner}_{self.repo}_{self.patch_commit_sha[:6]}"
    #     target_dir = base_dir / self.target_version
    #     output_dir = base_dir / f"adapted_{self.target_version}"
        
    #     # 解析 LLM 响应，提取每个文件的 diff
    #     current_file = None
    #     current_diff = []
    #     files_to_patch = {}
        
    #     for line in llm_response.splitlines():
    #         if line.startswith('diff --git'):
    #             if current_file and current_diff:
    #                 files_to_patch[current_file] = '\n'.join(current_diff)
    #             # 从 diff --git a/path/to/file b/path/to/file 提取文件路径
    #             current_file = line.split(' ')[-1][2:] # 取 b/path/to/file 并去掉 b/
    #             current_diff = []
    #             continue
    #         if line.startswith('index '):
    #             continue
    #         if current_file:
    #             current_diff.append(line)
        
    #     # 添加最后一个文件的 diff
    #     if current_file and current_diff:
    #         files_to_patch[current_file] = '\n'.join(current_diff)
        
    #     # 创建输出目录
    #     if not output_dir.exists():
    #         output_dir.mkdir(parents=True)
        
    #     # 应用修改到每个文件
    #     for file_path, diff_content in files_to_patch.items():
    #         source_file = target_dir / file_path
    #         target_file = output_dir / file_path
            
    #         if not source_file.exists():
    #             logger.error(f"源文件不存在: {source_file}")
    #             continue
                
    #         # 确保目标目录存在
    #         target_file.parent.mkdir(parents=True, exist_ok=True)
            
    #         # 应用 diff
    #         try:
    #             self._apply_patch_to_file(diff_content, source_file, target_file)
    #             logger.info(f"成功应用修改到文件: {file_path}")
    #         except Exception as e:
    #             logger.error(f"应用修改到文件 {file_path} 时出错: {str(e)}")

    # def _apply_patch_to_file(self, diff_content, source_file, target_file):
    #     """Enhanced patch application with structural validation"""
    #     def find_function_definition(lines, func_name):
    #         """Find the exact function definition with more flexible matching"""
    #         # 更灵活的函数定义模式，允许多行定义
    #         pattern = re.compile(rf'^(?:static\s+)?(?:int|void|char\s*\*)\s+{re.escape(func_name)}\s*\(.*?(?:\)|\n)')
            
    #         for i, line in enumerate(lines):
    #             if pattern.match(line.strip()):
    #                 # 如果函数定义跨多行，找到完整定义
    #                 if not line.strip().endswith(')'):
    #                     bracket_count = line.count('(') - line.count(')')
    #                     j = i + 1
    #                     while j < len(lines) and bracket_count > 0:
    #                         bracket_count += lines[j].count('(') - lines[j].count(')')
    #                         j += 1
    #                     if bracket_count == 0:
    #                         return i
    #                 else:
    #                     return i
    #         return None

    #     def find_function_bounds(lines, start_line):
    #         """Find function bounds with improved bracket matching"""
    #         if start_line is None:
    #             return None, None
            
    #         # 从函数定义开始查找开括号
    #         bracket_count = 0
    #         found_opening = False
    #         current_line = start_line
            
    #         # 首先找到函数定义结束和开括号
    #         while current_line < len(lines):
    #             line = lines[current_line]
    #             # 计算当前行的括号
    #             bracket_count += line.count('{') - line.count('}')
                
    #             if '{' in line:
    #                 found_opening = True
    #                 break
                    
    #             current_line += 1
                
    #             # 如果搜索太远还没找到开括号，可能是出错了
    #             if current_line - start_line > 10:  # 设置合理的搜索范围
    #                 return None, None
            
    #         if not found_opening:
    #             return None, None
            
    #         # 继续查找直到找到匹配的闭括号
    #         for i in range(current_line + 1, len(lines)):
    #             bracket_count += lines[i].count('{') - lines[i].count('}')
    #             if bracket_count == 0:
    #                 return start_line, i
            
    #         return None, None

    #     def validate_structure(lines):
    #         """Validate basic code structure"""
    #         bracket_count = 0
    #         for line in lines:
    #             bracket_count += line.count('{') - line.count('}')
    #         return bracket_count == 0

    #     with open(source_file, 'r', encoding='utf-8') as f:
    #         original_lines = f.readlines()

    #     # Parse hunks and group by function
    #     function_changes = {}
    #     for hunk in self._parse_hunks(diff_content):
    #         if hunk['function']:
    #             # 提取函数名，处理可能的函数签名变化
    #             func_match = re.search(r'\b(\w+)\s*\(', hunk['function'])
    #             if func_match:
    #                 func_name = func_match.group(1)
    #                 if func_name not in function_changes:
    #                     function_changes[func_name] = []
    #                 function_changes[func_name].append(hunk)
    #                 logger.debug(f"Found changes for function: {func_name}")

    #     # Apply changes function by function
    #     new_lines = original_lines[:]
    #     modified = False
        
    #     for func_name, hunks in function_changes.items():
    #         logger.debug(f"Processing function: {func_name}")
            
    #         # Find function definition
    #         func_start = find_function_definition(new_lines, func_name)
    #         if func_start is None:
    #             logger.error(f"Could not find function definition: {func_name}")
    #             continue
            
    #         # Find function bounds
    #         start, end = find_function_bounds(new_lines, func_start)
    #         if start is None:
    #             logger.error(f"Could not find function bounds: {func_name}")
    #             continue

    #         logger.debug(f"Found function {func_name} from line {start} to {end}")

    #         # Apply changes within function bounds
    #         function_lines = new_lines[start:end + 1]
    #         modified_lines = self._apply_changes_to_function(
    #             function_lines,
    #             hunks,
    #             func_name
    #         )

    #         # Validate and replace
    #         if validate_structure(modified_lines):
    #             if modified_lines != function_lines:  # 只有在实际有修改时才替换
    #                 new_lines[start:end + 1] = modified_lines
    #                 modified = True
    #                 logger.info(f"Successfully modified function: {func_name}")
    #         else:
    #             logger.error(f"Invalid structure after modifying {func_name}")

    #     # Only write if there were actual modifications
    #     if modified:
    #         with open(target_file, 'w', encoding='utf-8') as f:
    #             f.writelines(new_lines)
    #         logger.info(f"Successfully wrote changes to {target_file}")
    #     else:
    #         logger.warning("No modifications were made to the file")

    # def _apply_changes_to_function(self, function_lines, hunks, func_name):
    #     """Apply changes to a single function with enhanced content matching"""
    #     modified_lines = function_lines[:]
        
    #     for hunk in hunks:
    #         # 使用更灵活的内容匹配
    #         position = self._find_best_match_position(
    #             modified_lines,
    #             hunk['context_before'],
    #             hunk['context_after'],
    #             threshold=0.7  # 降低阈值以允许更多的近似匹配
    #         )
            
    #         if position is not None:
    #             # Create backup
    #             backup_lines = modified_lines[:]
                
    #             try:
    #                 self._apply_hunk_changes(
    #                     modified_lines,
    #                     position,
    #                     hunk['removed_lines'],
    #                     hunk['added_lines'],
    #                     similarity_threshold=0.7  # 降低相似度要求
    #                 )
                    
    #                 # 验证修改后的代码结构
    #                 if not self._validate_function_structure(modified_lines):
    #                     logger.warning(f"Invalid structure after changes in {func_name}, rolling back")
    #                     modified_lines = backup_lines
    #             except Exception as e:
    #                 logger.error(f"Error applying changes to {func_name}: {str(e)}")
    #                 modified_lines = backup_lines
        
    #     return modified_lines

    # def _parse_hunks(self, diff_content):
    #     """解析 diff 内容提取每个 hunk 的上下文和修改内容"""
    #     hunks = []
    #     current_hunk = None
        
    #     # 用于跟踪当前正在处理的函数
    #     current_function = None
        
    #     for line in diff_content.splitlines():
    #         # 检测函数定义
    #         if re.match(r'^[+-]?\s*(?:static\s+)?(?:int|void|char\s*\*)\s+\w+\s*\(', line):
    #             current_function = line.strip()
    #             if current_function.startswith(('+', '-')):
    #                 current_function = current_function[1:]
            
    #         # 跳过文件头
    #         if line.startswith(('---', '+++', 'index', 'diff --git')):
    #             continue
            
    #         # 新的 hunk 开始
    #         if line.startswith('@@'):
    #             if current_hunk:
    #                 hunks.append(current_hunk)
    #             current_hunk = {
    #                 'function': current_function,
    #                 'content': line + '\n',
    #                 'context_before': [],
    #                 'context_after': [],
    #                 'removed_lines': [],
    #                 'added_lines': [],
    #                 'in_change': False
    #             }
    #             continue
            
    #         if not current_hunk:
    #             continue
            
    #         current_hunk['content'] += line + '\n'
            
    #         if line.startswith(' '):
    #             if current_hunk['in_change']:
    #                 current_hunk['context_after'].append(line[1:])
    #             else:
    #                 current_hunk['context_before'].append(line[1:])
    #         elif line.startswith('-'):
    #             current_hunk['in_change'] = True
    #             current_hunk['removed_lines'].append(line[1:])
    #         elif line.startswith('+'):
    #             current_hunk['in_change'] = True
    #             current_hunk['added_lines'].append(line[1:])
        
    #     if current_hunk:
    #         hunks.append(current_hunk)
        
    #     return hunks

    # def _find_best_match_position(self, context_before, context_after, file_lines, 
    #                             context_size=3, threshold=0.8):
    #     """
    #     使用上下文匹配找到最佳修改位置
        
    #     :param context_before: 修改前的上下文行
    #     :param context_after: 修改后的上下文行
    #     :param file_lines: 文件的所有行
    #     :param context_size: 匹配的上下文大小
    #     :param threshold: 匹配度阈值
    #     :return: 最佳匹配位置，如果没有找到好的匹配则返回 None
    #     """
    #     if not context_before and not context_after:
    #         return None
            
    #     best_match_score = 0
    #     best_position = None
        
    #     # 构建上下文匹配字符串
    #     context_str = ''.join(context_before + context_after)
        
    #     # 在文件中滑动窗口寻找最佳匹配
    #     for i in range(len(file_lines) - len(context_before + context_after) + 1):
    #         window = file_lines[i:i + len(context_before + context_after)]
    #         window_str = ''.join(window)
            
    #         # 计算匹配度
    #         matcher = SequenceMatcher(None, context_str, window_str)
    #         score = matcher.ratio()
            
    #         if score > best_match_score:
    #             best_match_score = score
    #             best_position = i + len(context_before)
        
    #     # 如果最佳匹配超过阈值，返回位置
    #     if best_match_score >= threshold:
    #         return best_position
        
    #     # 如果没有找到好的匹配，尝试只匹配前后文的一部分
    #     if len(context_before) > context_size:
    #         context_before = context_before[-context_size:]
    #     if len(context_after) > context_size:
    #         context_after = context_after[:context_size]
            
    #     return self._find_best_match_position(context_before, context_after, 
    #                                         file_lines, context_size, threshold - 0.1)

    # def _apply_hunk_changes(self, lines, position, removed_lines, added_lines, similarity_threshold=0.7):
    #     """Apply changes with more flexible matching"""
    #     # 获取实际要修改的行
    #     actual_lines = lines[position:position + len(removed_lines)]
        
    #     # 使用更灵活的内容匹配
    #     if self._lines_match(actual_lines, removed_lines, threshold=similarity_threshold):
    #         # 删除旧行
    #         del lines[position:position + len(removed_lines)]
    #         # 插入新行
    #         for i, line in enumerate(added_lines):
    #             if not line.endswith('\n'):
    #                 line += '\n'
    #             lines.insert(position + i, line)
    #     else:
    #         logger.warning(f"Content mismatch. Expected:\n{''.join(removed_lines)}\nActual:\n{''.join(actual_lines)}")
    #         # 强制应用修改，但保留日志
    #         del lines[position:position + len(removed_lines)]
    #         for i, line in enumerate(added_lines):
    #             if not line.endswith('\n'):
    #                 line += '\n'
    #             lines.insert(position + i, line)

    # def _lines_match(self, lines1, lines2, threshold=0.8):
    #     """
    #     检查两组行是否匹配
        
    #     :param lines1: 第一组行
    #     :param lines2: 第二组行
    #     :param threshold: 匹配度阈值
    #     :return: 是否匹配
    #     """
    #     if not lines1 or not lines2:
    #         return False
            
    #     text1 = ''.join(lines1)
    #     text2 = ''.join(lines2)
        
    #     matcher = SequenceMatcher(None, text1, text2)
    #     return matcher.ratio() >= threshold

    # def _find_anchor_points(self, context_lines):
    #     """
    #     在上下文中找到可以作为锚点的唯一可识别点
    #     返回一个 (pattern, type) 元组列表
    #     """
    #     anchors = []
        
    #     for line in context_lines:
    #         # 查找函数调用
    #         if re.search(r'\w+\([^)]*\)', line):
    #             func_call = re.search(r'(\w+\([^)]*\))', line).group(1)
    #             anchors.append((func_call, 'function_call'))
            
    #         # 查找控制结构
    #         elif any(keyword in line for keyword in ['if', 'for', 'while', 'switch', 'return']):
    #             control = line.strip()
    #             anchors.append((control, 'control_structure'))
            
    #         # 查找变量声明
    #         elif re.search(r'^\s*\w+\s+\w+\s*[=;]', line):
    #             declaration = line.strip()
    #             anchors.append((declaration, 'declaration'))
        
    #     return anchors

    # def _find_position_with_anchors(self, lines, context_before, context_after, anchors):
    #     """
    #     使用锚点和上下文找到最佳修改位置
    #     """
    #     best_position = None
    #     best_score = 0
        
    #     # 如果有锚点，首先尝试使用锚点定位
    #     for anchor, anchor_type in anchors:
    #         for i, line in enumerate(lines):
    #             if anchor in line:
    #                 # 验证周围上下文
    #                 context_score = self._verify_surrounding_context(
    #                     lines, i, context_before, context_after
    #                 )
    #                 if context_score > best_score:
    #                     best_score = context_score
    #                     best_position = i
        
    #     # 如果没有找到好的锚点匹配，回退到普通上下文匹配
    #     if best_score < 0.8:
    #         best_position = self._find_best_match_position(
    #             context_before, context_after, lines
    #         )
        
    #     return best_position

    # def _verify_surrounding_context(self, lines, position, context_before, context_after, 
    #                               context_size=3):
    #     """
    #     验证给定位置周围的上下文匹配程度
    #     返回0-1之间的匹配分数
    #     """
    #     start = max(0, position - len(context_before))
    #     end = min(len(lines), position + len(context_after))
        
    #     # 获取实际上下文
    #     actual_context = lines[start:end]
    #     expected_context = context_before + context_after
        
    #     # 使用序列匹配计算相似度
    #     matcher = SequenceMatcher(None, 
    #                             ''.join(actual_context), 
    #                             ''.join(expected_context))
    #     return matcher.ratio()

    # def _validate_function_structure(self, lines):
    #     """
    #     验证函数结构的完整性
    #     检查括号匹配、基本语法等
    #     """
    #     # 检查括号平衡
    #     bracket_count = 0
    #     for line in lines:
    #         bracket_count += line.count('{') - line.count('}')
    #         # 括号数不能小于0
    #         if bracket_count < 0:
    #             return False
        
    #     # 最终括号应该平衡
    #     if bracket_count != 0:
    #         return False
        
    #     # 检查基本语法结构
    #     for line in lines:
    #         # 检查未闭合的字符串
    #         if line.count('"') % 2 != 0:
    #             return False
            
    #         # 检查分号结尾（忽略预处理指令、花括号行等）
    #         stripped = line.strip()
    #         if (stripped and 
    #             not stripped.startswith('#') and 
    #             not stripped.endswith('{') and 
    #             not stripped.endswith('}') and 
    #             not stripped.endswith(';')):
    #             return False
        
    #     return True

    # def run(self):
