import os
import re
import difflib
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Union
import json
import chardet


class CodeFragmentMerger:
    """Locate code fragments in a new source tree and merge them into an old one.

    Fragments are read from a separator-delimited file, located inside
    ``new_dir`` via exact or signature matching, and then inserted into the
    corresponding files of ``old_dir`` — unless an identical or near-identical
    copy already exists there.
    """

    def __init__(self, new_dir: str, old_dir: str, fragments_file: str):
        """
        Initialize the code fragment merge tool.
        :param new_dir: directory containing the latest code
        :param old_dir: old directory that the code is merged into
        :param fragments_file: file containing the code fragments to merge
        """
        self.new_dir = Path(new_dir)
        self.old_dir = Path(old_dir)
        self.fragments_file = Path(fragments_file)
        self.fragments: List[dict] = []
        self.encoding_cache: Dict[Path, str] = {}  # cache of detected file encodings

        # Tuning parameters.
        self.context_lines = 5  # number of context lines used for matching
        self.similarity_threshold = 0.85  # minimum file-content similarity
        self.duplicate_threshold = 0.95  # duplicate-detection similarity threshold
        self.max_window_size = 100  # sliding-window size cap (performance guard)
        self.supported_extensions = ('.py', '.js', '.java', '.cpp', '.c', '.h', '.go', '.rs', '.ts', '.php', '.rb')

        # Parse the fragment file immediately.
        self.load_fragments()

    def detect_encoding(self, file_path: Path) -> str:
        """
        Auto-detect a file's character encoding (cached per path).
        :param file_path: path of the file to inspect
        :return: name of the detected encoding
        """
        if file_path in self.encoding_cache:
            return self.encoding_cache[file_path]

        # Read a binary prefix; the first 4KB is enough for detection.
        with open(file_path, 'rb') as f:
            raw_data = f.read(4096)

        result = chardet.detect(raw_data)
        encoding = result['encoding'] or 'utf-8'

        # Map common aliases: chardet often reports latin-1 for what is really
        # windows-1252, and ASCII is a strict subset of UTF-8.
        if encoding.lower() in ['iso-8859-1', 'latin-1']:
            encoding = 'windows-1252'
        elif encoding.lower() == 'ascii':
            encoding = 'utf-8'

        self.encoding_cache[file_path] = encoding
        return encoding

    def read_file_with_encoding(self, file_path: Path) -> str:
        """
        Read a file using its auto-detected encoding.
        :param file_path: path of the file to read
        :return: file content as a string
        :raises FileNotFoundError: if the file does not exist
        """
        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {file_path}")

        encoding = self.detect_encoding(file_path)

        try:
            # First try the detected encoding.
            return file_path.read_text(encoding=encoding)
        except (UnicodeDecodeError, LookupError):
            # On failure, fall back to a list of common encodings.
            # NOTE: latin-1 can decode any byte sequence, so this loop
            # effectively always returns before reaching utf-16.
            for enc in ['utf-8', 'gbk', 'gb2312', 'big5', 'latin-1', 'utf-16']:
                try:
                    return file_path.read_text(encoding=enc)
                except (UnicodeDecodeError, LookupError):
                    continue

        # Last resort: decode as UTF-8, dropping undecodable bytes.
        return file_path.read_text(encoding='utf-8', errors='ignore')

    def write_file_with_encoding(self, file_path: Path, content: str):
        """
        Write a file back using its original (detected) encoding.
        :param file_path: path of the file to write
        :param content: content to write
        """
        encoding = self.detect_encoding(file_path) if file_path.exists() else 'utf-8'

        try:
            # First try the detected encoding.
            file_path.write_text(content, encoding=encoding)
            # BUGFIX: without this return, a *successful* write fell through
            # and rewrote the file as UTF-8 with errors='ignore' below.
            return
        except (UnicodeEncodeError, LookupError):
            # On failure, fall back to a list of common encodings.
            for enc in ['utf-8', 'gbk', 'gb2312', 'big5', 'latin-1', 'utf-16']:
                try:
                    file_path.write_text(content, encoding=enc)
                    return
                except (UnicodeEncodeError, LookupError):
                    continue

        # Last resort: encode as UTF-8, dropping unencodable characters.
        file_path.write_text(content, encoding='utf-8', errors='ignore')

    def load_fragments(self):
        """Load and parse the fragment file into ``self.fragments``."""
        if not self.fragments_file.exists():
            raise FileNotFoundError(f"片段文件不存在: {self.fragments_file}")

        content = self.read_file_with_encoding(self.fragments_file)

        # Split the file on the fragment separator line.
        fragments = []
        current_fragment = []
        separator = "==== FRAGMENT ===="

        for line in content.splitlines():
            if line.strip() == separator:
                if current_fragment:
                    fragments.append("\n".join(current_fragment))
                    current_fragment = []
            else:
                current_fragment.append(line)

        if current_fragment:
            fragments.append("\n".join(current_fragment))

        # Build a signature and a normalized form for each non-blank fragment.
        self.fragments = []
        for frag in fragments:
            if frag.strip():
                self.fragments.append({
                    'content': frag,
                    'signature': self.create_fragment_signature(frag),
                    'normalized': self.normalize_code(frag),
                    'location': None,  # filled in by find_fragment_locations()
                })

        print(f"加载了 {len(self.fragments)} 个代码片段")

    def _strip_comment_lines(self, code: str) -> List[str]:
        """
        Strip blank lines and comments from *code*.

        Handles ``/* */`` block comments (possibly spanning lines) and
        ``//``, ``#`` and ``<!--`` whole-line comments.  Shared by
        create_fragment_signature() and normalize_code(), which previously
        duplicated this logic (and both appended spurious empty strings
        after a block-comment terminator — fixed here).
        :param code: raw source text
        :return: list of non-empty, stripped code lines
        """
        cleaned_lines: List[str] = []
        in_comment_block = False

        for line in code.splitlines():
            stripped = line.strip()

            # Inside a /* ... */ block: skip until the terminator shows up.
            if in_comment_block:
                if '*/' not in stripped:
                    continue
                in_comment_block = False
                stripped = stripped.split('*/', 1)[1].strip()

            # Peel off leading /* ... */ comments on this line.
            while stripped.startswith('/*'):
                if '*/' in stripped:
                    stripped = stripped.split('*/', 1)[1].strip()
                else:
                    in_comment_block = True
                    stripped = ""
                    break

            # Drop blank remainders and whole-line comments.  Inline comment
            # markers are kept (they may legitimately appear in strings).
            if not stripped or stripped.startswith(('//', '#', '<!--')):
                continue

            cleaned_lines.append(stripped)

        return cleaned_lines

    def create_fragment_signature(self, fragment: str) -> str:
        """
        Build a compact, stable signature for a code fragment.

        Comments and blank lines are removed so the signature survives
        cosmetic edits.
        :param fragment: fragment content
        :return: signature string (first/last three code lines)
        """
        cleaned_lines = self._strip_comment_lines(fragment)

        if not cleaned_lines:
            return ""

        if len(cleaned_lines) <= 6:
            return "\n".join(cleaned_lines)

        # Long fragments: first three plus last three lines.
        return "\n".join(cleaned_lines[:3] + ["..."] + cleaned_lines[-3:])

    def normalize_code(self, code: str) -> str:
        """
        Normalize code for fuzzy comparison.
        :param code: raw code
        :return: single-line, comment-free, whitespace-collapsed form
        """
        # Collapse internal whitespace runs in each surviving code line.
        lines = [re.sub(r'\s+', ' ', stripped)
                 for stripped in self._strip_comment_lines(code)]
        return " ".join(lines)

    def find_fragment_locations(self):
        """Locate each fragment's source file inside ``new_dir``."""
        print(f"在 {self.new_dir} 中定位代码片段...")

        file_count = 0
        matched_fragments = 0

        # Walk every supported source file in the new directory.
        for root, _, files in os.walk(self.new_dir):
            for file in files:
                if not file.endswith(self.supported_extensions):
                    continue

                file_path = Path(root) / file
                file_count += 1

                try:
                    content = self.read_file_with_encoding(file_path)
                    relative_path = file_path.relative_to(self.new_dir)

                    for frag in self.fragments:
                        if frag['location']:
                            continue  # already located

                        # Plain substring search — the previous
                        # re.search(re.escape(...)) is equivalent but slower.
                        if frag['content'] in content:
                            frag['location'] = {
                                'file': relative_path,
                                'source': 'exact_match'
                            }
                            matched_fragments += 1
                        elif frag['signature'] and frag['signature'] in content:
                            # BUGFIX: an empty signature used to match every
                            # file via re.search("") — guard against it.
                            frag['location'] = {
                                'file': relative_path,
                                'source': 'signature_match'
                            }
                            matched_fragments += 1
                except Exception as e:
                    # Keep scanning even if one file is unreadable.
                    print(f"处理文件 {file_path} 时出错: {str(e)}")
                    continue

        print(f"扫描了 {file_count} 个文件，定位了 {matched_fragments} 个片段")

        # Report fragments that could not be located.
        unmatched = [f for f in self.fragments if not f.get('location')]
        if unmatched:
            print(f"警告: {len(unmatched)} 个片段未定位:")
            for frag in unmatched[:5]:  # show at most five
                print(f"  - 签名: {frag['signature'][:100]}...")

    def find_old_file(self, new_file: Path) -> Optional[Path]:
        """
        Find the file in ``old_dir`` that corresponds to *new_file*.
        :param new_file: relative path inside the new directory
        :return: matching path inside the old directory, or None
        """
        # 1. Same relative path.
        old_path = self.old_dir / new_file
        if old_path.exists():
            return old_path

        # 2. Same file name anywhere under old_dir.
        filename = new_file.name
        candidates = [Path(root) / file
                      for root, _, files in os.walk(self.old_dir)
                      for file in files
                      if file == filename]

        if len(candidates) == 1:
            return candidates[0]
        if len(candidates) > 1:
            # Several candidates: pick the most similar by content.
            return self.select_by_content_similarity(new_file, candidates)

        # 3. Fall back to a content-similarity scan of the whole old tree.
        return self.find_similar_file_in_old(new_file)

    def select_by_content_similarity(self, new_file: Path, candidates: List[Path]) -> Optional[Path]:
        """
        Choose the candidate whose content is most similar to *new_file*.
        :param new_file: relative path inside the new directory
        :param candidates: candidate files inside the old directory
        :return: best match above the similarity threshold, or None
        """
        try:
            new_content = self.read_file_with_encoding(self.new_dir / new_file)
        except Exception as e:
            print(f"读取文件 {new_file} 时出错: {str(e)}")
            return None

        best_match = None
        best_score = 0.0

        for candidate in candidates:
            try:
                old_content = self.read_file_with_encoding(candidate)
            except Exception as e:
                print(f"读取文件 {candidate} 时出错: {str(e)}")
                continue

            similarity = difflib.SequenceMatcher(
                None, new_content, old_content
            ).ratio()

            if similarity > best_score:
                best_score = similarity
                best_match = candidate

        if best_match and best_score > self.similarity_threshold:
            return best_match

        return None

    def find_similar_file_in_old(self, new_file: Path) -> Optional[Path]:
        """
        Scan all of ``old_dir`` for the file most similar to *new_file*.
        :param new_file: relative path inside the new directory
        :return: best match above the similarity threshold, or None
        """
        # Collect every supported source file, then reuse the candidate
        # selection logic (removes a duplicated scoring loop).
        candidates = [Path(root) / file
                      for root, _, files in os.walk(self.old_dir)
                      for file in files
                      if file.endswith(self.supported_extensions)]
        return self.select_by_content_similarity(new_file, candidates)

    def is_fragment_duplicate(self, old_content: str, fragment: dict) -> bool:
        """
        Check whether a fragment is already present in the old file.
        :param old_content: old file content
        :param fragment: fragment dictionary
        :return: True if the fragment already exists
        """
        # 1. Exact substring match.
        if fragment['content'] in old_content:
            return True

        # 2. Normalized-content match.  BUGFIX: an empty normalized form
        # (e.g. a comment-only fragment) used to match every file.
        normalized_old = self.normalize_code(old_content)
        if fragment['normalized'] and fragment['normalized'] in normalized_old:
            return True

        # 3. Signature match (guard the empty-signature case here too).
        if fragment['signature'] and fragment['signature'] in old_content:
            return True

        # 4. Similarity matching over a sliding line window.
        frag_lines = fragment['content'].splitlines()
        old_lines = old_content.splitlines()

        window_size = len(frag_lines)
        if window_size == 0:
            return False

        # Performance guard: very large fragments are compared once via
        # their normalized forms instead of a sliding window.
        if window_size > self.max_window_size:
            similarity = difflib.SequenceMatcher(
                None, fragment['normalized'], normalized_old
            ).ratio()
            return similarity > self.duplicate_threshold

        for i in range(0, len(old_lines) - window_size + 1):
            window = old_lines[i:i + window_size]
            matcher = difflib.SequenceMatcher(None, frag_lines, window)
            if matcher.ratio() > self.duplicate_threshold:
                return True

        return False

    def merge_fragment(self, old_file: Path, fragment: dict) -> int:
        """
        Merge one fragment into an old file.
        :param old_file: path of the old file
        :param fragment: fragment dictionary
        :return: status code (0=failed, 1=merged, 2=already present)
        """
        if not old_file.exists():
            print(f"错误: 文件不存在 - {old_file}")
            return 0

        try:
            old_content = self.read_file_with_encoding(old_file)
            fragment_content = fragment['content']

            # Skip fragments that are already (nearly) present.
            if self.is_fragment_duplicate(old_content, fragment):
                print(f"片段已存在于 {old_file}，跳过")
                return 2

            # Try to find a sensible insertion point.
            position = self.find_insert_position(old_content, fragment_content)

            if position >= 0:
                # Insert at the located position.
                new_content = old_content[:position] + fragment_content + "\n" + old_content[position:]
                self.write_file_with_encoding(old_file, new_content)
                print(f"成功将片段合并到 {old_file}")
            else:
                # No anchor found: append at the end of the file.
                new_content = old_content.rstrip() + "\n\n" + fragment_content + "\n"
                self.write_file_with_encoding(old_file, new_content)
                print(f"将片段添加到 {old_file} 的末尾")
            return 1

        except Exception as e:
            print(f"合并到 {old_file} 时出错: {str(e)}")
            return 0

    def find_insert_position(self, old_content: str, fragment_content: str) -> int:
        """
        Find the best character offset in the old file to insert the fragment.
        :param old_content: old file content
        :param fragment_content: fragment content
        :return: insertion offset, or -1 when no anchor was found
        """
        # Try to anchor on the fragment's context.
        context = self.extract_context(fragment_content)
        if context:
            context_lines = context.splitlines()
            if len(context_lines) > self.context_lines:
                # Line just past the leading context is the anchor.
                anchor = context_lines[self.context_lines]

                # BUGFIX: for long fragments this index lands on the literal
                # "..." separator inserted by extract_context(), which would
                # anchor on arbitrary ellipses in the old file — skip it.
                if anchor != "...":
                    lines = old_content.splitlines()
                    for i, line in enumerate(lines):
                        if anchor in line:
                            # Character offset of the anchor line (+1 per newline).
                            position = sum(len(l) + 1 for l in lines[:i])
                            return position

        # Otherwise, look for a similar block of lines.
        old_lines = old_content.splitlines()
        frag_lines = fragment_content.splitlines()

        matcher = difflib.SequenceMatcher(None, old_lines, frag_lines)
        best_match = None

        for block in matcher.get_matching_blocks():
            if block.size > 3:  # require more than three matching lines
                best_match = block
                break

        if best_match:
            # Insert immediately after the matching block in the old file.
            position = sum(len(l) + 1 for l in old_lines[:best_match.a + best_match.size])
            return position

        return -1

    def extract_context(self, fragment_content: str) -> str:
        """
        Extract the context of a fragment (leading and trailing lines).
        :param fragment_content: fragment content
        :return: context string
        """
        lines = fragment_content.splitlines()
        if len(lines) <= self.context_lines * 2:
            return fragment_content  # short fragment: use it whole

        # Avoid overlapping the head and tail windows.
        start_index = min(self.context_lines, len(lines) // 2)
        end_index = min(self.context_lines, len(lines) - start_index)

        start = lines[:start_index]
        end = lines[-end_index:]
        return "\n".join(start + ["..."] + end)

    def run_merge(self):
        """Run the whole merge: locate fragments, merge each, then report."""
        print(f"开始合并到 {self.old_dir}")

        # Locate every fragment in the new tree first.
        self.find_fragment_locations()

        # Counters for the final report.
        success_count = 0
        skipped_count = 0
        failed_count = 0
        failed_fragments = []

        for frag in self.fragments:
            if not frag.get('location'):
                print(f"跳过未定位的片段: {frag['signature'][:50]}...")
                failed_fragments.append(frag)
                failed_count += 1
                continue

            new_file = frag['location']['file']
            old_file = self.find_old_file(new_file)

            if not old_file:
                print(f"警告: 在 {self.old_dir} 中未找到 {new_file} 的对应文件")
                failed_fragments.append(frag)
                failed_count += 1
                continue

            result = self.merge_fragment(old_file, frag)
            if result == 1:
                success_count += 1
            elif result == 2:
                skipped_count += 1
            else:
                failed_fragments.append(frag)
                failed_count += 1

        # Final summary.
        print(
            f"\n合并完成: {success_count} 成功, {skipped_count} 跳过, {failed_count} 失败 (共 {len(self.fragments)} 个片段)")

        if failed_fragments:
            report_file = self.old_dir / "merge_failures.json"
            try:
                with open(report_file, 'w', encoding='utf-8') as f:
                    json.dump([{
                        'signature': frag['signature'],
                        'content': frag['content'],
                        'location': str(frag['location']['file']) if frag.get('location') else None
                    } for frag in failed_fragments], f, indent=2, ensure_ascii=False)

                print(f"失败片段详情保存在 {report_file}")
            except Exception as e:
                print(f"保存报告失败: {str(e)}")

def main():
    """Command-line entry point: parse arguments and launch the merge run."""
    arg_parser = argparse.ArgumentParser(description="智能代码片段合并工具")
    for arg_name, arg_help in (
        ('new_dir', "包含最新代码的目录"),
        ('old_dir', "需要合并代码的老目录"),
        ('fragments_file', "包含要合并的代码片段的文件"),
    ):
        arg_parser.add_argument(arg_name, help=arg_help)
    opts = arg_parser.parse_args()

    # Echo the configuration before doing any work.
    print(f"代码片段合并工具启动")
    print(f"最新代码目录: {opts.new_dir}")
    print(f"目标目录: {opts.old_dir}")
    print(f"代码片段文件: {opts.fragments_file}")

    try:
        CodeFragmentMerger(opts.new_dir, opts.old_dir, opts.fragments_file).run_merge()
    except Exception as e:
        print(f"程序执行出错: {str(e)}")
        import traceback
        traceback.print_exc()


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()