#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/7/27
# @Author  : YunZhen
# @File    : file_similar.py
# @Software: PyCharm
"""
计算文件相似度
"""
import re
import math
import string
import difflib
from pathlib import Path
from collections import Counter

from typing_extensions import Tuple


class __FileSimilar:
    similarity_method_map = {
        'cosine': '_cosine_similarity',
        'jaccard': '_jaccard_similarity',
        'levenshtein': '_levenshtein_similarity',
        'sequence': '_sequence_similarity'
    }

    def are_files_similar(
            self,
            file1: str | Path,
            file2: str | Path,
            similarity_threshold: float = 0.8,
            method: str = "cosine"
    ) -> Tuple[bool, float]:
        """
        比较两个文件的相似度是否超过指定阈值

        参数:
            file1: 第一个文件路径
            file2: 第二个文件路径
            similarity_threshold: 相似度阈值 (0-1)
            method: 相似度计算方法
                'cosine' - 余弦相似度 (默认)
                'jaccard' - Jaccard相似度
                'levenshtein' - 编辑距离相似度
                'sequence' - 序列匹配相似度

        返回:
            bool: 相似度是否超过阈值
        """
        # 读取文件内容
        content1, content2 = self._read_file(file1), self._read_file(file2)

        # 空文件处理
        if not content1 and not content2:
            return True, 1  # 两个空文件视为相同

        # 选择计算方法
        method = self.similarity_method_map.get(method)
        if method is not None and hasattr(self, method):
            similarity = getattr(self, method)(content1, content2)
            return similarity >= similarity_threshold, similarity
        else:
            raise ValueError(f"未知的相似度计算方法: {method}")

    @classmethod
    def _read_file(cls, file_path: str | Path) -> str:
        """读取文件内容并进行预处理"""
        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"文件不存在: {path}")

        # 读取文件内容
        try:
            with open(path, 'r', encoding='utf-8') as f:
                content = f.read()
        except UnicodeDecodeError:
            # 尝试其他编码
            with open(path, 'r', encoding='latin-1') as f:
                content = f.read()

        # 基础预处理
        content = content.lower()
        content = re.sub(r'\s+', ' ', content)  # 合并连续空白
        return content.strip()

    @classmethod
    def _cosine_similarity(cls, text1: str, text2: str) -> float:
        """计算余弦相似度"""
        # 创建词频向量
        vec1 = Counter(cls._tokenize(text1))
        vec2 = Counter(cls._tokenize(text2))

        # 获取所有唯一词
        words = set(vec1.keys()) | set(vec2.keys())

        # 创建向量
        vector1 = [vec1.get(word, 0) for word in words]
        vector2 = [vec2.get(word, 0) for word in words]

        # 计算点积
        dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))

        # 计算模长
        magnitude1 = math.sqrt(sum(v ** 2 for v in vector1))
        magnitude2 = math.sqrt(sum(v ** 2 for v in vector2))

        # 避免除以零
        if magnitude1 == 0 or magnitude2 == 0:
            return 0.0

        return dot_product / (magnitude1 * magnitude2)

    @classmethod
    def _jaccard_similarity(cls, text1: str, text2: str) -> float:
        """计算Jaccard相似度"""
        set1 = set(cls._tokenize(text1))
        set2 = set(cls._tokenize(text2))

        intersection = len(set1 & set2)
        union = len(set1 | set2)

        if union == 0:
            return 1.0  # 两个空集

        return intersection / union

    @classmethod
    def _levenshtein_similarity(cls, text1: str, text2: str) -> float:
        """基于编辑距离的相似度"""
        # 计算编辑距离
        n, m = len(text1), len(text2)
        if n == 0 or m == 0:
            return 0.0

        # 创建距离矩阵
        d = [[0] * (m + 1) for _ in range(n + 1)]

        # 初始化边界
        for i in range(n + 1):
            d[i][0] = i
        for j in range(m + 1):
            d[0][j] = j

        # 计算距离
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                cost = 0 if text1[i - 1] == text2[j - 1] else 1
                d[i][j] = min(
                    d[i - 1][j] + 1,  # 删除
                    d[i][j - 1] + 1,  # 插入
                    d[i - 1][j - 1] + cost  # 替换
                )

        distance = d[n][m]
        max_len = max(n, m)
        return 1 - (distance / max_len)

    @classmethod
    def _sequence_similarity(cls, text1: str, text2: str) -> float:
        """基于序列匹配的相似度"""
        matcher = difflib.SequenceMatcher(None, text1, text2)
        return matcher.ratio()

    @classmethod
    def _tokenize(cls, text: str) -> list[str]:
        """文本分词处理"""
        # 移除标点
        text = text.translate(str.maketrans('', '', string.punctuation))
        # 分词
        return text.split()


def main(file1: str | Path, file2: str | Path, similarity_threshold: float = 0.8, method: str = "cosine"):
    """Convenience wrapper: compare two files via __FileSimilar.are_files_similar."""
    comparator = __FileSimilar()
    return comparator.are_files_similar(file1, file2, similarity_threshold, method)


if __name__ == '__main__':
    # Ad-hoc manual check against two developer-local files.
    result = main(
        r'E:\PythonProject\yun-cool-cinema\tools\file_similar.py',
        r'/tools/scan_movie_mate_path.py'
    )
    print(result)