import ast
import os
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import sys
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Configure matplotlib fonts for CJK output
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so Chinese heatmap titles render
matplotlib.rcParams['axes.unicode_minus'] = False   # keep the minus sign renderable with a CJK font

def extract_variable_names(source):
    """Return the set of variable names that are assigned to in *source*.

    Only ``ast.Name`` nodes in a Store context are collected, i.e. targets
    of assignments, loop variables, etc. — not mere reads of a name.
    """
    assigned = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            assigned.add(node.id)
    return assigned

def extract_function_and_class_names(source):
    """Extract the names of functions (sync and async) and classes.

    Returns a tuple ``(function_names, class_names)`` of two sets.

    Fix: ``async def`` produces an ``ast.AsyncFunctionDef`` node, which is a
    distinct type from ``ast.FunctionDef`` — the original check silently
    skipped every async function.
    """
    tree = ast.parse(source)
    function_names = set()
    class_names = set()

    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            function_names.add(node.name)
        elif isinstance(node, ast.ClassDef):
            class_names.add(node.name)

    return function_names, class_names

def serialize_ast(source):
    """Serialize the AST of *source* to a canonical string.

    Position attributes (line/column numbers) are omitted so that two
    sources differing only in formatting serialize identically.
    """
    tree = ast.parse(source)
    dumped = ast.dump(tree, annotate_fields=True, include_attributes=False)
    return dumped

def calculate_similarity(files):
    """Compute pairwise cosine similarity between the given documents.

    Args:
        files: sequence of strings (here: serialized ASTs), one per file.

    Returns:
        An N x N numpy array where entry (i, j) is the cosine similarity
        between documents i and j.
    """
    tfidf = TfidfVectorizer().fit_transform(files)
    # cosine_similarity accepts the sparse TF-IDF matrix directly; the
    # previous .toarray() densified it first, wasting memory for large
    # corpora without changing the result.
    return cosine_similarity(tfidf)

def visualize_similarity_matrix(df, output_path):
    """Render *df* as an annotated heatmap and save it to *output_path*.

    Cell annotations keep three decimal places; the figure is closed after
    saving so it is not re-displayed by later plotting calls.
    """
    fig, ax = plt.subplots(figsize=(10, 8))
    sns.heatmap(df, annot=True, fmt=".3f", cmap='coolwarm', ax=ax)
    ax.set_title('代码结构相似度热图')
    fig.savefig(output_path)
    plt.close(fig)

def _tally_names(names, counter, file_name):
    """Increment counter[name][file_name] for every name in *names*."""
    for name in names:
        counter[name][file_name] += 1


def _write_duplicate_section(result_file, title, label, mapping):
    """Write one duplicates section (names appearing in more than one file).

    Args:
        result_file: open text file to write to.
        title: section heading (may start with a newline separator).
        label: column header for the name column.
        mapping: name -> {file_name: occurrence_count}.
    """
    result_file.write(f"{title}\n")
    result_file.write(f"{label:<20}{'出现次数':<15}{'出现文件'}\n")
    result_file.write("=" * 50 + "\n")
    for name, per_file in mapping.items():
        # Only names found in MORE THAN ONE file count as duplicates.
        if len(per_file) > 1:
            total = sum(per_file.values())
            file_list = ', '.join([f"{file}({count})" for file, count in per_file.items()])
            result_file.write(f"{name:<20}{total:<15}{file_list}\n")


def main(folder_path):
    """Analyze every .py file in *folder_path* for naming overlap and AST similarity.

    Outputs, under an '输出结果' sub-folder:
      - an Excel file with the pairwise cosine-similarity matrix,
      - a TXT report of variable/function/class names shared across files,
      - a PNG heatmap of the similarity matrix.
    """
    # Create the output directory (no error if it already exists).
    output_folder = os.path.join(folder_path, '输出结果')
    os.makedirs(output_folder, exist_ok=True)

    file_paths = [os.path.join(folder_path, file)
                  for file in os.listdir(folder_path) if file.endswith('.py')]

    variable_dict = defaultdict(lambda: defaultdict(int))
    function_dict = defaultdict(lambda: defaultdict(int))
    class_dict = defaultdict(lambda: defaultdict(int))
    serialized_ast_dict = {}

    for file_path in file_paths:
        try:
            print(f"正在处理文件 {file_path}")
            with open(file_path, 'r', encoding='utf-8') as file:
                source = file.read()

            base_name = os.path.basename(file_path)
            _tally_names(extract_variable_names(source), variable_dict, base_name)
            functions, classes = extract_function_and_class_names(source)
            _tally_names(functions, function_dict, base_name)
            _tally_names(classes, class_dict, base_name)
            serialized_ast_dict[base_name] = serialize_ast(source)

        # UnicodeDecodeError added: a non-UTF-8 file should be skipped with a
        # message, not crash the whole run.
        except (SyntaxError, FileNotFoundError, UnicodeDecodeError) as e:
            print(f"处理文件 {file_path} 时出错: {e}")

    file_names = list(serialized_ast_dict.keys())
    serialized_asts = list(serialized_ast_dict.values())

    # Guard: TfidfVectorizer raises on an empty corpus; bail out gracefully.
    if not serialized_asts:
        print("未找到可解析的 Python 文件，跳过相似度计算。")
        return

    print("正在执行相似度计算...")
    similarity_matrix = calculate_similarity(serialized_asts)

    print("准备输出相似度结果到 Excel 文件...")
    output_file = os.path.join(output_folder, '相似度结果.xlsx')
    # DataFrame indexed by file name on both axes; written at full precision.
    df = pd.DataFrame(similarity_matrix, index=file_names, columns=file_names)
    df.to_excel(output_file)
    print("输出相似度结果到 Excel 文件成功！")

    # Report duplicated variable, function and class names to a TXT file.
    with open(os.path.join(output_folder, '重复项结果.txt'), 'w', encoding='utf-8') as result_file:
        print("正在输出重复项结果到 TXT 文件...")
        _write_duplicate_section(result_file, "重复的变量名:", '变量名', variable_dict)
        _write_duplicate_section(result_file, "\n重复的函数名:", '函数名', function_dict)
        _write_duplicate_section(result_file, "\n重复的类名:", '类名', class_dict)
        print("输出重复项结果到 TXT 文件成功！")

    # Visualize the similarity matrix as a heatmap image.
    heatmap_path = os.path.join(output_folder, '相似度热图.png')
    visualize_similarity_matrix(df, heatmap_path)
    print(f"相似度热图已保存到 {heatmap_path}")

if __name__ == "__main__":
    # Require exactly one CLI argument: the folder to analyze.
    if len(sys.argv) < 2:
        print("请提供需要处理的文件夹路径。")
        sys.exit(1)

    main(sys.argv[1])
