import difflib
import tokenize
from io import StringIO
import os
import pandas as pd  # 引入 pandas 库
import sys

def read_file(file_path):
    """Return the entire text content of *file_path*, decoded as UTF-8."""
    with open(file_path, encoding='utf-8') as handle:
        return handle.read()

def remove_comments_and_docstrings(source):
    """Strip comments and docstrings from Python *source*, preserving layout.

    Tokenizes the source, drops COMMENT tokens entirely, and drops STRING
    tokens that act as docstrings. Column positions of surviving tokens are
    preserved with padding spaces so the line structure stays comparable;
    trailing whitespace is removed from every line.

    Raises:
        tokenize.TokenError / IndentationError: if *source* cannot be
            tokenized (e.g. syntactically broken input).
    """
    io_obj = StringIO(source)
    out = ""
    prev_toktype = tokenize.INDENT
    last_lineno = -1
    last_col = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col:
            # Pad with spaces so surviving tokens keep their columns.
            out += (" " * (start_col - last_col))
        if token_type == tokenize.COMMENT:
            pass  # drop comments entirely
        elif token_type == tokenize.STRING and (
            prev_toktype == tokenize.INDENT
            or (prev_toktype == tokenize.NEWLINE and start_col == 0)
        ):
            # Docstring: a string that is the first token of a suite
            # (follows INDENT) or a standalone module-level string
            # statement (follows NEWLINE at column 0). Previously only
            # the INDENT case was handled, so module-level docstrings
            # that were not the file's first statement survived the
            # strip and skewed the similarity scores.
            pass
        else:
            out += tok[1]
        prev_toktype = token_type
        last_col = end_col
        last_lineno = end_line
    out = '\n'.join(line.rstrip() for line in out.split('\n'))
    return out

def remove_empty_lines(code):
    """Return *code* with every blank (whitespace-only) line removed."""
    kept = []
    for raw_line in code.splitlines():
        if raw_line.strip():
            kept.append(raw_line)
    return '\n'.join(kept)

def calculate_similarity(code1, code2):
    """Return the character-level similarity ratio (0.0-1.0) between the
    two code strings, computed by difflib after blank lines are dropped."""
    left = remove_empty_lines(code1)
    right = remove_empty_lines(code2)
    matcher = difflib.SequenceMatcher(None, left, right)
    return matcher.ratio()

def extract_similar_code(code1, code2):
    """Return the non-blank lines the two code strings have in common,
    as reported by difflib.ndiff (common lines carry a two-space prefix)."""
    shared = []
    for diff_line in difflib.ndiff(code1.splitlines(), code2.splitlines()):
        if diff_line.startswith('  ') and diff_line.strip():
            shared.append(diff_line)
    return '\n'.join(shared)

def main(folder_path):
    """Compare every pair of .py files in *folder_path* and report results.

    Writes two artifacts into a '输出结果' subfolder of *folder_path*:
      - 代码重复率.xlsx: an NxN matrix of pairwise similarity percentages.
      - 重复代码片段.txt: the shared code lines for each file pair.

    Args:
        folder_path: directory containing the .py files to compare.
    """
    # Create the output directory (no error if it already exists).
    output_folder = os.path.join(folder_path, '输出结果')
    os.makedirs(output_folder, exist_ok=True)

    # Collect every Python file in the folder (non-recursive).
    file_paths = [os.path.join(folder_path, file)
                  for file in os.listdir(folder_path) if file.endswith('.py')]
    # Display names without the .py extension, used as matrix labels.
    file_names = [os.path.splitext(os.path.basename(file))[0] for file in file_paths]

    cleaned_codes = _load_cleaned_codes(file_paths)

    print("开始计算相似度...")
    similarity_matrix = _build_similarity_matrix(file_paths, cleaned_codes)

    # Export the matrix to Excel with file names as both axes.
    df = pd.DataFrame(similarity_matrix, columns=file_names, index=file_names)
    output_excel_file = os.path.join(output_folder, '代码重复率.xlsx')
    df.to_excel(output_excel_file, index=True)
    print(f'代码重复率数据已成功输出到 {output_excel_file}')

    _write_similar_snippets(output_folder, file_paths, cleaned_codes)
    print(f'重复代码片段已输出到重复代码片段.txt中。')


def _load_cleaned_codes(file_paths):
    """Read each file and strip comments/docstrings; map path -> cleaned code.

    A file that cannot be tokenized (syntax error, truncated source) falls
    back to its raw text so one bad file does not abort the whole run.
    """
    cleaned_codes = {}
    for file_path in file_paths:
        print(f"格式化处理文件: {os.path.basename(file_path)}")
        source = read_file(file_path)
        try:
            cleaned_codes[file_path] = remove_comments_and_docstrings(source)
        except (tokenize.TokenError, IndentationError, SyntaxError):
            # Unparseable file: compare its raw text rather than crashing.
            cleaned_codes[file_path] = source
    return cleaned_codes


def _build_similarity_matrix(file_paths, cleaned_codes):
    """Return an NxN list-of-lists of pairwise similarity percentages.

    The diagonal is left at 0 (a file is not compared with itself).
    """
    n = len(file_paths)
    matrix = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            if i != j:
                ratio = calculate_similarity(cleaned_codes[file_paths[i]],
                                             cleaned_codes[file_paths[j]])
                matrix[i][j] = ratio * 100
    return matrix


def _write_similar_snippets(output_folder, file_paths, cleaned_codes):
    """Write the shared code lines of every ordered file pair to a text file."""
    with open(os.path.join(output_folder, '重复代码片段.txt'), 'w', encoding='utf-8') as f:
        print(f'正在打印相似代码片段到重复代码片段.txt中...')
        f.write("相似代码片段:\n")
        for i, file1 in enumerate(file_paths):
            for j, file2 in enumerate(file_paths):
                if i == j:
                    continue
                similar_code = extract_similar_code(cleaned_codes[file1], cleaned_codes[file2])
                if similar_code:  # only emit pairs that share something
                    f.write(f"\n——— {os.path.basename(file1)} 与 {os.path.basename(file2)} 的相似代码片段 ———\n")
                    f.write(similar_code + "\n")

if __name__ == "__main__":
    # The target folder is required as the first command-line argument.
    if len(sys.argv) < 2:
        print("请提供需要处理的文件夹路径。")
        sys.exit(1)

    folder_path = sys.argv[1]
    main(folder_path)
