#!/usr/bin/env python
# -*- coding:utf-8 -*-

# file:calculate_similarity.py
# author:张世航
# datetime:2024/11/9 17:45
# software: PyCharm
"""
this is function  description 
"""
# import module your need


# Task 8: Two files each hold an English technical article (e.g. two English
# articles on Python file-handling techniques). Read both articles, compute
# word frequencies, and write each statistic to its own output file.
#     Then compare the similarity of the two articles: if 5 of the top-10
#     most frequent words overlap, similarity is 50%; 6 overlapping -> 60%; etc.

import jieba
from collections import Counter


def get_text(file_path):
    """Read and return the full contents of a UTF-8 text file.

    Args:
        file_path: Path of the file to read.

    Returns:
        The file contents as a string, or "" if the file cannot be read
        (best-effort: the error is printed, not raised).
    """
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            return file.read()
    except OSError as e:
        # Narrowed from `except Exception`: only I/O failures (missing file,
        # permission denied, ...) are swallowed; programming errors propagate.
        print(f"发生错误: {e}")
        return ""


def extract_keywords(text, top_n=10):
    """Return the *top_n* most frequent whitespace-separated words in *text*.

    NOTE: despite the module-level `import jieba`, no segmentation is done
    here — tokenization is plain `str.split()`, which suits the English
    articles this script targets. (The old comment claiming jieba was used
    was misleading and has been removed.)

    Args:
        text: Raw article text.
        top_n: Number of (word, count) pairs to return (default 10,
            matching the assignment's top-10 requirement).

    Returns:
        A list of (word, count) tuples, most frequent first; ties keep
        first-seen order (Counter.most_common semantics).
    """
    words = text.split()
    return Counter(words).most_common(top_n)


def write_keywords_to_file(keywords, file_path):
    """Write (word, count) pairs to *file_path*, one "word: count" per line.

    Args:
        keywords: Iterable of (word, count) tuples.
        file_path: Destination file; written as UTF-8, overwritten if present.
    """
    try:
        with open(file_path, "w", encoding="utf-8") as file:
            for word, count in keywords:
                file.write(f"{word}: {count}\n")
    except OSError as e:
        # Narrowed from `except Exception`: only report I/O failures;
        # bugs (e.g. malformed keyword tuples) should not be silenced.
        print(f"发生错误: {e}")


def calculate_similarity(keywords1, keywords2, top_n=10):
    """Compute the similarity percentage between two keyword lists.

    Per the assignment rule, each word shared between the two top-*top_n*
    lists contributes 100/top_n percentage points (5 shared of 10 -> 50%).

    Args:
        keywords1: (word, count) pairs for the first article.
        keywords2: (word, count) pairs for the second article.
        top_n: Denominator list length (default 10, matching the
            hard-coded value this parameter replaces).

    Returns:
        Similarity as a float percentage in [0.0, 100.0].
    """
    words1 = {word for word, _ in keywords1}
    words2 = {word for word, _ in keywords2}
    return len(words1 & words2) / top_n * 100


def main():
    """Driver: read both articles, dump keyword stats, report similarity."""
    # Replace these names with the actual paths of your two articles.
    article_paths = ("Article1", "Article2")
    output_paths = ("keywords_article1.txt", "keywords_article2.txt")

    # Read each article and extract its top-10 keyword (word, count) pairs.
    keyword_lists = [extract_keywords(get_text(path)) for path in article_paths]

    # Persist one keyword report per article.
    for keywords, out_path in zip(keyword_lists, output_paths):
        write_keywords_to_file(keywords, out_path)

    # Shared top-10 words determine the similarity percentage.
    similarity = calculate_similarity(*keyword_lists)
    print(f"两篇文章的相似度: {similarity:.2f}%")


if __name__ == "__main__":
    main()