import os
import re
from collections import Counter
from io import StringIO

import fitz
import jieba
import matplotlib.pyplot as plt
import pytesseract
from PIL import Image
from jieba import analyse
from wordcloud import WordCloud


def print_wordcloud(word_counts):
    """Render a word cloud from word frequencies and show it with matplotlib.

    :param word_counts: word → count mapping (e.g. a ``Counter``) or an
        iterable of ``(word, count)`` pairs such as ``Counter.most_common()``.
    """
    print(word_counts)
    # BUG FIX: generate_from_frequencies requires a mapping, not a list.
    # dict() accepts both a Counter and a list of (word, count) pairs.
    # (Sorting is pointless here — the cloud sizes words by frequency itself.)
    frequencies = dict(word_counts)

    # Build the word cloud (simhei.ttf so CJK glyphs render).
    wordcloud = WordCloud(font_path='simhei.ttf', width=800, height=400,
                          background_color='white').generate_from_frequencies(frequencies)

    # Display the word cloud.
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')  # hide the axes
    plt.title('热词云图')
    plt.show()


def find_pdf_files(directory):
    """Recursively collect paths of all PDF files under *directory*.

    :param directory: root directory to walk.
    :return: list of absolute/relative paths (as joined by os.walk) to PDFs.
    """
    pdf_files = []
    for root, _dirs, files in os.walk(directory):
        for file in files:
            # Case-insensitive match: on Windows (cf. 'D:\opt' usage) PDF
            # files are frequently named '.PDF'.
            if file.lower().endswith('.pdf'):
                pdf_files.append(os.path.join(root, file))
    return pdf_files


def count_hot_chinese_words(text, focus_set=None, exclude_set=None, top_n=10):
    """Count the most frequent Chinese words in *text* and show a word cloud.

    :param text: raw input text (non-Chinese characters are stripped).
    :param focus_set: if given, only words in this set are counted.
    :param exclude_set: if given, words in this set are never counted.
    :param top_n: number of top words to return.
    :return: list of (word, count) pairs, most frequent first.
    """
    # Keep only CJK Unified Ideographs (basic block).
    chinese_only_text = re.sub(r'[^\u4e00-\u9fa5]', '', text)

    # BUG FIX: jieba.analyse.set_stop_words configures only the keyword
    # extraction module (jieba.analyse.*); jieba.lcut ignores it, so
    # stopwords.txt was previously never applied. Load the stop words
    # ourselves and filter after segmentation. Assumes one word per line
    # (the conventional stop-word file format) — TODO confirm.
    try:
        with open('stopwords.txt', encoding='utf-8') as f:
            stop_words = {line.strip() for line in f if line.strip()}
    except OSError:
        stop_words = set()  # best-effort: continue without stop words

    # Load the custom user dictionary so domain terms segment as one token.
    jieba.load_userdict('mydict.txt')
    # Segment with jieba.
    words = jieba.lcut(chinese_only_text)

    # Apply stop words, then the optional focus / exclude sets.
    words = [word for word in words if word not in stop_words]
    if focus_set:
        words = [word for word in words if word in focus_set]
    if exclude_set:
        words = [word for word in words if word not in exclude_set]

    # Frequency table over the filtered tokens.
    word_counts = Counter(words)
    hot_words = word_counts.most_common(top_n)

    # Render the cloud from the full frequency table; a Counter is a valid
    # mapping for generate_from_frequencies.
    wordcloud = WordCloud(font_path='simhei.ttf', width=800, height=400,
                          background_color='white').generate_from_frequencies(word_counts)
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')  # hide the axes
    plt.title('热词云图')
    plt.show()
    return hot_words


def pdf_to_ocr(pdf_path):
    """OCR every page of a PDF (simplified Chinese) and return the text.

    :param pdf_path: path to the PDF file.
    :return: concatenated OCR text of all pages, with spaces removed
        (OCR of CJK text tends to insert spurious spaces between glyphs).
    """
    buffer = StringIO()
    # BUG FIX: initialize doc before the try block — previously, if
    # fitz.open raised, `doc` was unbound and the finally clause raised
    # NameError, masking the original error.
    doc = None
    try:
        doc = fitz.open(pdf_path)
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)

            # Render the page at 300 DPI (PDF base resolution is 72 DPI).
            pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72), alpha=False)
            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)

            # OCR with Tesseract; 'chi_sim' = simplified Chinese model.
            text = pytesseract.image_to_string(img, lang='chi_sim')
            print(f"Page {page_num + 1}:")

            # BUG FIX: no_space_text was computed but discarded (the write
            # was commented out) — accumulate it so the result is usable.
            buffer.write(text.replace(" ", ""))

        return buffer.getvalue()
    finally:
        buffer.close()
        # Close the document only if it was successfully opened.
        if doc is not None:
            doc.close()


if __name__ == '__main__':
    # Raw string: in 'D:\opt' the sequence '\o' is an invalid escape
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    directory_path = r'D:\opt'  # root directory to scan for PDFs
    pdf_files_list = find_pdf_files(directory_path)
    for pdf_file in pdf_files_list:
        pdf_to_ocr(pdf_file)
