import os
import re
from collections import Counter

import fitz
import jieba
import matplotlib.pyplot as plt
import pytesseract
from PIL import Image
from wordcloud import WordCloud


def read_in_chunks(file_object, chunk_size=1024 * 1024):
    """Yield successive chunks read from *file_object*.

    Args:
        file_object: Open file-like object supporting ``.read(size)``.
        chunk_size: Maximum size of each chunk (default 1 MiB).

    Yields:
        Non-empty chunks until the file is exhausted.
    """
    # Walrus form: read and test in one expression; an empty read ends the loop.
    while chunk := file_object.read(chunk_size):
        yield chunk


def process_text(file_path):
    """Count Chinese word frequencies in a UTF-8 text file.

    Reads the file in chunks, strips all non-CJK characters, segments the
    text with jieba, filters stopwords, and accumulates frequencies.

    Args:
        file_path: Path to the UTF-8 text file to analyze.

    Returns:
        collections.Counter mapping each word to its occurrence count.
    """
    # Load stopwords and the custom dictionary ONCE, before the loop.
    # The original reopened stopwords.txt and reloaded the user dict for
    # every 1 MiB chunk, which is pure repeated overhead.
    with open('stopwords.txt', 'r', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}
    jieba.load_userdict('mydict.txt')

    counter = Counter()
    with open(file_path, 'r', encoding='utf-8') as file:
        for text in read_in_chunks(file):
            # Keep only CJK unified ideographs; everything else is noise
            # for a Chinese word cloud.
            piece = re.sub(r'[^\u4e00-\u9fa5]', '', text)
            # NOTE(review): a word spanning a chunk boundary may be cut in
            # two; acceptable noise for frequency statistics on large files.
            words = jieba.cut(piece)
            # Filter stopwords lazily and fold straight into the counter.
            counter.update(w for w in words if w not in stopwords)
    return counter


def find_pdf_files(directory):
    """Recursively collect PDF file paths under *directory*.

    Args:
        directory: Root directory to walk.

    Returns:
        List of full paths to files with a ``.pdf`` extension, matched
        case-insensitively (so ``.PDF`` is also found).
    """
    pdf_files = []
    for root, _dirs, files in os.walk(directory):
        for name in files:
            # Case-insensitive suffix check: the target is a Windows path
            # ('D:\opt'), where '.pdf' and '.PDF' files commonly coexist.
            if name.lower().endswith('.pdf'):
                pdf_files.append(os.path.join(root, name))
    return pdf_files


def pdf_to_ocr(pdf_path):
    """OCR every page of a PDF into a sibling ``.txt`` file.

    Acts as a cache: if the .txt file already exists and is non-empty, it is
    reused without re-running OCR. Pages are rendered at 300 DPI and OCR'd
    with Tesseract's simplified-Chinese model; spaces are stripped from the
    recognized text before writing.

    Args:
        pdf_path: Path to the input PDF file.

    Returns:
        Path to the generated (or cached) .txt file.
    """
    # Replace only the extension. The original used str.replace(".pdf", ".txt"),
    # which substitutes the FIRST ".pdf" occurrence anywhere in the path and
    # could corrupt paths like 'a.pdf.bak/x.pdf'.
    text_path = os.path.splitext(pdf_path)[0] + '.txt'
    if os.path.exists(text_path) and os.path.getsize(text_path) > 0:
        return text_path

    doc = fitz.open(pdf_path)
    try:
        # Open the output ONCE with explicit UTF-8 (the original opened it
        # with the platform default encoding, then reopened it in append
        # mode for every page). 'w' truncates any stale empty file.
        with open(text_path, 'w', encoding='utf-8') as out:
            for page_num in range(len(doc)):
                page = doc.load_page(page_num)

                # Render at 300 DPI (PDF native resolution is 72 DPI);
                # higher resolution markedly improves OCR accuracy.
                pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72), alpha=False)
                img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)

                # Simplified-Chinese OCR model.
                text = pytesseract.image_to_string(img, lang='chi_sim')
                print(f"Page {page_num + 1}:")
                # Tesseract inserts spaces between CJK glyphs; drop them.
                out.write(text.replace(" ", ""))
        return text_path
    finally:
        # Always release the PyMuPDF document handle.
        doc.close()


# Root directory to scan for PDFs. Raw string: in 'D:\opt' the sequence
# '\o' is an invalid escape (SyntaxWarning on Python 3.12+, slated to become
# an error); r'D:\opt' has the identical value without the warning.
directory_path = r'D:\opt'
pdf_files_list = find_pdf_files(directory_path)


def genWordCloud(word_counts, output_path=None):
    """Render *word_counts* as a Chinese word cloud and save it as a PNG.

    Args:
        word_counts: Mapping of word -> frequency (e.g. a collections.Counter).
        output_path: Destination PNG path. Defaults to the module-level
            ``pdf_file`` with ``.pdf`` replaced by ``.png``, preserving the
            original behavior (which read that global implicitly).
    """
    if output_path is None:
        # Backward compatibility: the original silently depended on the
        # global loop variable `pdf_file` set in the __main__ loop.
        output_path = pdf_file.replace(".pdf", ".png")

    wordcloud = WordCloud(font_path='simhei.ttf', width=800, height=400,
                          background_color='white').generate_from_frequencies(word_counts)

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese text with SimHei
    plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')  # hide the axes
    plt.title('热词云图')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    # plt.show()


if __name__ == '__main__':
    # Pipeline: for every PDF found under directory_path, OCR it to a .txt
    # file, count Chinese word frequencies, and save a word-cloud PNG.
    for pdf_file in pdf_files_list:
        # NOTE(review): genWordCloud reads the loop variable `pdf_file` as a
        # global to derive its output filename — do not rename this variable.
        text_path = pdf_to_ocr(pdf_file)
        word_counts = process_text(text_path)
        # Get the top_n most frequent words (currently unused).
        # hot_words = word_counts.most_common(10)
        genWordCloud(word_counts)
