from collections import Counter

import jieba
import PyPDF2

'''
大部分年报是 pdf 格式，转换为 txt 
'''

def pdf_to_txt(pdf_path, txt_path):
    """Extract all text from a PDF file and write it to a UTF-8 text file.

    Args:
        pdf_path: Path to the source PDF file.
        txt_path: Path of the text file to create (overwritten if present).
    """
    # Open the PDF in binary mode; PdfReader handles parsing.
    with open(pdf_path, 'rb') as pdf_file:
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        # extract_text() may return None for pages with no extractable text
        # (e.g. scanned/image pages); `or ""` guards against the TypeError the
        # original `text += page.extract_text()` would raise. str.join avoids
        # the quadratic += string build.
        text = "".join((page.extract_text() or "") for page in pdf_reader.pages)
    # Write the extracted text out as UTF-8.
    with open(txt_path, 'w', encoding='utf-8') as txt_file:
        txt_file.write(text)

'''
中文分词
'''
def count_word_frequency_chinese(txt_path):
    """Count word frequencies in a Chinese text file using jieba segmentation.

    Single-character tokens (which in practice include punctuation and
    whitespace produced by the segmenter) are ignored, preserving the
    original ``len(word) > 1`` filter.

    Args:
        txt_path: Path to a UTF-8 encoded text file.

    Returns:
        A list of (word, count) tuples sorted by count in descending order.
    """
    with open(txt_path, 'r', encoding='utf-8') as txt_file:
        words = jieba.lcut(txt_file.read())
    # Counter replaces the manual dict bookkeeping; most_common() returns
    # (word, count) pairs in descending frequency with a stable tie order,
    # matching the original sorted(..., reverse=True) output exactly.
    counts = Counter(word for word in words if len(word) > 1)
    return counts.most_common()

def count_word_frequency(txt_path, unwanted_words=None):
    """Count word frequencies in a whitespace-delimited (e.g. English) text file.

    Each token is lowercased and stripped of surrounding punctuation before
    counting. Tokens that reduce to the empty string (pure punctuation such
    as ``"..."``) are skipped — the original implementation counted them
    under the key ``""``, which was a defect.

    Args:
        txt_path: Path to a UTF-8 encoded text file.
        unwanted_words: Optional container of lowercase stop words to
            exclude from the count. Defaults to no exclusions.

    Returns:
        A list of (word, count) tuples sorted by count in descending order.
    """
    # The original default was a mutable `{}` shared across calls; use the
    # None-sentinel idiom instead. Membership tests work on any container
    # callers previously passed (dict, set, list).
    if unwanted_words is None:
        unwanted_words = frozenset()
    with open(txt_path, 'r', encoding='utf-8') as txt_file:
        text = txt_file.read()
    counts = Counter()
    for token in text.split():
        cleaned = token.strip('.,!?():;"\'').lower()
        if cleaned and cleaned not in unwanted_words:
            counts[cleaned] += 1
    # most_common() yields descending counts with stable tie order, same as
    # sorted(items, key=..., reverse=True).
    return counts.most_common()