import csv
import math
import os
from collections import Counter

import jieba


def load_filter_chars(file_path):
    """Read a stop-token file (one token per line, UTF-8).

    Blank lines are dropped and surrounding whitespace is stripped from
    each entry; the kept tokens are returned as a list in file order.
    """
    tokens = []
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            token = raw_line.strip()
            if token:
                tokens.append(token)
    return tokens


def calculate_entropy(tokens):
    """Return the Shannon entropy (in bits) of the token distribution.

    Args:
        tokens: sequence of hashable tokens (characters or words).

    Returns:
        float: -sum(p * log2(p)) over the distinct-token frequencies.
        Returns 0.0 for an empty sequence (the original expression also
        yielded 0 there, but via an empty sum; the guard makes it explicit
        and avoids any division by zero).
    """
    total_count = len(tokens)
    if total_count == 0:
        return 0.0
    # Counter is the stdlib replacement for the hand-rolled frequency dict.
    frequencies = Counter(tokens)
    return -sum(
        (count / total_count) * math.log2(count / total_count)
        for count in frequencies.values()
    )


def tokenize_text(text, unit='char', filter_chars=None):
    """Tokenize *text* and drop tokens found in *filter_chars*.

    Args:
        text: input string.
        unit: 'char' for per-character tokens, 'word' for jieba word
            segmentation.
        filter_chars: optional iterable of tokens to exclude. The original
            code dereferenced this directly, so the documented default of
            None crashed with TypeError; it is now normalized to a set,
            which also gives O(1) membership tests instead of scanning a
            list per token.

    Returns:
        list of tokens not present in filter_chars.

    Raises:
        ValueError: if unit is neither 'char' nor 'word'.
    """
    excluded = set(filter_chars) if filter_chars is not None else set()
    if unit == 'char':
        return [char for char in text if char not in excluded]
    elif unit == 'word':
        return [word for word in jieba.lcut(text) if word not in excluded]
    else:
        raise ValueError("Invalid unit. Please choose 'char' or 'word'.")


def process_folder(folder_path, filter_file, output_file):
    """Compute character- and word-level entropy for every .txt file.

    Recursively walks *folder_path*, reads each .txt file as GB18030,
    tokenizes it (chars and jieba words, both filtered through the stop
    tokens loaded from *filter_file*), and writes one CSV row per file
    to *output_file*. Files that fail GB18030 decoding are reported to
    stdout and skipped.
    """
    stop_tokens = load_filter_chars(filter_file)
    rows = []
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        for name in filenames:
            if not name.endswith('.txt'):
                continue
            file_path = os.path.join(dirpath, name)
            try:
                with open(file_path, 'r', encoding='gb18030') as handle:
                    # read() is where a bad byte sequence surfaces as
                    # UnicodeDecodeError for this encoding.
                    content = handle.read()
                    char_tokens = tokenize_text(content, 'char', stop_tokens)
                    word_tokens = tokenize_text(content, 'word', stop_tokens)
                    rows.append([
                        file_path,
                        calculate_entropy(char_tokens),
                        calculate_entropy(word_tokens),
                    ])
            except UnicodeDecodeError:
                print(f"Skipping file due to encoding error: {file_path}")

    # Persist all results in one pass.
    with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['File Path', 'Character Entropy', 'Word Entropy'])
        writer.writerows(rows)


# Main program entry. Guarded so importing this module for its functions
# does not immediately walk the corpus and write the CSV (the original
# ran unconditionally at import time).
if __name__ == '__main__':
    folder_path = './jyxstxtqj_downcc'      # corpus root (novel .txt files)
    filter_file = './cn_stopwords.txt'      # stop tokens, one per line
    output_file = 'entropy_results.csv'     # CSV report destination
    process_folder(folder_path, filter_file, output_file)
