import os
import re
from collections import Counter, defaultdict

import numpy as np

def load_local_data(data_dir, max_categories=None, max_files_per_category=None):
    """Load a 20news-style directory tree (one sub-directory per category).

    Parameters
    ----------
    data_dir : root directory; each immediate sub-directory is a category
        whose files are plain-text documents.
    max_categories : if set, keep only the first N categories (after sorting).
    max_files_per_category : if set, read at most N files per category.

    Returns
    -------
    (texts, labels, label_names) where labels[i] is an index into label_names.
    """
    texts = []
    labels = []
    # Sort and keep only real directories: os.listdir order is arbitrary and
    # platform-dependent, so without sorting the label -> index mapping could
    # differ between separately loaded train and test trees, silently
    # corrupting evaluation. Filtering first also stops stray files from
    # counting against max_categories or leaking into label_names.
    label_names = sorted(
        name for name in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, name))
    )
    if max_categories:
        label_names = label_names[:max_categories]
    label_to_index = {name: idx for idx, name in enumerate(label_names)}

    for label_name in label_names:
        label_dir = os.path.join(data_dir, label_name)
        print(f"Processing category: {label_name}")
        for i, file_name in enumerate(sorted(os.listdir(label_dir))):
            if max_files_per_category and i >= max_files_per_category:
                break
            file_path = os.path.join(label_dir, file_name)
            # Explicit encoding avoids locale-dependent decoding; stray
            # non-UTF-8 bytes in the corpus are dropped rather than fatal.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                texts.append(f.read())
                labels.append(label_to_index[label_name])
    print(f"Loaded {len(texts)} texts from {data_dir}")
    return texts, labels, label_names


def preprocess_text(text):
    """Tokenize *text*: lowercase it and return its purely alphabetic words."""
    return re.findall(r'\b[a-zA-Z]+\b', text.lower())

def build_vocab(data, max_vocab_size=5000):
    """Build a vocabulary of the most frequent words in *data*.

    Parameters
    ----------
    data : iterable of raw text documents.
    max_vocab_size : keep at most this many of the highest-frequency words.

    Returns
    -------
    Set of the selected vocabulary words.
    """
    word_freq = Counter()
    for text in data:
        word_freq.update(preprocess_text(text))
    # most_common(n) is documented as equivalent to a stable
    # sorted-by-count-descending slice, matching the previous manual sort.
    vocab = {word for word, _ in word_freq.most_common(max_vocab_size)}
    print(f"Vocabulary size: {len(vocab)}")
    return vocab

def compute_word_counts(data, labels, vocab):
    """Tally in-vocabulary word occurrences and document counts per label.

    Returns (word_counts, label_counts): word_counts maps each distinct
    label to a defaultdict of word -> occurrence count; label_counts maps
    each label to its number of documents.
    """
    word_counts = {lbl: defaultdict(int) for lbl in np.unique(labels)}
    label_counts = defaultdict(int)

    for i, (text, label) in enumerate(zip(data, labels)):
        # Progress heartbeat every 100 documents.
        if i % 100 == 0:
            print(f"Processing document {i+1}/{len(data)}")
        per_label = word_counts[label]
        for token in preprocess_text(text):
            if token in vocab:
                per_label[token] += 1
        label_counts[label] += 1
    return word_counts, label_counts

class NaiveBayes:
    """Multinomial naive Bayes text classifier with Laplace smoothing.

    Parameters
    ----------
    vocab : set of feature words; tokens outside it are ignored everywhere.
    alpha : additive (Laplace) smoothing strength.
    """

    def __init__(self, vocab, alpha=1.0):
        self.vocab = vocab
        self.alpha = alpha
        # All of the following are populated by fit():
        self.word_counts = None            # label -> {word: count}
        self.label_counts = None           # label -> number of training docs
        self.total_words_per_label = None  # label -> total in-vocab tokens
        self.priors = None                 # label -> empirical P(label)

    def fit(self, data, labels):
        """Estimate per-class word counts and class priors from training data."""
        self.word_counts, self.label_counts = compute_word_counts(data, labels, self.vocab)
        self.total_words_per_label = {
            label: sum(counts.values()) for label, counts in self.word_counts.items()
        }
        total_docs = sum(self.label_counts.values())
        self.priors = {label: n / total_docs for label, n in self.label_counts.items()}

    def predict(self, text):
        """Return the label with the highest posterior log-probability for *text*."""
        # Filter against the vocabulary once, not once per candidate label.
        words = [w for w in preprocess_text(text) if w in self.vocab]
        vocab_size = len(self.vocab)
        label_scores = {}

        for label in self.label_counts:
            counts = self.word_counts[label]
            # The smoothed denominator is constant per label — hoist it out
            # of the per-word loop instead of recomputing it for every token.
            denom = self.total_words_per_label[label] + self.alpha * vocab_size
            log_prob = np.log(self.priors[label])
            for word in words:
                log_prob += np.log((counts[word] + self.alpha) / denom)
            label_scores[label] = log_prob

        return max(label_scores, key=label_scores.get)

    def score(self, data, labels):
        """Return classification accuracy of predict() over (data, labels)."""
        predictions = [self.predict(text) for text in data]
        return np.mean(np.array(predictions) == np.array(labels))

if __name__ == "__main__":
    # Standard 20-newsgroups "bydate" split layout: one directory per category.
    train_dir = "data/20news-bydate-train"
    test_dir = "data/20news-bydate-test"

    # NOTE(review): labels are mapped to indices independently per call, so
    # both loads must produce the same category ordering for test labels to
    # line up with the train mapping — confirm directory listings agree.
    train_texts, train_labels, label_names = load_local_data(train_dir)
    test_texts, test_labels, _ = load_local_data(test_dir)

    # Prints: train/test sample counts, then category count and names.
    print(f"训练集样本数: {len(train_texts)}, 测试集样本数: {len(test_texts)}")
    print(f"类别数量: {len(label_names)}, 类别名称: {label_names}")

    # Vocabulary is built from the training set only (no test-set leakage).
    vocab = build_vocab(train_texts, max_vocab_size=5000)

    print("开始训练模型...")  # "Training model..."
    nb = NaiveBayes(vocab)
    nb.fit(train_texts, train_labels)
    print("模型训练完成！")  # "Training complete!"

    print("开始测试模型...")  # "Evaluating model..."
    accuracy = nb.score(test_texts, test_labels)
    # "Naive Bayes classifier accuracy on the test set: ..."
    print(f"朴素贝叶斯分类器在测试集上的准确率: {accuracy:.4f}")
