import numpy as np
from collections import defaultdict
import math

class ManualNaiveBayes:
    """Multinomial naive Bayes classifier implemented from scratch.

    Per-class feature likelihoods use Laplace (add-one) smoothing, and
    prediction is done in log space to avoid numeric underflow.
    """

    def __init__(self):
        # P(class): class label -> prior probability, filled by fit().
        self.class_priors = {}
        # P(feature | class): class label -> {feature index -> likelihood}.
        self.feature_likelihoods = defaultdict(dict)
        # Sorted unique class labels seen during fit(); None until fitted.
        self.classes = None

    def fit(self, X, y):
        """Estimate class priors and smoothed feature likelihoods.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Non-negative feature counts (e.g. a bag-of-words matrix).
        y : ndarray of shape (n_samples,)
            Class label for each sample.
        """
        self.classes = np.unique(y)
        n_samples = len(y)
        n_features = X.shape[1]

        for c in self.classes:
            # Prior: fraction of training samples belonging to class c.
            self.class_priors[c] = np.sum(y == c) / n_samples

            # Laplace smoothing: add 1 to every per-feature count, so the
            # denominator equals (total count in class c) + (vocabulary size)
            # and no likelihood is ever zero.
            X_c = X[y == c]
            smoothed_counts = X_c.sum(axis=0) + 1
            denom = smoothed_counts.sum()  # == X_c.sum() + n_features
            for feature_idx in range(n_features):
                self.feature_likelihoods[c][feature_idx] = (
                    smoothed_counts[feature_idx] / denom
                )

    def predict(self, X):
        """Return the most probable class label for each row of X.

        Each class is scored as log P(c) + sum_i x_i * log P(f_i | c).
        Weighting by the feature count x_i makes this a proper multinomial
        model (previously each present feature was counted only once,
        regardless of how often it occurred).
        """
        predictions = []
        for sample in X:
            class_scores = {}
            for c in self.classes:
                score = math.log(self.class_priors[c])
                for feature_idx, feature_value in enumerate(sample):
                    if feature_value > 0:  # only features that occur contribute
                        # Tiny floor guards against feature indices never
                        # seen during fit().
                        likelihood = self.feature_likelihoods[c].get(feature_idx, 1e-8)
                        score += feature_value * math.log(likelihood)
                class_scores[c] = score

            # Argmax over the log-posterior scores.
            predictions.append(max(class_scores, key=class_scores.get))

        return np.array(predictions)

# --- Exercise the hand-rolled naive Bayes on a toy sentiment corpus ---
from sklearn.feature_extraction.text import CountVectorizer

# Five tiny "documents" with obvious polarity.
texts = [
    'good great awesome',
    'bad terrible awful',
    'good nice excellent',
    'bad horrible disgusting',
    'great fantastic wonderful',
]
labels = ['positive', 'negative', 'positive', 'negative', 'positive']

# Bag-of-words feature extraction: one count column per vocabulary word.
bow = CountVectorizer()
X = bow.fit_transform(texts).toarray()
y = np.array(labels)

print("词汇表:", bow.get_feature_names_out())
print("特征矩阵:")
print(X)

# Train the manual implementation.
clf = ManualNaiveBayes()
clf.fit(X, y)

# Classify two unseen snippets with the same vocabulary mapping.
test_texts = ['good great', 'bad terrible']
test_matrix = bow.transform(test_texts).toarray()
manual_preds = clf.predict(test_matrix)

print(f"\n手动实现朴素贝叶斯预测:")
for text, pred in zip(test_texts, manual_preds):
    print(f"文本: '{text}' -> 预测: {pred}")

# Cross-check against scikit-learn's reference implementation.
from sklearn.naive_bayes import MultinomialNB

reference = MultinomialNB()
reference.fit(X, y)
reference_preds = reference.predict(test_matrix)

print(f"\nSklearn朴素贝叶斯预测:")
for text, pred in zip(test_texts, reference_preds):
    print(f"文本: '{text}' -> 预测: {pred}")