
import numpy as np
# NOTE(review): everything from here through the `__main__` demo below is
# commented-out (disabled) code — only `import numpy as np` above is live.
# If the NaiveBayes implementation is meant to be used, uncomment it;
# otherwise consider removing this dead code from version control.
# from collections import defaultdict
#
# class NaiveBayes:
#     def __init__(self):
#         self.class_priors = {}
#         self.feature_probs = defaultdict(lambda: defaultdict(float))
#
#     def train(self, X, y):
#
#         n_samples = len(y)
#         unique_classes = set(y)
#
#         for c in unique_classes:
#             self.class_priors[c] = np.sum(np.array(y) == c) / n_samples
#
#         for c in unique_classes:
#             class_samples = [X[i] for i in range(n_samples) if y[i] == c]
#             class_samples = np.array(class_samples)
#
#             for feature_index in range(class_samples.shape[1]):
#                 feature_values, counts = np.unique(class_samples[:, feature_index], return_counts=True)
#                 total_count = counts.sum()
#
#                 for value, count in zip(feature_values, counts):
#                     self.feature_probs[feature_index][(value, c)] = count / total_count
#
#     def predict(self, X):
#         predictions = []
#
#         for sample in X:
#             class_scores = {}
#
#             for c in self.class_priors.keys():
#                 class_scores[c] = np.log(self.class_priors[c])
#
#                 for feature_index, feature_value in enumerate(sample):
#                     if (feature_value, c) in self.feature_probs[feature_index]:
#                         class_scores[c] += np.log(self.feature_probs[feature_index][(feature_value, c)])
#
#             predictions.append(max(class_scores, key=class_scores.get))
#
#         return predictions
#
# if __name__ == "__main__":
#     X_train = np.array([[1, 0], [1, 1], [0, 0], [0, 1]])
#     y_train = np.array(["A", "A", "B", "B"])
#
#     X_test = np.array([[1, 0], [0, 0]])
#
#     model = NaiveBayes()
#     model.train(X_train, y_train)
#     predictions = model.predict(X_test)
#     print("Predictions:", predictions)


#('李四', 2210200310, '男', '自动化', 405, 4, 4)
#   translation: ('Li Si', 2210200310, 'Male', 'Automation', 405, 4, 4)
#'宿舍已满，无法分配'
#   translation: 'Dormitory is full; cannot assign'
# NOTE(review): these look like leftover notes from a separate
# dormitory-assignment exercise, unrelated to the NaiveBayes code above —
# confirm whether they belong in this file.
#
#
#