import numpy as np
from collections import defaultdict
import warnings

# Suppress warnings (e.g. numpy runtime warnings from divisions below)
warnings.filterwarnings("ignore")

# Load the data set
def load_dataset(filename, num_features=4):
    """Load a CSV data set whose leading columns are numeric features
    followed by one string class label (the UCI iris layout).

    Args:
        filename: path to a comma-separated text file.
        num_features: number of leading feature columns. Default 4
            matches the iris data; parameterized instead of the previous
            hard-coded ``0:4`` / column-4 slice.

    Returns:
        Tuple ``(features, labels)``: a float ndarray of shape
        ``(n_samples, num_features)`` and a string ndarray of labels.
    """
    data = np.loadtxt(filename, delimiter=',', dtype=str)
    features = data[:, :num_features].astype(float)  # feature columns as floats
    labels = data[:, num_features]  # keep labels as strings
    return features, labels

# Compute prior and conditional probabilities
def calculate_probabilities(features, labels):
    """Estimate naive Bayes probability tables from training data.

    Treats every distinct feature value as a category (no Gaussian
    modeling), so continuous features only match exactly-equal values.

    Args:
        features: float array of shape ``(n_samples, n_features)``.
        labels: array of class labels, one per sample.

    Returns:
        Mapping ``label -> {'prior': P(label),
        'features': {(i, value): P(feature_i == value | label)}}``.
    """
    num_samples = len(features)
    num_features = len(features[0])
    probabilities = defaultdict(lambda: {'prior': 0, 'features': {}})

    # Hoist label bookkeeping out of the nested loops: np.unique(labels),
    # the boolean masks, and the per-label counts were previously recomputed
    # for every (feature, value, label) triple.
    unique_labels = np.unique(labels)
    label_masks = {label: labels == label for label in unique_labels}
    label_counts = {label: np.sum(mask) for label, mask in label_masks.items()}

    # Prior probability of each class.
    for label in unique_labels:
        probabilities[label]['prior'] = label_counts[label] / num_samples

    # Conditional probability of each observed value given each class.
    # NOTE(review): no Laplace smoothing — a value never seen with a label
    # gets probability 0 here; consider add-one smoothing if that matters.
    for i in range(num_features):
        for value in np.unique(features[:, i]):
            value_mask = features[:, i] == value  # invariant across labels
            for label in unique_labels:
                probabilities[label]['features'][(i, value)] = (
                    np.sum(value_mask & label_masks[label]) / label_counts[label]
                )

    return probabilities

# Classification function
def classify(probabilities, sample):
    """Return the most probable class label for *sample*.

    Args:
        probabilities: tables produced by ``calculate_probabilities``:
            ``label -> {'prior': float, 'features': {(i, value): float}}``.
        sample: sequence of feature values, indexed to match the tables.

    Returns:
        The label whose prior-times-likelihood product is largest.
    """
    scores = {}
    for label in probabilities:
        score = probabilities[label]['prior']
        conditionals = probabilities[label]['features']
        for i, value in enumerate(sample):
            # Unseen (feature, value) pairs contribute a neutral factor of
            # 1.0 — i.e. the feature is ignored, not treated as equiprobable.
            # (The original had a separate no-op `*= 1` else branch whose
            # comment claimed "equal probability"; dict.get makes the actual
            # behavior explicit.)
            score *= conditionals.get((i, value), 1.0)
        scores[label] = score
    return max(scores, key=scores.get)

# Main entry point
def main():
    """Load the iris data set, fit the naive Bayes tables, and classify
    the first training sample as a smoke test."""
    features, labels = load_dataset('iris.data')

    # Fit: priors and per-feature conditional probability tables.
    probabilities = calculate_probabilities(features, labels)

    # Predict on the first sample (a training sample, used as a sanity check).
    sample = features[0]
    prediction = classify(probabilities, sample)
    print(f"The predicted class for the first sample is: {prediction}")

if __name__ == "__main__":
    main()