
import numpy as np

# Load the data set
def get_data():
    """Read 'data.csv' (UTF-8) and return the training rows.

    The first CSV column (sample id) is dropped from every line. The
    '密度' (density) and '含糖率' (sugar rate) columns are converted to
    float in every data row, and the header row is removed.

    Returns:
        list[list]: one list per sample; entries are str except the two
        continuous columns, which are float. The last entry of each row
        is the class label.
    """
    # 'with' guarantees the handle is closed (the original leaked it)
    with open('data.csv', encoding='utf-8') as file:
        # strip() removes the trailing newline; [1:] drops the id column
        filedata = [line.strip('\n').split(',')[1:] for line in file]
    # Locate the two continuous columns by their header names
    idx1 = filedata[0].index('密度')
    idx2 = filedata[0].index('含糖率')
    for i in range(1, len(filedata)):
        filedata[i][idx1] = float(filedata[i][idx1])
        filedata[i][idx2] = float(filedata[i][idx2])
    # Drop the header row
    return filedata[1:]

# Train the model (estimate the Naive Bayes parameters)
def fit(filedata, lapula_correct=True):
    """Estimate Naive Bayes parameters from the training rows.

    Args:
        filedata: list of rows; each row ends with the class label (str).
            Discrete attributes are str, continuous ones are float.
        lapula_correct: apply Laplace smoothing when True.

    Returns:
        dict with three kinds of entries (predict() relies on this scheme):
          * prob[label]             -> class prior P(label)
          * prob[(value, label)]    -> P(discrete value | label)
          * prob[(col_idx, label)]  -> (mean, std) of a continuous column
                                       restricted to class `label`.
    """
    # diff_class[j] = set of distinct values seen in column j; its size is
    # the N_i term of the Laplace denominator for discrete columns.
    diff_class = {i: set() for i in range(len(filedata[0]))}
    for raw in filedata:
        for j in range(len(raw)):
            diff_class[j].add(raw[j])
    # count[(value, label)]  -> (occurrence count, |values of that column|)
    # count[(col_idx, label)] -> list of float samples for that column/class
    count = {}
    for raw in filedata:
        for j in range(len(raw)):
            label = raw[-1]
            # discrete attribute
            if type(raw[j]) is not float:
                tup = (raw[j], label)
                count[tup] = (count.get(tup, [0])[0] + 1, len(diff_class[j]))
                # continuous attribute
            else:
                tup = (j, label)
                if tup not in count:
                    count[tup] = [raw[j]]
                else:
                    count[tup].append(raw[j])
    prob = {}
    total_case = len(filedata)
    for i in count:
        if type(count[i]) is list:
            # Continuous column: store Gaussian parameters per class.
            mean = np.mean(count[i])
            std = np.std(count[i])
            prob[i] = (mean, std)
        else:
            x, c = i
            if lapula_correct:
                if x == c:
                    # x == c identifies the label column itself, i.e. the
                    # class prior. It is stored under the bare label key
                    # (prob[x], not prob[i]); predict() depends on this.
                    # NOTE(review): a discrete attribute value equal to a
                    # label string would wrongly hit this branch — TODO
                    # confirm the data never shares values with the labels.
                    prob[x] = float(count[i][0] + 1) / (total_case + count[i][1])
                else:
                    # Smoothed conditional: (n_xc + 1) / (n_c + N_i);
                    # count[(c, c)][0] is the number of samples in class c.
                    prob[i] = float(count[i][0] + 1) / (count[(c, c)][0] + count[i][1])
            else:
                if x == c:
                    # Unsmoothed class prior, stored under the bare label key.
                    prob[x] = float(count[i][0]) / total_case
                else:
                    prob[i] = float(count[i][0]) / count[(c, c)][0]
    return prob

# Predict the class of a single sample
def predict(data, prob):
    """Classify one sample with the trained Naive Bayes parameters.

    Args:
        data: attribute list for one sample (label column excluded).
            Discrete attributes are str, continuous ones are float;
            continuous attributes must sit at the same column positions
            as in the training rows, since fit() keys their Gaussian
            parameters by column index.
        prob: parameter dict produced by fit().

    Returns:
        '是' or '否' — the class with the larger log-posterior.
    """
    label = ['是', '否']
    # Start from the log priors (fit() stores them under the bare label key)
    val = [np.log(prob[label[0]]), np.log(prob[label[1]])]
    # BUG FIX: the original looked the column up with data.index(value),
    # which returns the FIRST occurrence — two continuous attributes with
    # equal values both used the first column's Gaussian. enumerate()
    # yields the true column index instead.
    for idx, attr in enumerate(data):
        for j in range(2):
            if type(attr) is float:
                tup = (idx, label[j])
                if tup in prob:
                    # Gaussian density of the continuous attribute
                    mean, std = prob[tup]
                    p = np.exp(-(attr - mean) ** 2 / (2 * std ** 2)) / (np.sqrt(2 * np.pi) * std)
                else:
                    p = 1e-10  # A small non-zero value for unseen data
            else:
                tup = (attr, label[j])
                if tup in prob:
                    p = prob[tup]
                else:
                    p = 1e-10  # A small non-zero value for unseen data
            val[j] += np.log(p)
    return max(label, key=lambda x: val[label.index(x)])

# Implementing the Naive Bayes classifier (fit and predict functions)
# The fit and predict functions are already provided in the code snippet you shared

def initialize_weights(n_samples):
    """Return a uniform sample-weight vector of length n_samples (sums to 1)."""
    return np.full(n_samples, 1.0 / n_samples)

def adaboost_train(data, num_iterations):
    """Run `num_iterations` rounds of AdaBoost over Naive Bayes learners.

    Args:
        data: training rows (each row ends with the class label).
        num_iterations: number of boosting rounds.

    Returns:
        tuple: (classifiers, classifier_weights) where classifiers is a
        list of (prob_dict, alpha) pairs and classifier_weights the list
        of the same alphas, one per round.

    NOTE(review): fit() never receives the sample weights, so each round
    trains on the unweighted data and the weights only influence alpha —
    TODO confirm whether weighted fitting was intended.
    """
    sample_count = len(data)
    weights = initialize_weights(sample_count)
    classifiers = []
    classifier_weights = []
    for _ in range(num_iterations):
        # Train this round's Naive Bayes learner
        prob = fit(data)
        # Evaluate it on every training sample
        predictions = [predict(row[:-1], prob) for row in data]
        wrong = [k for k in range(sample_count) if predictions[k] != data[k][-1]]
        error = sum(weights[k] for k in wrong)
        # Learner weight; max() guards against a zero training error
        alpha = 0.5 * np.log((1.0 - error) / max(error, 1e-10))
        classifier_weights.append(alpha)
        # Re-weight: grow misclassified samples, shrink the correct ones
        for k in range(sample_count):
            weights[k] *= np.exp(alpha) if k in wrong else np.exp(-alpha)
        weights /= np.sum(weights)
        classifiers.append((prob, alpha))
    return classifiers, classifier_weights

def adaboost_predict(data, classifiers, classifier_weights):
    """Predict a label for every sample via the alpha-weighted ensemble vote.

    Args:
        data: list of samples; each sample's LAST element is dropped before
            the base learner is called (it is the label slot of a training
            row — samples without a label must carry a placeholder there).
        classifiers: list of (prob_dict, alpha) pairs from adaboost_train().
        classifier_weights: the alpha of each round (parallel list).

    Returns:
        list of '是'/'否' predictions, one per sample.
    """
    # (The original declared an unused local `n_samples`; removed.)
    predictions = []
    for sample in data:
        pred_sum = 0.0
        # Signed vote: '是' contributes +alpha, '否' contributes -alpha
        for (prob, _), alpha in zip(classifiers, classifier_weights):
            pred = predict(sample[:-1], prob)
            pred_sum += alpha if pred == '是' else -alpha
        final_pred = '是' if pred_sum >= 0 else '否'
        predictions.append(final_pred)
    return predictions

if __name__ == '__main__':
    # Load the training data
    data = get_data()
    # Number of boosting rounds
    num_iterations = 5
    # AdaBoost training
    classifiers, classifier_weights = adaboost_train(data, num_iterations)
    # Test sample. adaboost_predict() strips the LAST element of every
    # sample (the label slot of training rows), so a placeholder is
    # appended here; without it the sugar-rate attribute 0.460 was
    # silently dropped from the prediction.
    test_data = [['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', 0.697, 0.460, None]]
    # AdaBoost prediction
    predictions = adaboost_predict(test_data, classifiers, classifier_weights)
    # Output the result
    print("该样例测试结果是：")
    if predictions[0] == '是':
        print("好瓜")
    else:
        print("坏瓜")
