import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
from matplotlib import pyplot as plt
from experiments.experiments1 import get_is_correct_arr, count_elements_above_threshold, save_list_to_file

# Load the iris dataset: 150 samples, 4 numeric features, 3 classes.
iris = load_iris()
X, y = iris.data, iris.target

# Standardize features to zero mean / unit variance.
X = StandardScaler().fit_transform(X)

# One-hot encode the integer class labels.
y = to_categorical(y, num_classes=3)

# Hold out 20% as a test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# A small fully-connected classifier: 4 inputs -> 10 -> 10 -> 3 softmax outputs.
model = Sequential([
    Dense(10, input_shape=(4,), activation='relu'),  # hidden layer 1
    Dense(10, activation='relu'),                    # hidden layer 2
    Dense(3, activation='softmax'),                  # class probabilities
])

# NOTE(review): training below uses a hand-rolled GradientTape loop with its
# own Adam instance and loss, so the optimizer/loss compiled here are
# effectively unused; compile is kept for parity with the original script.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Components for the manual training loop (separate from the compiled ones).
optimizer = tf.keras.optimizers.Adam()
loss_function = tf.keras.losses.CategoricalCrossentropy()

# Hyperparameters for the custom training loop.
epochs = 200      # total passes over the training set
batch_size = 5    # samples per gradient step
steps_per_epoch = len(X_train) // batch_size  # any ragged tail batch is dropped

# Parameters for the post-training "ignore rate" analysis.
threshold = 30        # sliding-window length in epochs
threshold_value = 30  # per-sample correct-count cutoff within the window
                      # (inclusive vs exclusive depends on the helper — TODO confirm)

# One entry per epoch: correctness flags for each test sample.
predicted_result = []

# Custom training loop: reshuffle, take one gradient step per mini-batch,
# then report train/test accuracy at the end of every epoch.
for epoch in range(epochs):
    print(f"Epoch {epoch + 1}/{epochs}")

    # Reshuffle the training set each epoch (features and labels in lockstep).
    perm = np.random.permutation(X_train.shape[0])
    X_train, y_train = X_train[perm], y_train[perm]

    for step in range(steps_per_epoch):
        lo = step * batch_size
        X_batch = X_train[lo:lo + batch_size]
        y_batch = y_train[lo:lo + batch_size]

        # Forward pass under the tape, then backprop and apply the update.
        with tf.GradientTape() as tape:
            predictions = model(X_batch, training=True)
            loss = loss_function(y_batch, predictions)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

    # End-of-epoch accuracy on the (shuffled) training set.
    train_predictions = model(X_train, training=False)
    train_accuracy = accuracy_score(np.argmax(y_train, axis=1),
                                    np.argmax(train_predictions, axis=1))
    print(f" - accuracy: {train_accuracy:.4f}")

    # Evaluate on the held-out test set.
    y_pred_classes = np.argmax(model.predict(X_test), axis=1)
    y_true = np.argmax(y_test, axis=1)

    # Record which test samples were classified correctly this epoch.
    predicted_result.append(get_is_correct_arr(y_true, y_pred_classes))

    accuracy = accuracy_score(y_true, y_pred_classes)
    print(f"测试集准确率: {accuracy:.2f}")

# Stack the per-epoch correctness flags into an (epochs, n_test) int tensor.
predicted_result = tf.convert_to_tensor(predicted_result, dtype=tf.int32)

# For each epoch i, look back over the last `threshold` epochs of per-sample
# correctness flags and compute the percentage of test samples whose correct
# count exceeds `threshold_value` in that window — i.e. samples that could be
# ignored as already learned.
ignore_rate_list = []
for i in range(1, epochs + 1):
    # Not enough history yet to fill a full window: record 0.
    if i < threshold:
        ignore_rate_list.append(0)
        continue
    # Window of the most recent `threshold` epochs.
    # Fix: was hard-coded as `predicted_result[i-30:i]`; using `threshold`
    # keeps the slice consistent with the guard above if the window changes.
    span_tensor = predicted_result[i - threshold:i]
    # Per-sample count of correct predictions within the window.
    sum_along_columns = tf.reduce_sum(span_tensor, axis=0)
    # Samples whose count is above the cutoff (inclusive vs exclusive depends
    # on count_elements_above_threshold — TODO confirm against the helper).
    ignore_num = count_elements_above_threshold(sum_along_columns, threshold_value)
    ignore_rate = ignore_num / tf.size(sum_along_columns).numpy() * 100
    ignore_rate_list.append(ignore_rate)

print(ignore_rate_list)
save_list_to_file(ignore_rate_list, "./data_result/iris.txt")