# import reader
import numpy as np
import tensorflow as tf


def _parse_data_function(example):
    """Parse one serialized TFRecord example into a dense spectrogram tensor.

    The mel-spectrogram is stored as a variable-length float list, so it is
    decoded as a sparse tensor, densified, and given a trailing channel
    dimension for Conv2D-style models.
    """
    feature_spec = {
        'data': tf.io.VarLenFeature(tf.float32),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example, feature_spec)
    dense = tf.expand_dims(tf.sparse.to_dense(parsed['data']), axis=-1)
    return {'data': dense, 'label': parsed['label']}


def train_reader_tfrecord(data_path, num_epochs, batch_size):
    """Build the training pipeline: parse, shuffle, repeat, batch, prefetch."""
    dataset = tf.data.TFRecordDataset(data_path)
    dataset = dataset.map(_parse_data_function)
    dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.repeat(count=num_epochs)
    dataset = dataset.batch(batch_size=batch_size)
    return dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)


def test_reader_tfrecord(data_path, batch_size):
    """Build the evaluation pipeline: parse and batch (no shuffle/repeat)."""
    dataset = tf.data.TFRecordDataset(data_path)
    return dataset.map(_parse_data_function).batch(batch_size=batch_size)



# ---------------------------------------------------------------------------
# Training / evaluation script
# ---------------------------------------------------------------------------
tf.keras.backend.clear_session()

EPOCHS = 6
BATCH_SIZE = 32

# Variable-width input: 128 mel bins, any number of time frames, one channel.
inputs = tf.keras.Input(shape=(128, None, 1), name='resnet50v2_input')

# Build the model exactly once.  (An earlier revision built a second,
# functional model on the same input tensor and immediately discarded it;
# that dead code has been removed.)
model = tf.keras.models.Sequential([
    tf.keras.applications.ResNet50V2(include_top=False, weights=None, input_tensor=inputs),
    tf.keras.layers.GlobalMaxPooling2D(),
    tf.keras.layers.ActivityRegularization(l2=0.5),
    tf.keras.layers.Dense(units=7, activation='softmax'),
])

model.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)

train_dataset = train_reader_tfrecord('D:/Pycharm/ResnetData/train.tfrecord', EPOCHS, batch_size=BATCH_SIZE)
test_dataset = test_reader_tfrecord('D:/Pycharm/ResnetData/test.tfrecord', batch_size=BATCH_SIZE)

# Re-key the test set as (features, label) tuples so model.evaluate() can
# match the features to the named input layer.
test_dataset = test_dataset.map(lambda x: ({'resnet50v2_input': x['data']}, x['label']))

# compile() is only needed so evaluate() knows the loss and metrics; the
# custom loop below uses the Adam(1e-4) optimizer created above, so pass the
# same object here instead of silently creating a second 'adam' optimizer.
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

for batch_id, data in enumerate(train_dataset):
    # NOTE(review): assumes every spectrogram has exactly 128 time frames —
    # confirm against how the TFRecords were written.
    sounds = data['data'].numpy().reshape((-1, 128, 128, 1))
    labels = data['label']
    features = {'resnet50v2_input': sounds}
    with tf.GradientTape() as tape:
        # training=True so BatchNorm layers inside ResNet50V2 use batch
        # statistics (the original call omitted it and trained in
        # inference mode).
        predictions = model(features, training=True)
        train_loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, predictions))

    # Accuracy needs no gradient, so compute it outside the tape.
    train_accuracy = tf.keras.metrics.sparse_categorical_accuracy(labels, predictions)
    train_accuracy = float(np.mean(train_accuracy.numpy()))

    gradients = tape.gradient(train_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    print("Batch %d, Loss %f, Accuracy %f" % (batch_id, train_loss.numpy(), train_accuracy))

# Evaluate on the held-out set (fixed the test_los/test_accurcy typos and
# re-enabled the result print).
test_loss, test_accuracy = model.evaluate(test_dataset, verbose=1)
print(f"Test Loss: {test_loss}, Test Accuracy: {test_accuracy}")

# model.save(filepath='D:/graduation design/RESNET/resnet50.h5')


















