import tensorflow as tf
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU,Dense
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential

# Load the IMDB movie-review dataset (binary sentiment labels).
max_features = 10000  # vocabulary size: keep only the most frequent words
maxlen = 500  # fixed token length every review is padded/truncated to

(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)

# Pad (or truncate) each review so all sequences share the same length.
X_train, X_test = (sequence.pad_sequences(split, maxlen=maxlen)
                   for split in (X_train, X_test))

def create_rnn_model(rnn_type='simple'):
    """Build an uncompiled Keras model for IMDB sentiment classification.

    Architecture: Embedding (32-dim) -> one recurrent layer (32 units)
    -> single sigmoid output unit.

    Args:
        rnn_type: which recurrent variant to use — 'simple', 'lstm',
            or 'gru'.

    Returns:
        An uncompiled tf.keras Sequential model.

    Raises:
        ValueError: if rnn_type is not one of the recognized variants.
    """
    # Dispatch table instead of an if/elif chain: an unknown name now
    # fails loudly, where the original silently produced a model with
    # no recurrent layer at all (Embedding -> Dense only).
    rnn_layers = {'simple': SimpleRNN, 'lstm': LSTM, 'gru': GRU}
    if rnn_type not in rnn_layers:
        raise ValueError(
            f"unknown rnn_type: {rnn_type!r}; expected one of {sorted(rnn_layers)}")

    model = Sequential()

    # Embedding layer (maps word indices to dense 32-dim vectors).
    model.add(tf.keras.layers.Embedding(max_features, 32))

    # Recurrent layer, 32 units, variant selected by rnn_type.
    model.add(rnn_layers[rnn_type](32))

    # Output layer: single sigmoid unit for binary sentiment.
    model.add(Dense(1, activation='sigmoid'))

    return model

# Compare the different RNN variants on the same data.
rnn_types = ['simple', 'lstm', 'gru']
histories = {}

for rnn_type in rnn_types:
    print(f"\n训练{rnn_type.upper()}模型...")
    model = create_rnn_model(rnn_type)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Identical training regime for every variant so results are comparable.
    fit_options = dict(epochs=5,
                       batch_size=128,
                       validation_split=0.2,
                       verbose=0)
    history = model.fit(X_train, y_train, **fit_options)
    histories[rnn_type] = history