import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

print("TensorFlow ๋ฒ„์ „:", tf.__version__)

# 1. ๋ฐ์ดํ„ฐ ๋กœ๋“œ ๋ฐ ์ „์ฒ˜๋ฆฌ
print("\n1. ๋ฐ์ดํ„ฐ ๋กœ๋“œ ๋ฐ ์ „์ฒ˜๋ฆฌ๋ฅผ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค...")
# num_words=10000: ๊ฐ€์žฅ ๋นˆ๋„๊ฐ€ ๋†’์€ 1๋งŒ ๊ฐœ์˜ ๋‹จ์–ด๋งŒ ์‚ฌ์šฉ
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=10000)

print(f"ํ•™์Šต ๋ฐ์ดํ„ฐ ๊ฐœ์ˆ˜: {len(x_train)}")
print(f"ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ ๊ฐœ์ˆ˜: {len(x_test)}")

# ๋ฌธ์žฅ์˜ ๊ธธ์ด๋ฅผ ๋™์ผํ•˜๊ฒŒ ๋งž์ถ”๊ธฐ ์œ„ํ•ด ํŒจ๋”ฉ(padding) ์ฒ˜๋ฆฌ (maxlen=256)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=256)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=256)
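
# After padding, the data is a dense 2-D integer array; with 25,000 training reviews and
# maxlen=256 the expected shape is (25000, 256).
print("x_train shape after padding:", x_train.shape)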
print("๋ฐ์ดํ„ฐ ์ „์ฒ˜๋ฆฌ๊ฐ€ ์™„๋ฃŒ๋˜์—ˆ์Šต๋‹ˆ๋‹ค.")

# 2. Build, train, and save the LSTM model
print("\n2. Training the LSTM model...")

# LSTM ๋ชจ๋ธ ์•„ํ‚คํ…์ฒ˜ ์ •์˜
lstm_model = keras.Sequential([
    layers.Embedding(input_dim=10000, output_dim=128),  # map each word index to a 128-dim vector
    layers.LSTM(64),                                     # 64-unit LSTM reads the sequence
    layers.Dense(1, activation="sigmoid")                # probability that the review is positive
])

# ๋ชจ๋ธ ์ปดํŒŒ์ผ
lstm_model.compile(
    loss="binary_crossentropy",
    optimizer="adam",
    metrics=["accuracy"]
)

print("\n--- LSTM ๋ชจ๋ธ ๊ตฌ์กฐ ---")
lstm_model.summary()

# ๋ชจ๋ธ ํ•™์Šต
batch_size = 128
epochs = 1 # ์˜ˆ์ œ์ด๋ฏ€๋กœ epoch๋ฅผ ์ค„์—ฌ์„œ ์‹คํ–‰ ์‹œ๊ฐ„์„ ๋‹จ์ถ•ํ•ฉ๋‹ˆ๋‹ค.
history_lstm = lstm_model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test)
)
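
# (Optional) fit() returns a History object with per-epoch metrics, which is handy for
# plotting learning curves when epochs > 1.
print("Metrics recorded during training:", list(history_lstm.history.keys()))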

# ๋ชจ๋ธ ํ‰๊ฐ€
score_lstm = lstm_model.evaluate(x_test, y_test, verbose=0)
print(f"\nLSTM ๋ชจ๋ธ ํ…Œ์ŠคํŠธ ๊ฒฐ๊ณผ -> Loss: {score_lstm[0]:.4f}, Accuracy: {score_lstm[1]:.4f}\n")

# ํ•™์Šต๋œ LSTM ๋ชจ๋ธ ์ €์žฅ
lstm_model.save("lstm_model.keras")
print("LSTM ๋ชจ๋ธ์ด 'lstm_model.keras' ํŒŒ์ผ๋กœ ์ €์žฅ๋˜์—ˆ์Šต๋‹ˆ๋‹ค.")


# 3. Build, train, and save the GRU model
print("\n3. Training the GRU model...")

# GRU ๋ชจ๋ธ ์•„ํ‚คํ…์ฒ˜ ์ •์˜
gru_model = keras.Sequential([
    layers.Embedding(input_dim=10000, output_dim=128),  # same embedding setup as the LSTM model
    layers.GRU(64),                                      # 64-unit GRU reads the sequence
    layers.Dense(1, activation="sigmoid")                # probability that the review is positive
])

# ๋ชจ๋ธ ์ปดํŒŒ์ผ
gru_model.compile(
    loss="binary_crossentropy",
    optimizer="adam",
    metrics=["accuracy"]
)

print("\n--- GRU ๋ชจ๋ธ ๊ตฌ์กฐ ---")
gru_model.summary()


# ๋ชจ๋ธ ํ•™์Šต
history_gru = gru_model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test)
)

# ๋ชจ๋ธ ํ‰๊ฐ€
score_gru = gru_model.evaluate(x_test, y_test, verbose=0)
print(f"\nGRU ๋ชจ๋ธ ํ…Œ์ŠคํŠธ ๊ฒฐ๊ณผ -> Loss: {score_gru[0]:.4f}, Accuracy: {score_gru[1]:.4f}")


# ํ•™์Šต๋œ GRU ๋ชจ๋ธ ์ €์žฅ
gru_model.save("gru_model.keras")
print("GRU ๋ชจ๋ธ์ด 'gru_model.keras' ํŒŒ์ผ๋กœ ์ €์žฅ๋˜์—ˆ์Šต๋‹ˆ๋‹ค.")