# 3-ResNet-block version
import time

import tensorflow as tf
import scipy.io as sio
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow_addons.optimizers import AdamW  # pip install tensorflow_addons (version must match the installed tf version)

def residual_block(x, filters, dilation_rate, kernel_size=3):
    # Shortcut branch
    shortcut = x

    # First convolution block: BN -> ReLU -> dilated separable conv -> 1x1 conv
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.SeparableConv2D(filters, kernel_size, depth_multiplier=2, dilation_rate=dilation_rate, padding='same')(x)
    x = tf.keras.layers.Conv2D(filters, 1, padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)

    # Second convolution block
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.SeparableConv2D(filters, kernel_size, depth_multiplier=2, dilation_rate=dilation_rate, padding='same')(x)
    x = tf.keras.layers.Conv2D(filters, 1, padding='same')(x)

    # If the channel dimensions do not match, project the shortcut with a 1x1 convolution
    if shortcut.shape[-1] != filters:
        shortcut = tf.keras.layers.Conv2D(filters, 1, padding='same')(shortcut)

    # Residual addition
    x = tf.keras.layers.Add()([x, shortcut])
    x = tf.keras.layers.ReLU()(x)

    return x

def build_resnet(input_shape):
    inputs = tf.keras.layers.Input(shape=input_shape)

    # Initial convolution layer
    x = tf.keras.layers.Conv2D(64, 3, strides=1, padding='same')(inputs)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)

    # Stack the residual blocks
    x = residual_block(x, filters=128, dilation_rate=(2, 3))
    x = residual_block(x, filters=448, dilation_rate=(3, 6))
    x = residual_block(x, filters=128, dilation_rate=(2, 3))

    # One output channel per bit (Morder is a global defined below); sigmoid gives per-bit probabilities
    outputs = tf.keras.layers.Conv2D(Morder, 3, strides=1, padding='same')(x)
    outputs = tf.keras.activations.sigmoid(outputs)

    model = tf.keras.Model(inputs, outputs)
    return model

def load_data(m):
    # Load the m SNR datasets saved from MATLAB and stack them along the sample axis
    data_inputs = []
    data_labels = []
    for n in range(1, m + 1):
        input_data = sio.loadmat(f"SNR{n}_input.mat")["input_save"]
        label_data = sio.loadmat(f"SNR{n}_label.mat")["label_save"]
        # Move the last (sample) dimension to the front so arrays are sample-first
        input_data = np.transpose(input_data, (3, 0, 1, 2))
        label_data = np.transpose(label_data, (3, 0, 1, 2))
        data_inputs.append(input_data)
        data_labels.append(label_data)
    data_inputs = np.concatenate(data_inputs)
    data_labels = np.concatenate(data_labels)
    return data_inputs, data_labels
# Define the input shape, number of output bits, and number of SNR datasets
start = time.time()
input_shape = (312, 14, 6)
Morder = 4      # 16QAM carries 4 bits per symbol
SNR_number = 10

# Build the ResNet model
resnet_model = build_resnet(input_shape)
# Print the model summary
resnet_model.summary()
##################################################################

# Define the AdamW optimizer with learning rate 0.01 and weight decay 1e-4
#optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
optimizer = AdamW(learning_rate=0.01, weight_decay=1e-4)
# TensorBoard logging
log_dir = "./log"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
# Compile the model (callbacks belong to fit, not compile)
resnet_model.compile(optimizer=optimizer, loss='binary_crossentropy')
# Read the data and split it into training and validation sets
X_data, y_data = load_data(SNR_number)
print(X_data.shape, y_data.shape)
X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=0.3, random_state=42)
# Train the model
resnet_model.fit(X_train, y_train, epochs=10, batch_size=20, validation_data=(X_val, y_val), callbacks=[tensorboard_callback])
# Save the trained model
resnet_model.save("deeprx.h5")
endt = time.time()
print(endt-start)
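
# --- Usage sketch (not part of the original script) ---
# A minimal, illustrative example of loading the saved "deeprx.h5" model for
# inference; the random input is an assumption, used only to show the expected
# tensor shapes. compile=False avoids needing the AdamW class at load time.
#
# model = tf.keras.models.load_model("deeprx.h5", compile=False)
# x = np.random.randn(1, 312, 14, 6).astype(np.float32)  # one sample with the training input shape
# bit_probs = model.predict(x)                           # per-bit probabilities, shape (1, 312, 14, 4)
# bits = (bit_probs > 0.5).astype(np.uint8)              # hard bit decisions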