import tensorflow as tf
from tensorflow.keras import layers, models, Input
from tensorflow.keras.regularizers import l2
from tensorflow.keras import optimizers, losses
from tensorflow.keras.initializers import TruncatedNormal

def create_generic_model(input_shape, num_classes, l2_reg=0.01, dropout_rate=0.5):
	"""
	Build and compile a sample 1D-CNN model for six-axis time-series data.

	Args:
		input_shape (tuple): Input tensor shape as (time_steps, channels),
			e.g. time_steps=128, channels=6 gives input_shape=(128, 6).
		num_classes (int): Number of output classes. If greater than 1 the
			output layer uses softmax with categorical cross-entropy;
			otherwise a single sigmoid unit with binary cross-entropy.
		l2_reg (float, optional): L2 regularization factor, default 0.01.
			Applied as kernel_regularizer on the conv and output layers.
		dropout_rate (float, optional): Dropout fraction, default 0.5.
			Applied by the intermediate Dropout layers to reduce overfitting.

	Returns:
		tf.keras.Model: A compiled Keras model instance (Adam optimizer,
		accuracy metric; loss chosen from num_classes as described above).
	"""
	# Truncated-normal initializer with a small stddev to avoid extreme
	# weight values during later quantization; fixed seed for repeatability.
	init = TruncatedNormal(mean=0., stddev=0.01, seed=42)

	inputs = layers.Input(shape=input_shape)
	x = inputs

	# Conv block 1
	x = layers.Conv1D(filters=32, kernel_size=3, strides=3,
					  padding='same', kernel_regularizer=l2(l2_reg),
					  kernel_initializer=init,
					  name='conv1d_1')(x)
	x = layers.ReLU()(x)
	# Conv block 2
	x = layers.Conv1D(filters=15, kernel_size=3, strides=3,
					  padding='same', kernel_regularizer=l2(l2_reg),
					  kernel_initializer=init,
					  name='conv1d_2')(x)
	x = layers.ReLU()(x)

	x = layers.MaxPooling1D(pool_size=3, strides=3)(x)

	x = layers.Flatten()(x)
	# Bug fix: dropout_rate was previously ignored (0.5 was hard-coded in
	# both Dropout layers); it is now applied as the docstring documents.
	x = layers.Dropout(dropout_rate)(x)
	# Fully connected layers
	x = layers.Dense(128, activation='relu', kernel_initializer=init)(x)
	x = layers.Dropout(dropout_rate)(x)
	x = layers.Dense(64, activation='relu', kernel_initializer=init)(x)

	# Output layer: pick activation and loss from num_classes, matching the
	# documented contract. The previous code always applied Softmax, which
	# for num_classes == 1 collapses to a constant 1.0 output.
	if num_classes > 1:
		x = layers.Dense(num_classes, kernel_regularizer=l2(l2_reg),
						 name='dense_layer')(x)
		outputs = layers.Softmax()(x)
		loss = losses.CategoricalCrossentropy()
	else:
		outputs = layers.Dense(1, activation='sigmoid',
							   kernel_regularizer=l2(l2_reg),
							   name='dense_layer')(x)
		loss = losses.BinaryCrossentropy()

	model = models.Model(inputs=inputs, outputs=outputs)

	# Adam adaptive learning-rate optimizer; accuracy as the metric.
	model.compile(optimizer=optimizers.Adam(),
				  loss=loss,
				  metrics=['accuracy'])

	return model

# def create_generic_model_test(input_shape, num_classes, l2_reg=0.01, dropout_rate=0.5):
# 	inputs = Input(shape=input_shape)
# 	x = inputs

# 	# conv1d_1 block 
# 	x = layers.Conv1D(filters=32 * 1, kernel_size=3, strides=1,
# 					  padding='same', kernel_regularizer=l2(l2_reg),
# 					  name='conv1d_1')(x)
# 	x = layers.BatchNormalization(name='batchnorm_1')(x)
# 	x = layers.ReLU()(x) 
# 	x = layers.MaxPooling1D(pool_size=2)(x)

# 	# conv1d_2 block 
# 	x = layers.Conv1D(filters=32 * 2, kernel_size=3, strides=1,
# 					  padding='same', kernel_regularizer=l2(l2_reg),
# 					  name='conv1d_2')(x)
# 	x = layers.BatchNormalization(name='batchnorm_2')(x)
# 	x = layers.ReLU()(x) 
# 	x = layers.MaxPooling1D(pool_size=2)(x)

# 	x = layers.GlobalAveragePooling1D(name='global_avg_pooling')(x)
# 	x = layers.Dropout(dropout_rate, name='dropout_1')(x)
# 	x = layers.Dense(128, kernel_regularizer=l2(l2_reg), name='dense_layer')(x)
# 	x = layers.BatchNormalization(name='batchnorm_dense')(x)
# 	x = layers.ReLU()(x) 
# 	x = layers.Dropout(dropout_rate / 2, name='dropout_2')(x)

# 	if num_classes > 1:
# 		outputs = layers.Dense(num_classes, activation='softmax', name='output_layer')(x)
# 	else:
# 		outputs = layers.Dense(1, activation='sigmoid', name='output_layer')(x)

# 	model = models.Model(inputs=inputs, outputs=outputs)
# 	# NOTE: depending on num_classes and the label format, the caller can
# 	# re-invoke model.compile externally to adjust the loss function.
# 	model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# 	return model