'''import os.path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error
from keras.losses import mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed, Bidirectional, GRU
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Conv1D, Dropout, Dense, Concatenate, MultiHeadAttention, LayerNormalization,GlobalAveragePooling1D, Add,BatchNormalization
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
import tensorflow as tf
from keras.models import load_model
from keras.utils.vis_utils import plot_model
from keras.layers import Attention
from keras.utils.vis_utils import plot_model
writefile_folder = r'F:\工作总结\月报'
N_past_value = 30
Lstm_input_size = 6
Pre_size = 30
def build_TCN_3layer_model(optimizer='adam', filters=16, kernel_size=3,
                           dropout_rate=0.3, l2_reg=1e-3, dilation_rates=[1, 2, 4]):
    """
    三层TCN模型
    dilation_rates: 每层的膨胀系数，增加感受野
    """
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # 第一层
    conv1 = Conv1D(filters=filters, kernel_size=kernel_size,
                   padding='causal', activation='relu', dilation_rate=dilation_rates[0],
                   kernel_regularizer=l2(l2_reg))(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = Dropout(dropout_rate)(conv1)

    # 残差连接1
    residual1 = Conv1D(filters, kernel_size=1, padding='same')(inputs)
    x = Add()([conv1, residual1])

    # 第二层
    conv2 = Conv1D(filters=filters * 2, kernel_size=kernel_size,
                   padding='causal', activation='relu', dilation_rate=dilation_rates[1],
                   kernel_regularizer=l2(l2_reg))(x)
    conv2 = BatchNormalization()(conv2)
    conv2 = Dropout(dropout_rate)(conv2)

    # 残差连接2
    residual2 = Conv1D(filters * 2, kernel_size=1, padding='same')(x)
    x = Add()([conv2, residual2])

    # 第三层
    conv3 = Conv1D(filters=filters * 4, kernel_size=kernel_size,
                   padding='causal', activation='relu', dilation_rate=dilation_rates[2],
                   kernel_regularizer=l2(l2_reg))(x)
    conv3 = BatchNormalization()(conv3)
    conv3 = Dropout(dropout_rate)(conv3)

    # 全局平均池化
    gap = GlobalAveragePooling1D()(conv3)

    # 输出层
    outputs = Dense(Pre_size)(gap)

    model = Model(inputs, outputs)

    # 优化器配置
    if optimizer == 'adam':
        opt = Adam(learning_rate=0.001)
    elif optimizer == 'rmsprop':
        opt = RMSprop(learning_rate=0.001)
    else:
        opt = Adam(learning_rate=0.001)

    model.compile(loss='huber_loss', optimizer=opt)
    return model
'''
import os.path
import numpy as np
import pandas as pda
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Dropout, Dense, GRU,LayerNormalization,Concatenate,Embedding,MultiHeadAttention
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.models import load_model
import tensorflow as tf
from keras import layers
# 构建示例模型
'''example_model = build_TCN_3layer_model(
    filters=16,
    kernel_size=3,
   # gru_units=32,
    dropout_rate=0.3
#   l2_reg=1e-4
#    num_heads=4
)

# 保存模型结构图
plot_model(
    example_model,
    to_file=os.path.join(writefile_folder, 'model_architecture.png'),
    show_shapes=True,
    show_layer_names=True,
    dpi=150,
    rankdir='TB'  # 垂直布局
)

print("模型结构图已保存至:", os.path.join(writefile_folder, 'model_architecture.png'))'''
# 构建模型
'''model = build_TCN_3layer_model()

# 保存模型（包含结构和权重）
model.save('tcn_3layer_model', save_format='tf')
print("完成")
'''

import os.path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error
from keras.losses import mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed, BatchNormalization, GlobalAveragePooling1D, Add, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Conv1D, Dropout, Dense
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
import os.path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Dropout, Dense, GRU, LSTM, Bidirectional, Conv1D, MaxPooling1D, Flatten, GlobalAveragePooling1D
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.models import load_model
# Shape constants consumed by build_cnn_lstm_model below.
N_past_value = 60     # input window length: time steps per sample (Input shape dim 0)
Pre_size = 60         # output width of the final Dense layer — number of values predicted per sample
Lstm_input_size = 7   # features per time step (Input shape dim 1); presumably one column per signal — TODO confirm against the data pipeline

def build_cnn_lstm_model(optimizer='adam', filters=64, kernel_size=3,
                         lstm_units=128, dropout_rate=0.3, l2_reg=1e-4, epochs=100):
    """Build and compile a CNN-LSTM forecasting model.

    Two Conv1D -> MaxPooling1D -> Dropout stages extract local temporal
    patterns from the (N_past_value, Lstm_input_size) input window, an
    LSTM condenses the pooled sequence into a single state vector, and a
    linear Dense head emits Pre_size predicted values.

    Parameters
    ----------
    optimizer : str
        'adam' or 'rmsprop'; any other value falls back to Adam.
    filters : int
        Filter count of the first conv stage (the second uses filters // 2).
    kernel_size : int
        Kernel width shared by both convolutions.
    lstm_units : int
        Hidden size of the LSTM layer.
    dropout_rate : float
        Dropout applied after each of the three stages.
    l2_reg : float
        L2 penalty applied to the conv and LSTM kernels.
    epochs : int
        Not used inside this builder; kept in the signature so scikit-learn
        wrappers (e.g. KerasRegressor grid search) can pass it through.

    Returns
    -------
    keras.Model
        Compiled with loss='mse', the selected optimizer, and an 'mae' metric.
    """
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # First convolutional stage.
    net = Conv1D(filters=filters, kernel_size=kernel_size, activation='relu',
                 padding='same', kernel_regularizer=l2(l2_reg))(inputs)
    net = MaxPooling1D(pool_size=2)(net)
    net = Dropout(dropout_rate)(net)

    # Second, narrower convolutional stage.
    net = Conv1D(filters=filters // 2, kernel_size=kernel_size, activation='relu',
                 padding='same', kernel_regularizer=l2(l2_reg))(net)
    net = MaxPooling1D(pool_size=2)(net)
    net = Dropout(dropout_rate)(net)

    # Sequence summarization: keep only the final LSTM state.
    net = LSTM(units=lstm_units, return_sequences=False,
               kernel_regularizer=l2(l2_reg))(net)
    net = Dropout(dropout_rate)(net)

    outputs = Dense(Pre_size, activation='linear')(net)
    model = Model(inputs, outputs)

    # Optimizer selection; unrecognized names default to Adam.
    optimizer_classes = {'adam': Adam, 'rmsprop': RMSprop}
    opt = optimizer_classes.get(optimizer, Adam)(learning_rate=0.001)

    model.compile(loss='mse', optimizer=opt, metrics=['mae'])
    return model


# Build the model with its default hyperparameters.
model = build_cnn_lstm_model()

# Print the layer-by-layer summary (shapes and parameter counts).
print("模型摘要:")
model.summary()

# Method 1: save the architecture diagram as an image file
# (the actual plot_model calls are commented out below).
print("\n正在生成模型结构图...")

'''# 保存为PNG格式
plot_model(model,
           to_file='tcn_model_structure.png',
           show_shapes=True,
           show_layer_names=True,
           dpi=300,
           rankdir='TB')  # TB: 从上到下, LR: 从左到右

# 保存为PDF格式（矢量图，更清晰）
plot_model(model,
           to_file='tcn_model_structure.pdf',
           show_shapes=True,
           show_layer_names=True,
           rankdir='TB')

print("模型结构图已保存为 'tcn_model_structure.png' 和 'tcn_model_structure.pdf'")'''

'''# 方法2：显示模型结构（如果在Jupyter notebook中）
try:
    from IPython.display import Image, display

    display(Image('tcn_model_structure.png'))
except:
    pass
'''

'''# 方法3：使用matplotlib显示图片
try:
    #img = Image.open('tcn_model_structure.png')
    plt.figure(figsize=(15, 20))
    #plt.imshow(img)
    plt.axis('off')
    plt.title('TCN 3-Layer Model Architecture', fontsize=16, pad=20)
    plt.tight_layout()
    plt.savefig('tcn_model_display.jpg', dpi=300, bbox_inches='tight')
    plt.show()
except Exception as e:
    print(f"显示图片时出错: {e}")
'''
'''
# 方法4：生成更详细的文本描述
def print_model_details(model):
    print("\n" + "=" * 60)
    print("TCN模型详细结构")
    print("=" * 60)

    for i, layer in enumerate(model.layers):
        print(f"层 {i}: {layer.name}")
        print(f"  类型: {type(layer).__name__}")
        print(f"  输出形状: {layer.output_shape}")
        if hasattr(layer, 'kernel_size'):
            print(f"  卷积核大小: {layer.kernel_size}")
        if hasattr(layer, 'filters'):
            print(f"  滤波器数量: {layer.filters}")
        if hasattr(layer, 'dilation_rate'):
            print(f"  膨胀率: {layer.dilation_rate}")
        if hasattr(layer, 'rate'):
            print(f"  Dropout率: {layer.rate}")
        print()


print_model_details(model)
'''
# Method 5: save the model architecture as a JSON file.
# NOTE(review): the model built above is a CNN-LSTM, yet the file is named
# after the TCN builder — confirm the intended filename with the author.
architecture_path = "build_TCN_3layer_model.json"
model_json = model.to_json()
with open(architecture_path, "w") as json_file:
    json_file.write(model_json)
# Bug fix: the previous message reported 'tcn_2层.json', which is not the
# file actually written; print the real path so the two cannot drift apart.
print(f"模型架构已保存为 '{architecture_path}'")