# coding:utf-8
import keras
from keras import backend as K, Sequential
from keras.layers import ZeroPadding2D, Conv2D, BatchNormalization, MaxPooling2D, Dropout, AveragePooling2D, Flatten, \
    Dense
from keras.models import load_model
from keras.models import Model
from matplotlib import pyplot as plt
import cv2
import numpy as np
ROWS = 297
COLS = 396
CHANNELS = 3
def main():
    """Visualize the output of the first layer of a saved Keras model.

    Loads a trained model from disk, pushes one resized BGR image through
    a K.function wired to ``model.layers[0].output``, and shows the first
    16 output channels as grayscale subplots.

    Raises:
        FileNotFoundError: if the input image cannot be read.
    """
    model: Model = load_model(r'F:\Resources\model\ws3_model.h5')
    image = cv2.imread(r'F:\bigphoto\monkey\u=1820656003,1822753319&fm=200&gp=0.jpg')
    if image is None:
        # cv2.imread returns None on a bad path instead of raising;
        # fail loudly here rather than inside cv2.resize.
        raise FileNotFoundError('could not read the input image')
    image_arr = cv2.resize(image, (COLS, ROWS))
    print(image_arr.shape)
    batch = np.expand_dims(image_arr, axis=0)  # add leading batch axis -> (1, ROWS, COLS, 3)

    # A simple way to inspect intermediate activations is a Keras backend
    # function from the model's input to the desired layer's output.
    # Because the model contains Dropout/BatchNormalization layers, a
    # learning_phase flag must be passed as an extra input (1 = training).
    layer1 = K.function([model.layers[0].input, K.learning_phase()],
                        [model.layers[0].output])

    f1 = layer1([batch, 1])[0]
    print(f1, type(f1), f1.shape)
    # Show the first 16 channels of the layer output.
    # NOTE(review): assumes layer 0 preserves the (ROWS, COLS) spatial
    # size — confirm against the saved model's architecture.
    for ch in range(16):
        show_img = f1[0, :, :, ch]  # (ROWS, COLS) map for one channel
        print(show_img.shape)
        plt.subplot(6, 6, ch + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()

def mina():
    """Activation-maximization demo using keras-vis on VGG16.

    Optimizes an input image so that class 20 of the 'predictions' layer
    is maximally activated, regularized by an LP norm and total variation,
    and records the optimization progress as a GIF ('opt_progress').
    """
    from vis.losses import ActivationMaximization
    from vis.regularizers import TotalVariation, LPNorm
    from vis.modifiers import Jitter
    from vis.optimizer import Optimizer
    from vis.callbacks import GifGenerator
    from vis.utils.vggnet import VGG16

    # Build the VGG16 network with ImageNet weights
    vgg = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    target_layer = 'predictions'
    layers_by_name = {layer.name: layer for layer in vgg.layers[1:]}
    target_classes = [20]

    weighted_losses = [
        (ActivationMaximization(layers_by_name[target_layer], target_classes), 2),
        (LPNorm(vgg.input), 10),
        (TotalVariation(vgg.input), 10),
    ]
    optimizer = Optimizer(vgg.input, weighted_losses)
    optimizer.minimize(max_iter=500,
                       verbose=True,
                       image_modifiers=[Jitter()],
                       callbacks=[GifGenerator('opt_progress')])
def mian():
    """Visualize the feature maps after each conv layer of a small CNN.

    Builds an (untrained) three-conv-block network for 38x38 grayscale
    input, then for each depth rebuilds a truncated copy of the network,
    transplants the conv weights from the full model, and plots the
    resulting channel maps of a single test image.

    NOTE(review): the model is never trained or loaded from disk, so the
    visualized maps reflect the random initial conv weights.
    """
    # Full network: 3 conv blocks + classification head.
    model = _build_backbone(depth=3)
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(AveragePooling2D((5, 5)))
    model.add(Flatten())
    model.add(Dense(2, activation='softmax'))

    # Read a grayscale test image. imdecode(np.fromfile(...)) instead of
    # cv2.imread so non-ASCII Windows paths work.
    img_src = cv2.imdecode(np.fromfile(r'c:\temp.tif', dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img_src, (38, 38), interpolation=cv2.INTER_CUBIC)
    img = (255 - img) / 255  # invert and scale to [0, 1]
    test_x = np.reshape(img, (1, 38, 38, 1))  # batch of one

    # Conv2D layers sit at indices 1, 5, 9 in both the full backbone and
    # every truncated copy, so weights can be copied index-for-index.
    conv_indices = (1, 5, 9)
    # (truncated depth, channels to draw, subplot grid rows/cols).
    # depth 3 has 128 channels but an 11x11 grid only fits 121 of them.
    stages = ((1, 32, 4, 8), (2, 64, 8, 8), (3, 121, 11, 11))
    for depth, count, rows, cols in stages:
        probe = _build_backbone(depth)
        print(len(probe.layers))
        for idx in conv_indices[:depth]:
            probe.layers[idx].set_weights(model.layers[idx].get_weights())
        _show_channels(probe.predict(test_x), count, rows, cols)


def _build_backbone(depth):
    """Return a Sequential net with `depth` (1..3) conv stages for 38x38x1 input.

    Stage 1 is ZeroPadding + Conv2D(32); each further stage prepends
    BatchNorm/MaxPool/Dropout before its Conv2D (64 then 128 filters),
    matching the layout of the full model.
    """
    m = Sequential()
    m.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
    m.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    for filters in (64, 128)[:depth - 1]:
        m.add(BatchNormalization())
        m.add(MaxPooling2D(pool_size=(2, 2)))
        m.add(Dropout(0.25))
        m.add(Conv2D(filters, (3, 3), activation='relu', padding='same'))
    return m


def _show_channels(features, count, rows, cols):
    """Plot the first `count` channel maps of a (1, H, W, C) prediction
    in a rows x cols subplot grid, then show the figure."""
    print(np.shape(features))
    features = np.transpose(features, (0, 3, 1, 2))  # -> (1, C, H, W)
    for i in range(count):
        plt.subplot(rows, cols, i + 1)
        plt.imshow(features[0][i])  # default colormap, as in the original
    plt.show()

def gaozhong():
    """Plot the family of lines y = w*x + 3 for w = 1..4.

    Bug fix: the original called ``plt.plot(x*w+3)`` with only y-values,
    so the curves were drawn against the sample index (0..5) instead of
    against x. Passing x explicitly plots the intended lines.
    """
    from matplotlib import pyplot as plt
    x = np.array([1, 3, 5, 7, 9, 11])
    plt.figure()
    for w in range(1, 5):
        plt.plot(x, w * x + 3, label=f'y = {w}x + 3')
    plt.legend()
    plt.show()

# Guard the script entry point so importing this module does not
# immediately open plot windows as a side effect.
if __name__ == '__main__':
    gaozhong()
    # mina()