import numpy as np
import keras
from keras import Input, Model
from keras.engine.saving import load_model
from keras.layers import LSTM, Dense, Embedding, Flatten,Conv2D,MaxPool2D
from keras_applications.densenet import layers
import keras.backend as K
from keras import Sequential
import get_data
import tensorflow as tf
import attention
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# from tensorflow import keras
from keras import layers
from keras import Input
from keras.models import Model

from keras.optimizers import RMSprop
import os

# Restrict TensorFlow to GPUs 2 and 3.
# NOTE(review): the value contains a space ("2, 3"); most drivers tolerate
# it, but "2,3" is the conventional form — confirm it selects both GPUs.
os.environ['CUDA_VISIBLE_DEVICES']='2, 3'
num_pic=3195  # total number of images in the dataset
# image dimensions
width = 460
height = 460

def get_batch(x_pic, y, batch_size):
    """Draw a random mini-batch of images and their matching labels.

    Args:
        x_pic: image array of shape (N, H, W, C).
        y: label array whose first dimension aligns with x_pic.
        batch_size: number of samples to draw (with replacement).

    Returns:
        Tuple (batch_images, batch_labels), both of length batch_size.
    """
    # Derive the population size from the data itself instead of the
    # hard-coded module global num_pic, so any array size works.
    index = np.random.randint(0, x_pic.shape[0], batch_size)
    # BUG FIX: previously the full label array was returned, so labels were
    # not aligned with the sampled images; index the labels too.
    return x_pic[index, :, :, :], y[index]

# class CNNA(tf.keras.Model):
#     def __init__(self):
#           super().__init__()
#         self.flatten = tf.keras.layers.Flatten()    # Flatten层将除第一维（batch_size）以外的维度展平
#         self.dense1 = tf.keras.layers.Dense(units=100, activation=tf.nn.relu)
#         self.dense2 = tf.keras.layers.Dense(units=10)
#         self.conv1 = tf.keras.layers.Conv2D(
#             filters=32,  # 卷积层神经元（卷积核）数目
#             kernel_size=[5, 5],  # 感受野大小
#             padding='same',  # padding策略（vaild 或 same）
#             activation=tf.nn.relu  # 激活函数
#         )
#         self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
#         self.conv2 = tf.keras.layers.Conv2D(
#             filters=64,
#             kernel_size=[5, 5],
#             padding='same',
#             activation=tf.nn.relu
#         )
#         self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
#         self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
#         self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
#         self.dense2 = tf.keras.layers.Dense(units=10)
#
#         self.VOCAB_SIZE = 10000
#         self.EMBED_DIM = 128
#
#
#
#
#     def call(self, inputs_pic,inputs_text):         # [batch_size, 28, 28, 1]
#
#         # 注意力机制
#         # x_pic=attention.channel_attention(inputs_pic)
#         # x_pic=attention.spatial_attention(inputs_pic)
#         print("进了")
#         # 卷积池化
#         #x_pic=tf.reshape(inputs_pic,(None,500,500,3))
#         x_pic=self.conv1(x_pic)
#         print("jinle2")
#         x_pic = self.pool1(x_pic)  # [batch_size, 14, 14, 32]
#         x_pic = self.conv2(x_pic)  # [batch_size, 14, 14, 64]
#         x_pic = self.pool2(x_pic)  # [batch_size, 7, 7, 64]
#         x_pic = self.flatten(x_pic)
#         #inputs_text = torch.from_numpy(inputs_text)
#         inputs_text=tf.convert_to_tensor(inputs_text, name='t')
#         x_text=Embedding(self.VOCAB_SIZE, self.EMBED_DIM, mask_zero=True)(inputs_text)
#         x_text=LSTM(64,return_sequences="true")(x_text)
#         x_text=LSTM(64,return_sequences="false")(x_text)
#         output=x_text
#         merged=layers.concatenate([x_pic,x_text],axis=-1)
#         predictions=Dense(3,activation='softmax')
#         merged = self.dense2(merged)
#         output = tf.nn.softmax(merged)
#         print(output.type)
#         return output
#         # return predictions

def getPrecision(y_true, y_pred):
    """Batch-wise precision metric: TP / (TP + FP), via Keras backend ops."""
    ones_true = K.ones_like(y_true)
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    # Total negatives: count entries where y_true is 0.
    neg_total = -K.sum(K.round(K.clip(y_true - ones_true, -1, 0)))
    # True negatives: both y_true and y_pred round to 0.
    true_neg = K.sum(K.round(K.clip((y_true - ones_true) * (y_pred - K.ones_like(y_pred)), 0, 1)))
    false_pos = neg_total - true_neg
    # epsilon() guards against division by zero when there are no positives.
    return true_pos / (true_pos + false_pos + K.epsilon())


def getRecall(y_true, y_pred):
    """Batch-wise recall metric: TP / (TP + FN), via Keras backend ops."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    # All actual positives in the batch.
    pos_total = K.sum(K.round(K.clip(y_true, 0, 1)))
    false_neg = pos_total - true_pos
    # epsilon() guards against division by zero when there are no positives.
    return true_pos / (true_pos + false_neg + K.epsilon())

if __name__ == '__main__':

    num_epochs = 5  # pass over the whole dataset 5 times
    batch_size = 5  # 5 images per batch; one parameter update per batch
    learning_rate = 0.001
    # model = CNNA()
    # pic_np, text_np, label_np = get_data.get_data()

    # Part 1: layer construction

    # NOTE(review): this Adam optimizer only drives the manual training loop
    # at the bottom of the file; compile() below uses RMSprop — confirm the
    # two training paths are both intended.
    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)

    # num_batches = total number of parameter updates
    # (images / images-per-batch * passes over the dataset)
    num_batches = int(num_pic // batch_size * num_epochs)

    # input tensor
    data_input_pic = Input(shape=(width,height,3))
    #data_input_text = Input(shape=(50))

    # 1: attention
    # (channel attention enabled; spatial attention left disabled)
    # each image channel is scaled by a learned weight, so some channels
    # receive more attention than others
    x_pic=attention.channel_attention(data_input_pic)

    # x_pic=attention.spatial_attention(data_input_pic)
    # x_pic = conv1(data_input_pic)

    # 2: convolution layer
    x_pic=layers.Conv2D(
        filters=32,  # number of convolution kernels
        kernel_size=[5, 5],  # receptive-field size
        padding='same',  # padding strategy ('valid' or 'same')
        activation=tf.nn.relu  # activation function
    )(x_pic)
    # x_pic = pool1(x_pic)  # [batch_size, 14, 14, 32]

    # 3: pooling layer
    x_pic=layers.MaxPool2D(pool_size=[2, 2], strides=2)(x_pic)

    # x_pic = self.conv2(x_pic)  # [batch_size, 14, 14, 64]
    # x_pic = self.pool2(x_pic)  # [batch_size, 7, 7, 64]
    # x_pic = flatten(x_pic)

    x_pic=Flatten()(x_pic)

    #inputs_text = tf.convert_to_tensor(inputs_text, name='t')
    # x_text = Embedding(VOCAB_SIZE, EMBED_DIM, mask_zero=True)(data_input_text)
    # x_text = LSTM(64, return_sequences="true")(x_text)
    # x_text = LSTM(64, return_sequences="false")(x_text)

    # shared weights
    # merged = layers.concatenate([x_pic, x_text], axis=-1)
    # predictions = Dense(3, activation='softmax')(x_pic)
    #predictions=dense1(x_pic)

    # 4: fully connected layer
    # NOTE(review): Dense(3) has no activation, but the
    # categorical_crossentropy loss below expects probabilities and
    # from_logits is not set — confirm a softmax is applied somewhere.
    predictions = layers.Dense(3, name="digital")(x_pic)
    # merged = dense2(merged)
    # output = tf.nn.softmax(merged)
    #print(predictions.type)
    # output=model(data_input_pic)


    # Part 2: model configuration

    new_model = Model(data_input_pic,predictions)

    # load the data
    pic_np, text_np, label_one_hot_np=get_data.get_data()


    new_model.compile(optimizer=RMSprop(), loss="categorical_crossentropy",
                          metrics=['accuracy',getRecall,getPrecision])  # metrics shown during training; the last two are the custom ones defined above

    # Callback that records loss/accuracy per batch and per epoch so the
    # training curves can be plotted afterwards.
    class LossHistory(keras.callbacks.Callback):

        def on_train_begin(self, logs={}):
            # One list per granularity ('batch'/'epoch') for each metric.
            self.losses = {'batch': [], 'epoch': []}
            self.accuracy = {'batch': [], 'epoch': []}
            self.val_loss = {'batch': [], 'epoch': []}
            self.val_acc = {'batch': [], 'epoch': []}

        def _record(self, granularity, logs):
            # Append the current metric values (None when absent from logs).
            self.losses[granularity].append(logs.get('loss'))
            self.accuracy[granularity].append(logs.get('acc'))
            self.val_loss[granularity].append(logs.get('val_loss'))
            self.val_acc[granularity].append(logs.get('val_acc'))

        def on_batch_end(self, batch, logs={}):
            self._record('batch', logs)

        def on_epoch_end(self, batch, logs={}):
            self._record('epoch', logs)

        def loss_plot(self, loss_type):
            # Plot the accuracy curve(s); validation accuracy is only
            # recorded at epoch granularity.
            steps = range(len(self.losses[loss_type]))
            plt.figure()
            plt.plot(steps, self.accuracy[loss_type], 'r', label='train acc')
            if loss_type == 'epoch':
                plt.plot(steps, self.val_acc[loss_type], 'b', label='val acc')
            plt.grid(True)
            plt.xlabel(loss_type)
            plt.ylabel('acc')
            plt.legend(loc="upper right")
            plt.show()

    history = LossHistory()

    # Part 3: training.  First a standard Keras fit (the callback records
    # the accuracy curves), then a manual cost-sensitive loop.
    new_model.fit(pic_np, label_one_hot_np, epochs=2, batch_size=5,shuffle=True,validation_split=0.3,callbacks=[history])
    history.loss_plot('epoch')

    # Save the trained model, then reload it.
    print("Saving model to disk \n")
    mp = "iris_model.h5"
    new_model.save(mp)
    # BUG FIX: previously this loaded 'my_model.h5', a file that is never
    # written; reload the file we just saved instead.
    model = load_model(mp)

    # Cost-sensitive table: entry [pred][true] is the weight applied to a
    # sample's loss when class `pred` is predicted and `true` is the ground
    # truth.  Confusing the rare class (index 0) with a frequent one costs
    # 2000/15; the opposite direction costs only 15/2000.
    Cost_sensitive_table = tf.constant([[1, 2000 / 15, 2000 / 15],
                                        [15 / 2000, 1, 1],
                                        [15 / 2000, 1, 1]], dtype=tf.float32)

    # num_batches = number of parameter updates in the manual loop.
    for batch_index in range(num_batches):
        x_pic, y = get_batch(pic_np, label_one_hot_np, batch_size)  # one training batch
        # (the redundant new_model.predict() call before the tape was removed;
        #  the forward pass inside the tape is the one used for gradients)

        with tf.GradientTape() as tape:
            y_pred = new_model(x_pic)
            # BUG FIX: labels are one-hot encoded, so use categorical (not
            # sparse) crossentropy; keep the per-sample losses so each one
            # can receive its own cost-sensitive weight.
            per_sample_loss = tf.keras.losses.categorical_crossentropy(y_true=y, y_pred=y_pred)

            # BUG FIX: the original compared whole prediction tensors against
            # Python lists (ambiguous truth value on batches) and indexed a
            # nested Python list with a tuple (TypeError).  Look the weight
            # up per sample via argmax + gather_nd instead.
            pred_cls = tf.argmax(y_pred, axis=-1)
            true_cls = tf.argmax(y, axis=-1)
            Cost_sensitive = tf.gather_nd(Cost_sensitive_table,
                                          tf.stack([pred_cls, true_cls], axis=-1))

            # Different (predicted, true) outcomes weight the loss differently.
            loss = tf.reduce_mean(per_sample_loss * Cost_sensitive)

            print("batch %d: loss %f" % (batch_index, loss.numpy()))
        grads = tape.gradient(loss, new_model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, new_model.variables))