import numpy as np
import keras
from keras import Input, Model
from keras.engine.saving import load_model
from keras.layers import LSTM, Dense, Embedding, Flatten, Conv2D, MaxPool2D
from keras_applications.densenet import layers
import keras.backend as K
from keras import Sequential
import tensorflow as tf
import attention
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# from tensorflow import keras
from keras import layers
from keras import Input
from keras.models import Model

from keras.optimizers import RMSprop
import os
import parameter
import Cost_Sensitive
import Estimate_Utils

import get_data_new

batch_size = parameter.batch_size  # 500  # images per training batch; one parameter update per batch
num_pic = parameter.num_pic  # 3195  # total number of images in the dataset
num_epochs = parameter.num_epochs  # 30  # number of full passes over the dataset
width = parameter.width  # 100  # input image width in pixels
height = parameter.height  # 100  # input image height in pixels


class CNN(tf.keras.Model):
    """Two-conv-layer classifier producing a 3-class probability distribution.

    Architecture: Conv(32, 5x5, same) -> MaxPool(2x2) -> Conv(32, 5x5, same)
    -> MaxPool(2x2) -> Flatten -> Dense(128, relu) -> Dense(3) -> softmax.
    For a (batch, 100, 100, C) input the flattened feature size is 25*25*32.
    """

    def __init__(self):
        super().__init__()

        # NOTE(review): an attention layer (attention.channel_attention) was
        # intended to be applied inside call(), not declared here; it is
        # currently disabled.

        self.conv1 = tf.keras.layers.Conv2D(
            filters=32,            # number of convolution kernels
            kernel_size=[5, 5],    # receptive-field size
            padding='same',        # 'same' keeps spatial dimensions
            activation=tf.nn.relu  # activation function
        )
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)

        self.conv2 = tf.keras.layers.Conv2D(
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu
        )
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)

        # Flatten generalizes the original hard-coded
        # Reshape(target_shape=(25*25*32,)): identical output for 100x100
        # inputs, but no longer breaks if the input resolution changes.
        self.flatten = tf.keras.layers.Flatten()

        self.dense1 = tf.keras.layers.Dense(units=128, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(units=3)  # one logit per class

    def call(self, inputs):
        """Forward pass.

        Args:
            inputs: image batch, e.g. [batch_size, 100, 100, channels].

        Returns:
            [batch_size, 3] tensor of class probabilities (each row sums to 1).
        """
        x = self.conv1(inputs)   # [batch, 100, 100, 32]
        x = self.pool1(x)        # [batch, 50, 50, 32]
        x = self.conv2(x)        # [batch, 50, 50, 32]
        x = self.pool2(x)        # [batch, 25, 25, 32]
        x = self.flatten(x)      # [batch, 25*25*32]
        x = self.dense1(x)       # [batch, 128]
        x = self.dense2(x)       # [batch, 3] logits
        output = tf.nn.softmax(x)  # convert logits to a probability vector
        return output


if __name__ == '__main__':

    # 1: instantiate the model and optimizer.
    model = CNN()
    learning_rate = 0.001
    # `learning_rate=` is the supported keyword argument; `lr=` is deprecated.
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

    # 2: load the dataset (images, text features, one-hot labels).
    pic_np, text_np, label_one_hot_np = get_data_new.get_data()

    # num_batches = total number of parameter updates:
    # (images // images-per-batch) * passes over the dataset.
    num_batches = int(num_pic // batch_size * num_epochs)

    for batch_index in range(num_batches):
        # 2.2: draw one training batch.
        # X: (batch_size, H, W, C) images; y: (batch_size, 3) one-hot labels.
        X, y = get_data_new.get_batch(pic_np, label_one_hot_np, batch_size)

        X = tf.convert_to_tensor(X)
        y = tf.convert_to_tensor(y)

        with tf.GradientTape() as tape:
            # [batch_size, 3] probability distribution.
            y_pred = model(X)

            # BUG FIX: y is one-hot encoded, so categorical_crossentropy is
            # the correct loss; sparse_categorical_crossentropy expects
            # integer class indices and was wrong here.
            # [batch_size] per-example loss.
            loss = tf.keras.losses.categorical_crossentropy(y_true=y, y_pred=y_pred)

            # Cost-sensitive learning, method 2. (Method 1 —
            # Cost_Sensitive.Find_FN + sensitive_loss — broke
            # backpropagation and was abandoned.)
            loss = Cost_Sensitive.sensitive_module(y_pred, y, loss)

            loss = tf.reduce_mean(loss)

            print("batch %d: loss %f" % (batch_index, loss))

        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))

        # --- per-batch accuracy check on the training batch ---
        # [batch_size, 3] probabilities.
        y_pred_test = model(X)
        # [batch_size, 3] -> [batch_size] int64 predicted class indices.
        y_pred_test = tf.argmax(y_pred_test, axis=1)
        # int64 -> int32 indices.
        y_pred_test = tf.cast(y_pred_test, dtype=tf.int32)
        print('y_pred_test', y_pred_test)

        # BUG FIX: y is one-hot, so convert it to class indices before
        # comparing with the predicted indices; the original compared
        # indices against one-hot rows, yielding a wrong accuracy.
        y_true_idx = tf.cast(tf.argmax(y, axis=1), dtype=tf.int32)
        correct = tf.equal(y_pred_test, y_true_idx)
        correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))

        acc = correct / batch_size
        print(batch_index, 'test:acc=', acc)

        # Recall. NOTE(review): getRecall receives one-hot y and index
        # predictions — confirm Estimate_Utils expects those formats.
        recall = Estimate_Utils.getRecall(y, y_pred_test)
        print(batch_index, 'test:recall', recall)