from keras.layers import Input, Dense, concatenate, Lambda, RepeatVector, Softmax
from keras.models import Model
from keras.optimizers import adam_v2
from keras.callbacks import *
# from keras.engine.topology import Layer
import keras.backend as K
import keras.losses as Kloss
import numpy as np
import keras
from keras.utils.vis_utils import plot_model

import transDist
totSum = 25

def genLossTrue(dist2, y_true, changeSub, tot_sum=25):
    """Build the length-12 training target distribution for a true label.

    Args:
        dist2: length-12 numpy vector for the opponent piece distribution
            (already 0<->changeSub swapped by the caller).
        y_true: true piece index in [0, 11].
        changeSub: index that was swapped with position 0 upstream.
        tot_sum: total mass of the returned target; mirrors the module-level
            ``totSum`` constant (default 25).

    Returns:
        A length-12 numpy array whose entries sum to ``tot_sum``.
    """
    # Undo the 0<->changeSub swap for the label itself.
    if y_true == changeSub:
        switch_y_true = 0
    elif y_true == 0:
        switch_y_true = changeSub
    else:
        switch_y_true = y_true

    # One-hot target carrying the full mass at the true index.
    dist2_true_onehot = np.zeros((12,))
    dist2_true_onehot[y_true] = tot_sum

    # If dist2 is already one-hot, just return the scaled one-hot target.
    if np.where(dist2)[0].shape[0] == 1:
        return dist2_true_onehot

    # Mines (10), bombs (11) and the flag (0) get a uniform target;
    # every other piece gets a normal-shaped target.
    if y_true in (0, 10, 11):
        dist2_true_norm = transDist.toUniform(dist2, switch_y_true)
    else:
        # The normal distribution must be generated from the un-swapped dist2.
        original_dist2 = dist2.copy()
        original_dist2[0], original_dist2[changeSub] = (
            original_dist2[changeSub], original_dist2[0])
        dist2_true_norm = transDist.toNorm(original_dist2, y_true)
        # Fill the flag/mine/bomb slots with the tail probability taken
        # from the side of the curve far from the peak.
        if y_true <= 5:  # peak on the left -> take the right tail
            dist2_true_norm[0] = dist2_true_norm[11]
            dist2_true_norm[10] = dist2_true_norm[11]
        else:  # peak on the right -> take the left tail
            dist2_true_norm[10] = dist2_true_norm[0]
            dist2_true_norm[11] = dist2_true_norm[0]
        # Re-apply the 0<->changeSub swap so the target matches dist2's layout.
        dist2_true_norm[0], dist2_true_norm[changeSub] = (
            dist2_true_norm[changeSub], dist2_true_norm[0])

    if np.isnan(dist2_true_norm).any():
        print(dist2)
        print('fuck genloss')
    return dist2_true_norm


# NOTE(review): dead code — a commented-out custom Keras layer preserved as a
# module-level string literal. It has no runtime effect and can be deleted
# (it also depends on the commented-out `keras.engine.topology.Layer` import).
'''class mulFirst(Layer):
    def __init__(self, **kwargs):
        super(mulFirst, self).__init__(**kwargs)

    def build(self, input_shape):
        self.zeroDim=input_shape[0][0]
        super(mulFirst, self).build(input_shape)  # 一定要在最后调用它

    def call(self, x, **kwargs):
        assert isinstance(x, list)
        k, dist2 = x

        mask=K.zeros((self.zeroDim,12))
        for i in range(self.zeroDim):
            mask[i,0]=1

        mask*=K.transpose(k)
        dist2=dist2*mask
        return dist2

    def compute_output_shape(self, input_shape):
        return (self.zeroDim,12)'''


class ExitNaN(keras.callbacks.Callback):
    """Callback that aborts the process as soon as the batch loss goes NaN.

    NaN fails every comparison, so ``not loss >= 0`` is True exactly when
    the loss is NaN (or negative, which this model never produces).
    """

    def on_train_begin(self, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        # Guard against logs being None or missing 'loss': the original
        # `logs.get('loss') >= 0` raised TypeError on None in Python 3.
        loss = logs.get('loss') if logs else None
        if loss is not None and not loss >= 0:
            print('fuck callback')
            exit(1)

class PG:
    """Policy-gradient style model predicting a 12-way piece distribution
    (scaled to sum to ``totSum``) from two distributions and a scalar sum.

    Training samples are accumulated in replay buffers by ``predict`` and
    consumed by ``train``/``verify``.
    """

    def __init__(self):
        self.build_model()
        # Replay buffers: inputs (dist1, dist2) and targets (replayY).
        self.replayDist1 = []
        self.replayDist2 = []
        self.replayY = []

        '''if os.path.exists('pg.h5'):
            self.predModel.load_weights('pg.h5')'''
        # Dump the architecture diagram for inspection.
        plot_model(self.predModel, './model.bmp', show_shapes=True)

    def build_model(self):
        """Create and compile ``self.predModel``.

        Architecture: concat(dist1, dist2, sum) -> symmetric relu MLP
        (16-32-64-64-32-16) -> Dense(12) -> abs -> softmax -> scale by totSum,
        so the output is a non-negative vector summing to totSum.
        """
        dist1 = Input(shape=(12,), name='dist1')
        dist2 = Input(shape=(12,), name='dist2')
        total = Input(shape=(1,), name='sum')

        x = concatenate([dist1, dist2, total])
        for width in (16, 32, 64, 64, 32, 16):
            x = Dense(width, activation='relu')(x)

        x = Dense(12, activation='relu')(x)
        x = Lambda(lambda t: K.abs(t))(x)
        x = Softmax()(x)
        # Rescale the softmax so the outputs sum to totSum instead of 1.
        output = Lambda(lambda t: t * totSum)(x)

        self.predModel = Model(inputs=[dist1, dist2, total], outputs=output)
        self.predModel.compile(
            loss=self.loss,
            optimizer=adam_v2.Adam(lr=0.01, beta_1=0.95, decay=0.1))

    def predict(self, dist1, dist2, dist2_true=None, changeSub=None):
        """Predict the opponent distribution for one (dist1, dist2) pair.

        When ``dist2_true`` is given, also records a training sample
        (via ``genLossTrue``) into the replay buffers.

        Returns:
            A length-12 numpy array; near-zero entries are floored to 0.1
            wherever dist2 says the piece is still possible.
        """
        d1, d2, ts = np.array([dist1]), np.array([dist2]), np.array([totSum])
        dist2_pred = self.predModel.predict([d1, d2, ts])
        if np.isnan(dist2_pred).any():
            print('fuck prob')
            print(dist2_pred)

        if dist2_true is not None:
            train_true = genLossTrue(dist2, dist2_true, changeSub)
            self.replayDist1.append(dist1)
            self.replayDist2.append(dist2)
            self.replayY.append(train_true)

        ret = dist2_pred[0]
        # Never output (near) zero probability for a piece that dist2 still
        # considers possible.
        for i in range(len(dist2)):
            if ret[i] <= 1e-2 and dist2[i] > 1e-2:
                ret[i] = 0.1
        return ret

    def loss(self, y_true, y_pred):
        """Categorical cross-entropy between target and predicted vectors."""
        return Kloss.categorical_crossentropy(y_true, y_pred)
        # return Kloss.mean_squared_error(y_true, y_pred)

    def _replay_batch(self):
        """Assemble (x, y) training arrays from the replay buffers."""
        total = np.full((len(self.replayDist1),), totSum)
        d1, d2 = np.array(self.replayDist1), np.array(self.replayDist2)
        return [d1, d2, total], np.array(self.replayY)

    def _reset_replay(self):
        """Clear all replay buffers."""
        self.replayDist1 = []
        self.replayDist2 = []
        self.replayY = []

    def train(self):
        """Fit on the replay buffers, save weights, then clear the buffers.

        Training aborts the process via ExitNaN if the loss goes NaN.
        """
        x, y = self._replay_batch()
        self.predModel.fit(x, y, callbacks=[ExitNaN()], epochs=200)

        print('evaluate:', self.predModel.evaluate(x, y))
        self._reset_replay()
        self.predModel.save_weights('pg.h5')
        print('lr: ', self.predModel.optimizer.lr)

    def verify(self):
        """Evaluate on the replay buffers without training, then clear them."""
        x, y = self._replay_batch()
        print('verify evaluate:', self.predModel.evaluate(x, y))
        self._reset_replay()

