import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import cv2
import subprocess
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
import tensorflow_probability as tfp
from math import log10,exp,sqrt
from know import KNOW
from know import MEASURE
from know import PARAM

# dataset root directory and the sub-folder holding the training split
path,folder="datasetsedd","train"
# shuffle-buffer size and mini-batch size for the tf.data pipeline
train_size,batch_size=10000,32

# physics/measurement helpers from know.py; `target` is the square image side in pixels
# NOTE(review): KNOW's positional arguments are opaque here — confirm against know.py
_know,_forward,_,target=KNOW(36,50.0,1.0,10,50,150.0,15.0,5.0),MEASURE(),PARAM(),64
# ZOOM and GROUND are only referenced by the commented-out forward-modelling code below
ZOOM,GROUND,TARGET=1,1200.0,target

def prepare_data():
    """Load paired .dat images and .res label vectors into a tf.data pipeline.

    Each `<stem>.dat` file in `path/folder` is a whitespace text grid read
    with np.genfromtxt; its companion `<stem>.res` carries one float per line.

    Returns:
        (dataset, mean, std): a shuffled, batched, prefetched
        tf.data.Dataset of (image, label) pairs, plus the global pixel
        mean and std of the images.

    NOTE(review): mean/std are computed but the images are NOT normalized
    here, while train() de-normalizes predictions with them — confirm
    whether normalization was meant to be applied to `images`.
    """
    # top-level file listing of the dataset folder (no recursion needed)
    _,_,filenames=next(os.walk(os.path.join(path,folder)))
    # strict suffix match: the old `".dat" in f` also matched e.g. ".data"
    filenames=[f for f in filenames if f.endswith(".dat")]

    images,labels=[],[]
    for name in filenames:
        images.append(np.genfromtxt(os.path.join(path,folder,name)))

        stem=os.path.splitext(name)[0]
        with open(os.path.join(path,folder,stem+".res"),"r") as fp:
            labels.append(np.array([float(line) for line in fp]))

    images=np.array(images).reshape((-1,target,target,1)).astype(np.float32)
    mean,std=np.mean(images),np.std(images)
    labels=np.array(labels).astype(np.float32)

    dataset=(tf.data.Dataset.from_tensor_slices((images,labels))
             .shuffle(train_size)
             .batch(batch_size)
             .prefetch(tf.data.AUTOTUNE))

    return dataset,mean,std

# closed form kl loss computation between variational posterior q(z|x) and unit Gaussian prior p(z)
def kl_loss(sample,z_mu,z_rho):
    """Closed-form KL(q(z|x) || N(0, I)).

    sigma is parameterized as softplus(z_rho). `sample` is unused by the
    closed form; it is kept so the signature matches the Monte-Carlo
    variant left in the comments.

    Returns the KL summed over the latent dimension and averaged over the
    batch dimension.
    """
    sigma_sq=tf.square(tf.math.softplus(z_rho))
    # per-latent-dim KL: 0.5 * (mu^2 + sigma^2 - log sigma^2 - 1)
    per_dim=0.5*(tf.square(z_mu)+sigma_sq-tf.math.log(sigma_sq)-1.0)

    # sum over latent dims, then mean over the batch
    return tf.reduce_mean(tf.reduce_sum(per_dim,axis=1))

def elbo(sample,z_mu,z_rho,decoded_img,original_img):
    """Return the two terms of the negative ELBO: (reconstruction, KL).

    NOTE(review): reduce_sum over axis=1 on a 4-D image tensor sums only
    the row axis; columns/channels are averaged by reduce_mean — confirm
    this scaling is intended.
    """
    squared_err=tf.square(original_img-decoded_img)
    reconstruction=tf.reduce_mean(tf.reduce_sum(squared_err,axis=1))
    return reconstruction,kl_loss(sample,z_mu,z_rho)

def get_conditional_encoder(latent_dim):
    """Build the convolutional encoder q(z|x): image -> (mu, rho)."""
    inputs=tf.keras.Input(shape=(target,target,1))
    x=inputs
    # three strided conv stages with widening filter counts
    for n_filters in (16,32,64):
        x=tf.keras.layers.Conv2D(filters=n_filters,kernel_size=3,strides=(2,2))(x)
        x=tf.keras.layers.LeakyReLU()(x)
        x=tf.keras.layers.Dropout(0.2)(x)
    flat=tf.keras.layers.Flatten()(x)
    # two linear heads: posterior mean and (pre-softplus) scale
    mu=tf.keras.layers.Dense(units=latent_dim)(flat)
    rho=tf.keras.layers.Dense(units=latent_dim)(flat)

    return tf.keras.Model(inputs=inputs,outputs=[mu,rho])

def get_conditional_encoder1():
    """Alternative encoder front-end: image -> flattened conv features."""
    inputs=tf.keras.Input(shape=(target,target,1))
    h=tf.keras.layers.Conv2D(filters=32,kernel_size=3,strides=(2,2),activation='relu')(inputs)
    h=tf.keras.layers.Conv2D(filters=64,kernel_size=3,strides=(2,2),activation='relu')(h)
    features=tf.keras.layers.Flatten()(h)

    return tf.keras.Model(inputs=inputs,outputs=[features])

def get_conditional_encoder2(latent_dim,input_size):
    """Encoder head mapping a flat feature vector to (mu, rho).

    Fix: `shape=(input_size)` is just a parenthesized int, not a shape
    tuple — Keras expects a tuple of dimensions, so use `(input_size,)`.
    """
    inputs=tf.keras.Input(shape=(input_size,))
    mu=tf.keras.layers.Dense(units=latent_dim)(inputs)
    rho=tf.keras.layers.Dense(units=latent_dim)(inputs)

    return tf.keras.Model(inputs=inputs,outputs=[mu,rho])

def get_conditional_decoder(latent_dim):
    """Build the decoder p(x|z,y): concat([z, label]) -> 64x64x1 image.

    Fixes: the label length term was written out four times — factor it as
    4 * NFREQ * (XRANGE - 2*XEXPAND) — and the Input shape is now an
    explicit tuple instead of a bare parenthesized int.
    """
    # conditioning vector: four response blocks of NFREQ*(XRANGE-2*XEXPAND) values each
    label_len=4*_forward.NFREQ*int(_know.XRANGE-2*_know.XEXPAND)
    z=tf.keras.Input(shape=(latent_dim+label_len,))
    x=tf.keras.layers.Dense(units=16*16*8,activation='relu')(z)
    x=tf.keras.layers.Reshape(target_shape=(16,16,8))(x)
    # transposed-conv stages: 16x16 -> 16x16 -> 32x32 -> 64x64
    x=tf.keras.layers.Conv2DTranspose(filters=64,kernel_size=3,strides=1,padding='same')(x)
    x=tf.keras.layers.LeakyReLU()(x)
    x=tf.keras.layers.Dropout(0.2)(x)
    x=tf.keras.layers.Conv2DTranspose(filters=32,kernel_size=3,strides=2,padding='same')(x)
    x=tf.keras.layers.LeakyReLU()(x)
    x=tf.keras.layers.Dropout(0.2)(x)
    x=tf.keras.layers.Conv2DTranspose(filters=16,kernel_size=3,strides=2,padding='same')(x)
    x=tf.keras.layers.LeakyReLU()(x)
    x=tf.keras.layers.Dropout(0.2)(x)
    decoded_img=tf.keras.layers.Conv2DTranspose(filters=1,kernel_size=3,strides=1,padding='same')(x)

    return tf.keras.Model(inputs=z,outputs=[decoded_img])

class Conditional_VAE(tf.keras.Model):
    """Conditional VAE: conv encoder q(z|x) + label-conditioned decoder p(x|z,y)."""

    def __init__(self,latent_dim):
        super().__init__()
        self.latent_dim=latent_dim
        self.encoder_block=get_conditional_encoder(latent_dim=latent_dim)
        print(self.encoder_block.summary())
        self.decoder_block=get_conditional_decoder(latent_dim)
        print(self.decoder_block.summary())

    def call(self,img,labels):
        """Encode `img`, reparameterize, decode conditioned on `labels`.

        Returns (z_mu, z_rho, z, decoded_img).
        """
        z_mu,z_rho=self.encoder_block(img)

        # reparameterization trick: z = mu + softplus(rho) * eps, eps ~ N(0, I)
        # tf.shape keeps this valid for a dynamic/unknown batch dimension
        epsilon=tf.random.normal(shape=tf.shape(z_mu),mean=0.0,stddev=1.0)
        z=z_mu+tf.math.softplus(z_rho)*epsilon

        # decoder p(x|z,y).
        # Fix: use tf.concat, not np.concatenate — converting to numpy
        # detached z from the GradientTape, so the encoder received no
        # gradient from the reconstruction loss (only from the KL term).
        z_lbl_concat=tf.concat((z,tf.cast(labels,z.dtype)),axis=1)
        decoded_img=self.decoder_block(z_lbl_concat)

        return z_mu,z_rho,z,decoded_img

def train(latent_dim,beta,epochs,ds,mean,std):
    """Train the conditional VAE; after every epoch, sample the decoder on
    the test split and save prediction/difference images, keeping the best
    (lowest-SSE) sample per test case.

    Args:
        latent_dim: size of the latent vector z.
        beta: weight of the KL term in the loss.
        epochs: number of training epochs.
        ds: batched tf.data.Dataset of (image, label) pairs.
        mean, std: training-set statistics used to de-normalize samples.

    Side effects: writes `decoder.keras`, per-case result folders under
    `path/test/<stem>/result/`, and an `error.dat` loss log.
    """
    model=Conditional_VAE(latent_dim)
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
    kl_loss_tracker=tf.keras.metrics.Mean(name='kl_loss')
    mse_loss_tracker=tf.keras.metrics.Mean(name='mse_loss')

    error=[]
    for index in range(epochs):
        for imgs,labels in ds:
            with tf.GradientTape() as tape:
                z_mu,z_rho,sample,decoded_imgs=model(imgs,labels)
                mse,kl=elbo(sample,z_mu,z_rho,decoded_imgs,imgs)
                loss=mse+beta*kl

            gradients=tape.gradient(loss,model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,model.trainable_variables))

            kl_loss_tracker.update_state(beta*kl)
            mse_loss_tracker.update_state(mse)

        # snapshot the decoder and reload it for test-set inference
        model.decoder_block.save("decoder.keras")
        decoder=tf.keras.models.load_model("decoder.keras")

        test_folder="test"
        _,_,filenames=next(os.walk(os.path.join(path,test_folder)))
        filenames=[f for f in filenames if f.endswith(".dat")]
        # best (lowest) SSE seen so far per test case, seeded high
        best_sse=[100.0]*len(filenames)
        for j,each in enumerate(filenames):
            data=np.genfromtxt(os.path.join(path,test_folder,each))
            stem=os.path.splitext(each)[0]
            with open(os.path.join(path,test_folder,stem+".res"),"r") as fp:
                content=np.array([float(line) for line in fp]).astype(np.float32)
            content=tf.reshape(content,[-1,content.shape[0]])

            # makedirs creates both <stem>/ and <stem>/result/ in one call
            result_dir=os.path.join(path,test_folder,stem,"result")
            os.makedirs(result_dir,exist_ok=True)

            for _ in range(1):
                # sample z from the prior, condition on the measured responses
                z=tf.random.normal(shape=(1,latent_dim),mean=0.0,stddev=1.0)
                z_lbl_concat=np.concatenate((z,content),axis=1)
                preds=decoder.predict(z_lbl_concat)
                np.savetxt(os.path.join(result_dir,str(index)+".pred"),preds.reshape((target,target)))

                difference=np.absolute(data-preds.reshape((target,target)))
                sse=float(tf.reduce_mean(tf.reduce_sum(tf.square(difference.reshape((target*target))))))

                # absolute-difference image; snapshot it if this is a new best
                # (best_sse[j] is intentionally updated only below)
                plt.figure()
                plt.imshow(difference*255.,cmap='gray')
                plt.axis('Off')
                plt.savefig(os.path.join(result_dir,str(index)+"_.jpg"))
                if sse<best_sse[j]:
                    plt.savefig(os.path.join(path,test_folder,stem,"result_.jpg"))
                plt.cla()
                plt.clf()
                plt.close()

                # de-normalized generated section (mean/std from the training set)
                generated_digit=tf.reshape(preds[0],[target,target])
                generated_digit=((generated_digit*std)+mean)*255.
                plt.figure()
                plt.imshow(generated_digit.numpy(),cmap='gray')
                plt.axis('Off')
                plt.savefig(os.path.join(result_dir,str(index)+".jpg"))
                if sse<best_sse[j]:
                    best_sse[j]=sse
                    plt.savefig(os.path.join(path,test_folder,stem,"result.jpg"))
                    np.savetxt(os.path.join(path,test_folder,stem,"result.pred"),preds.reshape((target,target)))
                plt.cla()
                plt.clf()
                plt.close()

        epoch_kl,epoch_mse=kl_loss_tracker.result(),mse_loss_tracker.result()
        error.append([index,epoch_kl*100.0,epoch_mse*100.0])
        print(f'epoch:{index},mse:{epoch_mse:.4f},kl_div:{epoch_kl:.4f}')

        kl_loss_tracker.reset_state()
        mse_loss_tracker.reset_state()

    # tab-separated loss log: epoch, scaled KL, scaled reconstruction error
    with open("error.dat","w") as fp:
        for det in error:
            fp.write("\t".join([str(i) for i in det])+"\n")

if __name__=='__main__':

    # hyper-parameters: KL weight, epoch count, latent dimensionality
    beta=0.5
    epochs=2000
    latent_dim=50

    ds,mean,std=prepare_data()
    train(latent_dim,beta,epochs,ds,mean,std)
