#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 14:07:39 2021

@author: ljl
"""
#import tensorflow as tf    #1.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
print("tensorflow:  ", tf.__version__)

import cv2 as cv
import numpy as np
import PIL.Image as Image
import sys, pickle
import os
import glob
import copy
from skimage import io, transform
import matplotlib.pyplot as plt
from tensorflow.python.keras.datasets.cifar import load_batch
from tensorflow.python.keras import backend as K

import stat
import shutil

##########

class FILE_op():
    """File-system helpers: directory creation, pickle (de)serialisation
    and plain-text read/write."""

    def __init__(self):
        # Class tag kept from the original implementation (unused).
        className = 'FILE_OP'

    def createDir(self, dirPath):
        """Create dirPath (including parents) if it does not exist yet.

        BUGFIX: exist_ok avoids the race between the old exists() check
        and makedirs().
        """
        os.makedirs(dirPath, exist_ok=True)

    def pickle_write(self, data, path):
        '''
        Serialise `data` to `path` with pickle.
        path='test_pkl.pkl'
        '''
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    def pickle_read(self, path):
        """Load and return the pickled object stored at `path`.

        Raises AssertionError when the file does not exist.
        NOTE(review): pickle.load must only be used on trusted files.
        """
        assert os.path.exists(path)
        with open(path, 'rb') as f:
            return pickle.load(f)

    def writeFile(self, path, infos):
        """
        Overwrite `path`, writing every string of `infos` back to back.

        open(path, 'w')
        infos: [str1, str2, ...]
        """
        with open(path, 'w') as ofs:
            for info in infos:
                ofs.write(info)

    def readFile(self, path):
        """Return all lines of the text file at `path` (readlines result)."""
        with open(path, 'r') as f:
            return f.readlines()

class image_op():
    """Image-set helpers: augmentation pipeline, plotting, random batching
    and packing/unpacking many small images into one "spliced" image.

    Only `_image_flip` is implemented here; the other `_image_*`
    augmentations referenced by images_preProcess are provided by the
    subclass `imageSet_op`.
    """

    def __init__(self):
        className = 'image op'  # unused tag, kept from the original code

    def images_preProcess(self, images, mode='train',
                        flip=False,
                        crop=False, crop_shape=(24,24,3), cropMode='centor',
                        whiten=False,
                        noise=False, noise_mean=0, noise_std=0.01,
                        hue=False, max_delta_hue=0.05,
                        contrast=False, lower_ct=0.3, upper_ct=1.0,
                        brightness=False, max_delta_bs=0.2,
                        saturation=False, lower_sn=0.0, upper_sn=2.0,
        ):
        """Apply the enabled augmentations in sequence to images [N,H,W,C].

        BUGFIX: the original applied every enabled step to the raw input
        array, so only the last enabled augmentation survived.  The steps
        are now chained through `outImages`.
        """
        outImages = images
        # image crop ('train' and 'test' intentionally share the same call)
        if crop:
            outImages = self._image_crop(outImages, shape=crop_shape, cropMode=cropMode)
        # image flip
        if flip:
            outImages = self._image_flip(outImages)
        # image whitening
        if whiten:
            outImages = self._image_whitening(outImages)
        # image noise
        if noise:
            outImages = self._image_noise(outImages, mean=noise_mean, std=noise_std)
        if hue:
            outImages = self._image_hue(outImages, max_delta=max_delta_hue)
        if contrast:
            outImages = self._image_contrast(outImages, lower=lower_ct, upper=upper_ct)
        if brightness:
            outImages = self._image_brightness(outImages, max_delta=max_delta_bs)
        if saturation:
            outImages = self._image_saturation(outImages, lower=lower_sn, upper=upper_sn)
        return outImages

    def _image_flip(self, images, mode=1):
        '''
        Flip each image with probability 0.5.
        mode: cv.flip flag, 1 -> left/right flip
        '''
        outImages = images.copy()
        for i in range(images.shape[0]):
            old_image = images[i,:,:,:]
            if np.random.random() < 0.5:
                new_image = cv.flip(old_image, mode)
            else:
                new_image = old_image
            outImages[i,:,:,:] = new_image
        return outImages

    def plot_images(self, images, lable_true, lable_true_names=None, lable_pred=None, smooth=True, is_show=False, path='./plot_images.jpg', x_n=4, y_n=4):
        '''
        Draw an x_n*y_n grid of images with true (and optional predicted)
        labels.  When more than x_n*y_n images are supplied a random subset
        is shown.  Shows the figure when is_show, otherwise saves to `path`.

        images = images_test[0:9]
        label_true = label[0:9]
        plot_images(images, label_true, smooth=False)
        '''
        assert len(images) == len(lable_true) >= x_n*y_n
        if len(images) > x_n*y_n:
            # Draw a random subset of exactly x_n*y_n samples.
            idx = np.random.choice(len(images), size=x_n*y_n, replace=False)
            imgs = images[0:(x_n*y_n), :, :, :]
            for i in range(len(idx)):
                imgs[i, :, :, :] = images[idx[i],:, :, :]
            images = imgs
            lable_true = lable_true[idx]
            if lable_pred is not None:
                lable_pred = lable_pred[idx]

        # create figure with sub-plots
        fig, axes = plt.subplots(x_n,y_n)
        fig.suptitle(path)

        # More vertical spacing is needed when predictions are printed too.
        hspace = 0.3 if lable_pred is None else 0.6
        fig.subplots_adjust(hspace=hspace, wspace=0.3)
        for i, ax in enumerate(axes.flat):
            interpolation = 'spline16' if smooth else 'nearest'

            # plot image
            ax.imshow(images[i, :, :, :], interpolation=interpolation)
            # name of the true class
            if lable_true_names is None:
                label_true_name = lable_true[i]
            else:
                label_true_name = lable_true_names[lable_true[i]]

            # show true and predicted classes
            if lable_pred is None:
                xlabel = 'True: {0}'.format(label_true_name)
            else:
                # name of the predicted class
                if lable_true_names is None:
                    pred_name = lable_pred[i]
                else:
                    pred_name = lable_true_names[lable_pred[i]]
                xlabel = "True: {0}\nPred: {1}".format(label_true_name, pred_name)

            ax.set_xlabel(xlabel)

            # remove ticks from the plot
            ax.set_xticks([])
            ax.set_yticks([])
        if is_show:
            plt.show()
        else:
            plt.savefig(path)

    def plot_image2(self, image):
        """Show one image twice, side by side: raw (nearest) vs smoothed
        (spline16 interpolation)."""
        fig, axes = plt.subplots(1, 2)

        ax0 = axes.flat[0]
        ax1 = axes.flat[1]

        ax0.imshow(image, interpolation='nearest')
        ax1.imshow(image, interpolation='spline16')

        ax0.set_xlabel('Raw')
        ax1.set_xlabel('Smooth')

        plt.show()

    def create_random_batch(self, images, labels, batch_size=64):
        '''
        replace = False,  get batch_size simple and don't place back
        Returns (idx, images[idx, :, :, :], labels[idx, :]).
        '''
        num_images = len(images)
        idx = np.random.choice(num_images, size=batch_size, replace=False)
        return idx, images[idx, :, :, :], labels[idx, :]

    def plot_example_errors(self, images, pred, true_lable):
        '''
        Plot 9 random images with their true and predicted labels.

        pred is an array of the predicted class-number for all images in
        the test-set.

        BUGFIX: the original passed keyword arguments `label_true` /
        `label_pred`, which do not exist on plot_images (it spells them
        `lable_true` / `lable_pred`) and therefore always raised TypeError;
        it also kept the 4x4 default grid, which trips the >= 16 assert
        with a batch of 9, so a 3x3 grid is requested explicitly.
        '''
        idx, x, y = self.create_random_batch(images=images, labels=true_lable, batch_size=9)
        self.plot_images(images=x, lable_true=y, lable_pred=pred[idx], x_n=3, y_n=3)

    def arrayStack_v(self, array_a, array_b):
        """Vertically stack two arrays (np.vstack)."""
        return np.vstack( (array_a, array_b) )

    def splicingImgs(self, images, images_lable, x_n=100, y_n=100, path='./splicimg.jpg'):
        """Tile up to x_n*y_n images (values assumed in [0, 1]) into one big
        JPEG at `path`; unused grid cells are filled with black.  The labels
        and a geometry descriptor are saved next to it as .pkl / .txt."""
        assert len(images) == len(images_lable)
        img = images[0]
        img_empty = img * 0  # black filler for unused grid cells
        N = len(images)
        row, col, c = img.shape

        outImg = Image.new('RGB', (x_n*col, y_n*row), (0,0,0))
        count = 0
        for y in range(1, y_n+1):
            for x in range(1, x_n+1):
                if count < N:
                    img = images[count]
                else:
                    img = img_empty
                # Scale normalised pixels back to uint8 for PIL.
                img = Image.fromarray(np.uint8(img*255))
                outImg.paste(img, ( (x-1)*col, (y-1)*row) )
                count += 1

        file_op = FILE_op()
        fileName = path[0:path.find('.jpg')]
        file_op.pickle_write(data=images_lable[0:count], path= fileName + '.pkl')
        # Descriptor format: 'N<count>row<rows>col<cols>c<channels>'.
        info = 'N' + str(count) + 'row' + str(row) + 'col' + str(col) + 'c' + str(c)
        file_op.writeFile(path=fileName + '.txt', infos=info)
        return outImg.save(path)

    def load_splicingImgs(self, imgPath, imgrow, imgcol, x_n=100, y_n=100):
        '''
        Inverse of splicingImgs: cut the big image at imgPath back into
        [N, imgrow, imgcol, c] tiles (pixels rescaled to [0, 1]) and reload
        the pickled labels.  Returns (tiles, labels).
        '''
        assert os.path.exists(imgPath)

        file_op = FILE_op()
        fileName = imgPath[0:imgPath.find('.jpg')]
        lable = file_op.pickle_read(path=fileName + '.pkl')
        imgsetInfo = file_op.readFile(path=fileName+'.txt')[0]

        inImg = Image.open(imgPath)
        inImg = np.asarray(inImg, dtype='uint8')/255.0
        row, col, c = inImg.shape

        count = 0
        # Parse the tile count out of the 'N<count>row...' descriptor.
        idx1 = imgsetInfo.find('N')+1
        idx2 = imgsetInfo.find('row')
        imgNum = int(imgsetInfo[idx1:idx2] )
        outImg = np.zeros(shape=[imgNum, imgrow, imgcol, c])

        for y in range(y_n):
            if count >= imgNum:
                break
            for x in range(x_n):
                if count >= imgNum:
                    break
                x_ = x*imgcol
                y_ = y*imgrow

                img = inImg[y_:y_+imgrow, x_:x_+imgcol, :]
                outImg[count:count+1, :, :, :] = img
                count += 1

        return outImg, lable


##### image set generation
class imageSet_op(image_op):
    """Augmentations that need a TensorFlow session (hue, contrast,
    brightness, saturation, random crop) plus pure-numpy flip / whitening /
    noise.  Holds one tf.Session for the lifetime of the object."""

    def __init__(self):
        className = 'image op'  # unused tag, kept from the original code
        self.config_gpu = tf.ConfigProto()  # session configuration
        # Fall back to another device when the requested one is missing.
        self.config_gpu.allow_soft_placement = True
        # Cap GPU memory at 80% of the card to avoid out-of-memory aborts.
        self.config_gpu.gpu_options.per_process_gpu_memory_fraction = 0.8
        # Grow GPU memory on demand instead of grabbing it all up front.
        self.config_gpu.gpu_options.allow_growth = True
        self.sess = tf.Session(config=self.config_gpu)

    def images_preProcess(self, images, mode='train',
                          flip=False,
                          crop=False, crop_shape=(24, 24, 3), cropMode='centor',
                          whiten=False,
                          noise=False, noise_mean=0, noise_std=0.01,
                          hue=False, max_delta_hue=0.05,
                          contrast=False, lower_ct=0.3, upper_ct=1.0,
                          brightness=False, max_delta_bs=0.2,
                          saturation=False, lower_sn=0.0, upper_sn=2.0,
                          ):
        """Apply the enabled augmentations in sequence to images [N,H,W,C].

        BUGFIX: the original applied every enabled step to the raw input
        array, so only the last enabled augmentation survived.  The steps
        are now chained through `outImages`.
        """
        outImages = images
        # image crop ('train' and 'test' intentionally share the same call)
        if crop:
            outImages = self._image_crop(outImages, shape=crop_shape, cropMode=cropMode)
        # image flip
        if flip:
            outImages = self._image_flip(outImages)
        # image whitening
        if whiten:
            outImages = self._image_whitening(outImages)
        # image noise
        if noise:
            outImages = self._image_noise(outImages, mean=noise_mean, std=noise_std)
        if hue:
            outImages = self._image_hue(outImages, max_delta=max_delta_hue)
        if contrast:
            outImages = self._image_contrast(outImages, lower=lower_ct, upper=upper_ct)
        if brightness:
            outImages = self._image_brightness(outImages, max_delta=max_delta_bs)
        if saturation:
            outImages = self._image_saturation(outImages, lower=lower_sn, upper=upper_sn)
        return outImages

    def _run_batched(self, images, make_op, batch_size=1000):
        """Evaluate a per-batch TF image op over `images` and return the
        stitched numpy result.

        make_op: callable mapping a numpy batch to a TF tensor.
        NOTE(review): each call adds new nodes to the default graph; fine
        for one-off preprocessing, leaks graph memory if called repeatedly.
        """
        N = len(images)
        outImages = images.copy()
        for start in range(0, N, batch_size):
            end = min(start + batch_size, N)
            batch_xs = images[start:end, :, :, :]
            outImages[start:end, :, :, :] = self.sess.run(make_op(batch_xs))
        return outImages

    def _image_hue(self, images, max_delta=0.05):
        '''
        Randomly shift the hue of every image by a delta within
        [-max_delta, max_delta].
        '''
        return self._run_batched(images, lambda b: tf.image.random_hue(b, max_delta=max_delta))

    def _image_contrast(self, images, lower=0.3, upper=1.0):
        '''
        Randomly adjust contrast with a factor drawn from [lower, upper].
        '''
        return self._run_batched(images, lambda b: tf.image.random_contrast(b, lower=lower, upper=upper))

    def _image_brightness(self, images, max_delta=0.2):
        '''
        max_delta, float, >0
        random from [-max_delta, max_delta]

        result: make the brightness of image change by max_delta

        BUGFIX: the original batching loop was missing the `break` after
        the final partial batch (unlike _image_hue/_image_contrast) and
        processed it twice.
        '''
        return self._run_batched(images, lambda b: tf.image.random_brightness(b, max_delta=max_delta))

    def _image_saturation(self, images, lower=0.0, upper=2.0):
        """Random per-image saturation with a factor drawn from
        [lower, upper].

        BUGFIX: the original assigned the un-evaluated TF tensor into the
        numpy output; the op is now evaluated with self.sess.run().
        """
        outImages = images.copy()
        for i in range(images.shape[0]):
            sat_op = tf.image.random_saturation(images[i, :, :, :], lower=lower, upper=upper)
            outImages[i, :, :, :] = self.sess.run(sat_op)
        return outImages

    def _image_crop(self, images, shape, cropMode='centor'):
        '''
        Crop every image to `shape`, e.g. shape = (24,24,3).
        cropMode == 'random': TF random crop; anything else: pad 4 px on
        each side, then centre-crop.

        BUGFIX (random mode): the un-evaluated TF tensor was assigned into
        the numpy output; it is now evaluated with self.sess.run().
        NOTE(review): the output array keeps the input H/W, so `shape`
        must match the incoming image size for the assignment to work —
        confirm against callers.
        '''
        outImages = images.copy()
        if cropMode == 'random':
            for i in range(images.shape[0]):
                crop_op = tf.random_crop(images[i, :, :, :], size=shape)
                outImages[i, :, :, :] = self.sess.run(crop_op)
            return outImages
        else:
            for i in range(images.shape[0]):
                old_image = images[i, :, :, :]
                old_image = np.pad(old_image, [[4, 4], [4, 4], [0, 0]], 'constant')
                left = int((old_image.shape[0] - shape[0]) / 2)
                top = int((old_image.shape[1] - shape[1]) / 2)
                outImages[i, :, :, :] = old_image[left: left + shape[0], top: top + shape[1], :]
            return outImages

    def _image_flip(self, images, mode=1):
        '''
        Flip each image with probability 0.5.
        mode: cv.flip flag, 1 -> left/right flip
        '''
        outImages = images.copy()
        for i in range(images.shape[0]):
            old_image = images[i, :, :, :]
            if np.random.random() < 0.5:
                new_image = cv.flip(old_image, mode)
            else:
                new_image = old_image
            outImages[i, :, :, :] = new_image
        return outImages

    def _image_whitening(self, images):
        '''
        Per-image whitening: subtract the image mean and divide by the
        image standard deviation.
        '''
        outImages = images.copy()
        for i in range(images.shape[0]):
            old_image = images[i, :, :, :]
            outImages[i, :, :, :] = (old_image - np.mean(old_image)) / np.std(old_image)
        return outImages

    def _image_noise(self, images, mean=0, std=0.01):
        '''
        Add Gaussian noise (mean `mean`, variance `std`, i.e. sigma =
        std ** 0.5) to every image and clip the result to [0, 1].
        '''
        outImages = images.copy()
        for i in range(images.shape[0]):
            noise_ = np.random.normal(mean, std ** 0.5, images[i, :, :, :].shape)
            # Clip back into the normalised pixel range.
            outImages[i, :, :, :] = np.clip(images[i, :, :, :] + noise_, 0, 1.0)
        return outImages



def tensorboard_show():
    """Launch TensorBoard on ./log; blocks until the process exits."""
    cmd = 'tensorboard --logdir=./log --bind_all'
    os.system(cmd)
    print('show end ...')

def one_hot_10class(lables):
    """One-hot encode `lables` over 10 classes.

    Each element of `lables` indexes the position set to 1 in a length-10
    zero row.  Returns an int32 array of shape [len(lables), 10].
    """
    def encode(lable):
        row = np.zeros([10], dtype=int)
        row[lable] = 1
        return row

    return np.asarray([encode(lable) for lable in lables], np.int32)
def load_mnist_data():
    """Download MNIST via Keras, add a channel axis, scale pixels to
    [0, 1] and one-hot encode the labels.

    Returns (x_train, y_train, x_test, y_test).
    """
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # Grayscale images arrive as [N, H, W]; networks expect [N, H, W, 1].
    x_train = x_train[..., np.newaxis]
    x_test = x_test[..., np.newaxis]

    # Scale pixel values from [0, 255] into [0, 1].
    x_train = x_train / 255
    x_test = x_test / 255

    # Convert integer labels to one-hot encoding.
    y_train = one_hot_10class(y_train)
    y_test = one_hot_10class(y_test)
    return x_train, y_train, x_test, y_test

def load_cifar10_dataOnline():
    """Download CIFAR-10 via Keras, scale pixels to [0, 1] and one-hot
    encode the labels.

    Returns (x_train, y_train, x_test, y_test).
    """
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

    # Scale pixel values from [0, 255] into [0, 1].
    x_train = x_train / 255
    x_test = x_test / 255

    # Keras returns labels of shape [N, 1]; one_hot_10class handles that.
    y_train = one_hot_10class(y_train)
    y_test = one_hot_10class(y_test)
    return x_train, y_train, x_test, y_test
def load_cifar10_data(path, normalization=True):
    """Load CIFAR-10 from local batch files under `path`.

    path: directory containing data_batch_1..5 and test_batch.
    normalization: scale pixels into [0, 1] when True.
    Returns (x_train, y_train, x_test, y_test) with one-hot labels.
    """
    num_train_samples = 50000

    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.empty((num_train_samples,), dtype='uint8')

    for i in range(1, 6):
        # BUGFIX: use os.path.join like the test_batch lookup below; the
        # original concatenated strings and broke when `path` had no
        # trailing separator.
        fpath = os.path.join(path, 'data_batch_' + str(i))
        (x_train[(i - 1) * 10000:i * 10000, :, :, :],
         y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)

    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = load_batch(fpath)

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    # Batches are stored channels-first; transpose when the backend wants
    # channels-last.
    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    if normalization:
        # Scale pixel values from [0, 255] into [0, 1].
        x_train = x_train / 255
        x_test = x_test / 255

    # Convert integer labels to one-hot encoding.
    y_train = one_hot_10class(y_train)
    y_test = one_hot_10class(y_test)
    return x_train, y_train, x_test, y_test
##################################


##########
def delete_file(filePath):
    """Recursively force-delete `filePath`.

    Every contained file is first made writable (clears read-only bits,
    mainly relevant on Windows) and removed, then the directory tree
    itself is dropped.  Returns "delete ok" on success or "no filepath"
    when the path does not exist.
    """
    if not os.path.exists(filePath):
        return "no filepath"
    for root, _dirs, files in os.walk(filePath):
        for fname in files:
            full = os.path.join(root, fname)
            os.chmod(full, stat.S_IWRITE)
            os.remove(full)
    shutil.rmtree(filePath)
    return "delete ok"

def read_img(path, row, col,c, img_type):
    '''
    Read a labelled image set from `path`, one sub-directory per class.

    path = "../SaveRoiImg/"  (must end with a path separator)
    img_type = ".png"        (unused; kept for interface compatibility)
    c: 3 -> colour, 1 -> grayscale; anything else exits the process.

    Returns (images float32 [N, row, col, c], one-hot labels int64
    [N, num_classes]).

    Directory layout:
    dog
        0
            1.jpg
            2.jpg
            xx.jpg
        1
    '''
    # Sorted class sub-directories, e.g. 0  1  2  3  4  5
    cate = [x for x in os.listdir(path) if os.path.isdir(path + x)]
    cate.sort()

    print('------------', cate)
    imgs = []
    labels = []
    num_classes = len(cate)
    count = 0
    for idx, folder in enumerate(cate):
        print ('\n dir is ', folder)

        namelist = glob.glob(path + folder + '/*')
        for im in namelist:
            count += 1
            print('\r'+'reading the image %d: %s' % (count,im), end='',flush=True)
            # BUGFIX: one-hot built from plain python ints; the original
            # used dtype=np.int, which was removed in numpy >= 1.24.
            make_label = [0] * num_classes
            make_label[idx] = 1
            # Class index notes from the original author (dataset-specific):
            # 0:dian_gui 1:filter_things 2:iron spots 3:ping_tai 4:rail
            # 5:ground 6:line 7/11:foreign1 8/12:foreign2

            labels.append(make_label)
            if c == 3:
                img = cv.imread(im, cv.IMREAD_COLOR)
            elif c == 1:
                img = cv.imread(im, cv.IMREAD_GRAYSCALE)
            else:
                print("readImg error")
                sys.exit(0)

            img = transform.resize(img, (row, col, c), mode='constant')
            imgs.append(img)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int64)

###########################




def loadImgNameAB(rowDataPath):
    """Split the files in rowDataPath into 'a'-samples and 'b'-samples.

    A file belongs to sampleA when its name contains the letter 'a'
    anywhere, and to sampleB when it contains 'b' (a name may land in both
    lists).  Returned entries are rowDataPath + "/" + filename.
    """
    names = os.listdir(rowDataPath)
    sampleA = [rowDataPath + "/" + name for name in names if 'a' in name]
    sampleB = [rowDataPath + "/" + name for name in names if 'b' in name]
    return sampleA, sampleB

def loadImgName(inPath):
    """Return ([inPath + "/" + name, ...], [name, ...]) for every entry of
    inPath, in os.listdir() order (names are e.g. xxx.png)."""
    names = list(os.listdir(inPath))
    paths = [inPath + "/" + name for name in names]
    return paths, names


#label: T or F
def mergeImg(path, label, savePath):
    """Pair every '*a.png' with its '*b.png' sibling in `path` and write a
    3-channel merge (a, b, zeros) into savePath as '<label><idx>.jpg'.

    label: 'T' or 'F' — becomes the file-name prefix of the merged images.
    Exits the process when the a/b lists are unbalanced or a partner file
    is missing.
    """
    imgPathA, imgPathB = loadImgNameAB(path)
    imgPathA.sort()
    imgPathB.sort()
    if(len(imgPathA) != len(imgPathB)):
        print("a, b len not equal")
        sys.exit()
    # BUGFIX: the original passed the count as a second print() argument
    # instead of %-formatting it, printing the literal "%d".
    print("sample number: %d" % len(imgPathA))

    count = 0
    for nameA in imgPathA:
        # Derive the partner name: '...a.png' -> '...b.png'.
        idx = nameA.find('a.png')
        tempB = nameA[0:idx] + 'b.png'
        nameB = 'null'

        if os.path.exists(tempB):
            nameB = tempB
        else:
            print("path not exist", tempB)
            sys.exit()
        if nameB == 'null':
            # BUGFIX: %-format the name (was a stray print argument).
            print("%s is not find in nameB" % nameA)
            sys.exit()

        if os.path.exists(nameA) and os.path.exists(nameB):
            imgA = cv.imread(nameA, 0)  # 0 -> load as grayscale
            imgB = cv.imread(nameB, 0)

            print(imgA.shape)
            print(imgB.shape)
            # Third channel is all-zero padding.
            zeros = np.zeros(imgA.shape[0:2], dtype="uint8")
            mergeImg = cv.merge( [imgA, imgB, zeros]  )

            cv.imwrite(savePath + "/" + label + str(count) + ".jpg", mergeImg)
            count += 1
                
   
#[n, row, col, c]
def generateDataSet(mergeImgPath, row, col):
    """Build a two-class data set from merged images in mergeImgPath.

    Images are resized to (row, col) and only their first two channels are
    kept.  A file name starting with 'T' is labelled [0, 1], anything else
    [1, 0].  Returns (float32 images [n, row, col, 2], int32 labels [n, 2]).
    """
    imgPathList, nameList = loadImgName(mergeImgPath)
    imgPathList.sort()
    nameList.sort()

    imgList = []
    lableList = []
    for imgPath, name in zip(imgPathList, nameList):
        raw = cv.imread(imgPath)
        resized = transform.resize(raw, (row, col, raw.shape[2]), mode='constant')
        # Only the first two channels carry data (third is zero padding).
        imgList.append(resized[:, :, 0:2])
        # File-name prefix encodes the class.
        lableList.append([0, 1] if name[0] == 'T' else [1, 0])

    return np.asarray(imgList, np.float32), np.asarray(lableList, np.int32)

#0:diff  1:sim
#[1, 0],  [0,1]
def one_hot(labels):
    """Two-class one-hot encoding: 0 -> [1, 0] (diff), anything else ->
    [0, 1] (sim).  Returns an int32 array of shape [len(labels), 2]."""
    encoded = [[1, 0] if label == 0 else [0, 1] for label in labels]
    return np.asarray(encoded, np.int32)

def _show_time(cost_time, is_print=True):
    #start_time = time.time()
    hour =   ( cost_time // 60 ) // 60
    minute = ( (cost_time) // 60 ) % 60
    second = (  cost_time) % 60
    if is_print:
        print('Running time:%f Second' % cost_time)  # 输出运行时间
        print('Running time: {:.0f}h {:.0f}m {:.0f}s '.format(hour, minute, second))
    return (hour, minute, second)


def slipImgChannel(imgs):
    """Split a batch [n, h, w, c] into two single-channel float32 batches
    of shape [n, h, w, 1], holding channel 0 and channel 1 respectively.

    Exits the process when `imgs` is not 4-dimensional.
    """
    if len(imgs.shape) != 4:
        print("imgs dims error: ", imgs.shape)
        sys.exit(0)
    # Plain integer indexing keeps the original IndexError behaviour when
    # the input has fewer than two channels.
    first = imgs[:, :, :, 0][..., np.newaxis]
    second = imgs[:, :, :, 1][..., np.newaxis]
    return np.asarray(first, np.float32), np.asarray(second, np.float32)
    
def smoothVal(last, curr, weight = 0.7):
    """Exponential smoothing: weight * last + (1 - weight) * curr.

    A `last` of -1 means "no history yet", in which case curr is used as
    its own history (result == curr).
    """
    if last == -1:
        last = curr
    return last * weight + (1 - weight) * curr
    

def shuffleDatas(imgs, labels):
    """Shuffle imgs and labels with the same random permutation so that
    sample/label pairs stay aligned.  Returns (imgs, labels) reordered."""
    order = np.random.permutation(imgs.shape[0])
    return imgs[order], labels[order]
    

'''
saver = tf.train.Saver(max_to_keep=1)
netMessage = NETMESSAGE(Sess=sess)

......

netMessage.saveNet(saver=saver)


# save_path = './saved-model'
# print(imageReady.delete_file(save_path))       #detect the file_dir, delete it if it exits
# ckpt_file_path = save_path
# path_ = os.path.dirname(os.path.abspath(ckpt_file_path))
# if os.path.isdir(path_) is False:
#     os.makedirs(path_)
# saver = tf.train.Saver(max_to_keep=1)
# saver.save(sess, ckpt_file_path + '/model.ckpt',write_meta_graph=True)


    # #save info into tensorboard
    # merged = tf.summary.merge_all()
    # train_writer = tf.summary.FileWriter('./log', sess.graph)

    # ......

    # summary, _, cost_value,train_acc, pre = sess.run([merged, model.train_step, model.cross_entropy, model.accuracy, model.prediction],
    #                                         feed_dict = {x_input:batch_xs, y_input:batch_ys, is_train:True})
    # train_writer.add_summary(summary, epoch)
'''
class NETMESSAGE():
    """Helpers around a tf.Session: checkpoint saving, global-variable
    inspection and small plain-text record files."""

    def __init__(self, Sess=None):
        # The session must be supplied before saveNet()/findVariable() work.
        self.sess = Sess

    def saveNet(self,saver, save_path='./saved-model'):
        """Save the current session as a checkpoint under save_path.

        Any existing save_path directory is deleted first so only one model
        version is kept.  Exits when no session was supplied.
        """
        if self.sess is None:  # BUGFIX: identity check, not == None
            print("error:  don't input Session")
            sys.exit()
        # Detect the file_dir, delete it if it exists.
        print(delete_file(save_path))
        ckpt_file_path = save_path
        path_ = os.path.dirname(os.path.abspath(ckpt_file_path))
        if os.path.isdir(path_) is False:
            os.makedirs(path_)
        saver.save(self.sess, ckpt_file_path + '/model.ckpt', write_meta_graph=True)

    def showAllVariable(self):
        """Print every global variable of the default graph."""
        for var in tf.global_variables():
            print(var)

    def findVariable(self, name, isPrint=False, sess=None):
        """Collect [repr, value-string] pairs of all global variables whose
        repr contains `name`.

        BUGFIX: matching now uses substring containment; the original
        `find(name) > 0` silently missed a match at position 0.  When no
        session is passed, self.sess is used instead of crashing on None.
        """
        if sess is None:
            sess = self.sess
        list_val = []
        for var in tf.global_variables():
            if name in str(var):
                list_val.append(str(var))
                list_val.append(str(sess.run(var)))

                if isPrint:
                    print("tensor name:  ", var)
                    print(list_val[-1])
        return list_val

    #path = "./test.txt"
    def createNewFileInLocal(self, path, infos):
        """
        Create/overwrite `path`, writing a "record info:" header followed
        by every string of `infos`.

        open(path, 'w')
        infos: [str1, str2, ...]
        """
        with open(path, 'w') as ofs:
            ofs.write("record info:\n")
            for info in infos:
                ofs.write(info)

    def readFile(self, path):
        """Return all lines of the text file at `path`."""
        with open(path, 'r') as f:
            return f.readlines()

    def addFileToLocal(self, path, infos):
        """
        Append every string of `infos` to `path`.

        open(path, 'a')
        infos: [str1, str2, ...]
        """
        with open(path, 'a') as ofs:
            for info in infos:
                ofs.write(info)


'''
#save info into tensorboard
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('./log', sess.graph)

......

summary, _, cost_value,train_acc, pre = sess.run([merged, model.train_step, model.cross_entropy, model.accuracy, model.prediction],
                                                    feed_dict = {x_input:batch_xs, y_input:batch_ys, is_train:True})
train_writer.add_summary(summary, epoch)

save model >>>
#save model pb
    graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, 
                            ["x-input","y-input","keep_prob","learnRate","version", "softmax", "accurary/accuracy", "cross_entropy/loss_value"])
    tf.io.write_graph(graph, './models', 'model.pb',as_text= False)

    <<<

load model>>>
with tf.Session(graph=tf.Graph()) as sess:
    sess.run( tf.global_variables_initializer() )

    #load model
    modelPath = './saved-model'
    saver = tf.train.import_meta_graph('./saved-model' + '/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint(modelPath))
    graph = tf.get_default_graph()

    x_input = sess.graph.get_tensor_by_name('x_input:0')
    y_input = sess.graph.get_tensor_by_name('y_input:0')
    is_train = sess.graph.get_tensor_by_name('is_train:0')
    learnRate = sess.graph.get_tensor_by_name('learnRate:0')

    logit = sess.graph.get_tensor_by_name('stage_final/softmax:0')
    acc = sess.graph.get_tensor_by_name('accurary_scalar/accuracy:0')
    loss = sess.graph.get_tensor_by_name('cross_entropy/loss:0')


#self.train_step=tf.train.AdamOptimizer(self.learnRate).minimize(self.cross_entropy, name='train_opt')
    train_opt = sess.graph.get_operation_by_name('train_opt')
    
'''


def enhanceImgs(path, savePath="/home/ljl/Desktop/000deep_classfy/deepLearn-filter-coding/111/"):
    """Write two brightness variants (x0.5 and x1.5) of every image under
    `path` into savePath as '<idx>_1.jpg' / '<idx>_2.jpg'.

    savePath was hard-coded in the original; it is now a parameter whose
    default keeps the old value, so existing callers are unaffected.
    NOTE(review): img * 1.5 can exceed 255 before cv.imwrite — confirm
    whether clipping is wanted.
    """
    imgPaths, name = loadImgName(path)
    idx = 1
    for imgpath in imgPaths:
        # Index is incremented before use, so the first file gets idx 2
        # (behaviour kept from the original).
        idx = idx + 1
        img = cv.imread(imgpath)
        img1 = img * 0.5
        cv.imwrite(savePath + str(idx) + "_1.jpg", img1)
        img2 = img * 1.5
        cv.imwrite(savePath + str(idx) + "_2.jpg", img2)
def transImg(path, savePath="/home/ljl/Desktop/000deep_classfy/deepLearn-filter-coding/111/"):
    """Write horizontally and vertically flipped copies of every image
    under `path` into savePath as '<idx>_1trans.jpg' / '<idx>_2trans.jpg'.

    savePath was hard-coded in the original; it is now a parameter whose
    default keeps the old value, so existing callers are unaffected.
    """
    imgPaths, name = loadImgName(path)
    idx = 110
    for imgpath in imgPaths:
        idx = idx + 1
        img = cv.imread(imgpath)
        htransImg = cv.flip(img, 1, dst=None)  # 1 -> horizontal (left/right)
        vtransImg = cv.flip(img, 0, dst=None)  # 0 -> vertical (up/down)
        cv.imwrite(savePath + str(idx) + "_1trans.jpg", htransImg)
        cv.imwrite(savePath + str(idx) + "_2trans.jpg", vtransImg)


if __name__ == '__main__':
    # Ad-hoc driver: only the enhanceImgs() call below actually runs;
    # sys.exit(0) makes everything after it dead code kept for reference.

#    mergeImg("/home/ljl/Desktop/newImagesData/1", 'T', "/home/ljl/Desktop/newImagesData/merge")
#    mergeImg("/home/ljl/Desktop/newImagesData/0", 'F', "/home/ljl/Desktop/newImagesData/merge")

    enhanceImgs("/home/ljl/Desktop/000deep_classfy/deepLearn-filter-coding/new_train_datas2021_11_28/2")
    #transImg("/home/ljl/Desktop/000deep_classfy/deepLearn-filter-coding/new_train_datas2021_11_28 (copy)/0")

    sys.exit(0)
    # NOTE(review): dead code below — `row` and `col` are undefined here,
    # so this call would raise NameError if it were ever reached.
    imgs, labels = generateDataSet("/home/ljl/Desktop/newImagesData/merge", row,col)
    print(imgs.shape)
    print(labels.shape)


    #data, label = read_img(path)
    #data, label = shuffleDatas(data, label)

    sys.exit()
    # Dead code: preview the two channels of the merged samples.
    batch_left, batch_right = slipImgChannel(imgs)
    print(batch_left.shape)

    start = 0 
    for idx in range(2):
        print (idx)
        print(batch_left.shape)
        cv.imshow("aaa", batch_left[idx + start])
        cv.imshow("bbb", batch_right[idx + start])
        cv.waitKey(0)
    cv.destroyAllWindows()
    sys.exit()

    for idx in range(20):
        print (idx)
        cv.imshow("aaa", imgs[idx + start][:,:,0])
        cv.imshow("bbb", imgs[idx + start][:,:,1])
        cv.waitKey(0)
    cv.destroyAllWindows()