import numpy as np
import os
from keras.models import load_model
from tqdm import tqdm
import tifffile
import cv2
import glob
from models.linknet import LinkNet
import argparse
from copy import deepcopy

# GPU id exported to CUDA_VISIBLE_DEVICES below; set to "-1" to run on CPU.
GPU="0" # default GPU  # must not be 1 ('/device:GPU:0')
# GPU="-1" # no GPU

# Number of merged land-cover classes the model predicts (argmax channels 0..10).
NUM_CATEGORIES = 11

def sequential_to_las_labels(seq_labels):
    """Remap raw model class ids (0-17) to the merged DFC label set (0-10).

    Mapping (index = raw id, value = merged label):
        0->0 Unlabeled; 1-5->1 Forest; 6-7->2 Shrubland; 8-9->3 Savanna;
        10->4 Grassland; 11->5 Wetlands; 12,14->6 Croplands;
        13->7 Urban/Built-up; 15->8 Snow/Ice; 16->9 Barren; 17->10 Water.

    Args:
        seq_labels: integer ndarray of raw class ids.

    Returns:
        New ndarray of the same shape with merged labels; values outside
        the 0-17 range are left unchanged (same as the original masked
        assignments, which simply never matched them).
    """
    # Lookup table: position i holds the merged label for raw class i.
    #  raw:    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17
    #  merged: 0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 6, 8, 9, 10
    table = np.array([0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 6, 8, 9, 10])

    labels = deepcopy(seq_labels)
    in_range = (seq_labels >= 0) & (seq_labels < len(table))
    labels[in_range] = table[seq_labels[in_range]]
    return labels

# convert category value image to RGB color image
def category_to_color(category_image):
    """Convert a 2-D category-index image into an RGB color image.

    Args:
        category_image: 2-D array of merged class ids in [0, 10]
            (output of ``sequential_to_las_labels``).

    Returns:
        (rows, cols, 3) uint8 ndarray with one RGB color per class.
    """
    # Per-class RGB palette, indexed by category value.
    # Color table: https://www.rapidtables.com/web/color/RGB_Color.html
    palette = np.array([
        (0, 0, 0),        # 0  Unlabeled
        (0, 153, 0),      # 1  Forest
        (198, 176, 68),   # 2  Shrubland
        (251, 255, 19),   # 3  Savanna
        (182, 255, 5),    # 4  Grassland
        (39, 255, 135),   # 5  Wetlands
        (194, 79, 68),    # 6  Croplands
        (165, 165, 165),  # 7  Urban/Built-up
        (105, 255, 248),  # 8  Snow/Ice
        (249, 255, 164),  # 9  Barren
        (28, 13, 255),    # 10 Water
    ], dtype=np.uint8)

    rows = category_image.shape[0]
    cols = category_image.shape[1]
    categories = np.reshape(category_image.astype(np.uint8), [rows, cols])
    # Vectorized fancy-index lookup replaces the original per-pixel Python
    # double loop (and the cv2.cvtColor call that only allocated the output).
    return palette[categories]

if __name__ == "__main__":

    '''
    python C:/Users/Du/Desktop/linknet_test/val.py /content/drive/My Drive/colab/igrss/s2_validation/ /content/drive/My Drive/colab/igrss/linknet_test/output/ /content/drive/My Drive/colab/igrss/linknet_test/checkpoints/linknet.h5
    '''

    # Parse arguments. nargs='?' makes the defaults actually usable: without
    # it, argparse ignores `default=` on positionals and always requires them.
    parser = argparse.ArgumentParser()
    parser.add_argument('test_folder', type=str, nargs='?', default='/content/drive/My Drive/colab/igrss/s2_validation/')
    parser.add_argument('output_folder', type=str, nargs='?', default='/content/drive/My Drive/colab/igrss/linknet_test/output')
    parser.add_argument('model_file', type=str, nargs='?', default='/content/drive/My Drive/colab/igrss/linknet_test/checkpoints/linknet.h5')
    args = parser.parse_args()

    # Select GPU before the model is built.
    os.environ["CUDA_VISIBLE_DEVICES"] = GPU

    # Model input geometry: 256x256 tiles with 13 spectral bands.
    height, width, bands = 256, 256, 13
    model = LinkNet(NUM_CATEGORIES, input_shape=(height, width, bands))
    # NOTE(review): args.model_file is parsed but unused — the weights path is
    # hard-coded below; confirm which one should win before changing it.
    model = model.get_model(
        pretrained_encoder=False,
        weights_path='/content/drive/My Drive/colab/igrss/linknet_test/checkpoints/checkpoints/',
    )

    # Predict semantic segmentation for every .tif in the test folder.
    # NOTE(review): plain string concatenation — test_folder/output_folder are
    # assumed to end with a path separator.
    files = glob.glob(args.test_folder + '*.tif')
    print('Number of files = ', len(files))

    for path in tqdm(files):
        name = os.path.basename(path)
        stem_end = name.find('.tif')
        # Output name: characters [20:22] of the input stem are replaced with
        # 'dfc' (dataset naming convention for submission files).
        cls_name = args.output_folder + name[0:20] + 'dfc' + name[22:stem_end] + '.tif'

        img = tifffile.imread(path)           # (256, 256, 13)
        img = np.expand_dims(img, axis=0)     # (1, 256, 256, 13) batch of one
        img = img / 255.0                     # scale to [0, 1]

        pre = model.predict(img)              # (1, 256, 256, 11)
        seg = np.argmax(pre[0, :, :, 0:NUM_CATEGORIES], axis=2)  # (256, 256)

        # Remap to submission labels and save as uint8.
        # (For a visual RGB preview use: tifffile.imsave(..., category_to_color(seg)))
        seg = sequential_to_las_labels(seg)
        seg = np.array(seg, dtype='uint8')
        tifffile.imsave(cls_name, seg)
