import os
import numpy as np
from PIL import Image
import tensorflow as tf
import matplotlib.pyplot as plt
from numpy import *


def to_one_hot_tensor(batch_labels, class_num):
    """Convert a 1-D tensor of integer class ids into one-hot rows.

    Args:
        batch_labels: 1-D int tensor of class indices in [0, class_num).
        class_num: number of classes (width of each one-hot row).

    Returns:
        [batch_size, class_num] int32 tensor with a single 1 per row.
    """
    # The original expand_dims/concat/sparse_to_dense dance built exactly
    # this matrix; tf.sparse_to_dense is deprecated (removed in TF2), so
    # use tf.one_hot directly.  dtype is pinned to int32 to match the
    # integer output the sparse_to_dense(..., 1) version produced.
    return tf.one_hot(batch_labels, class_num, dtype=tf.int32)


def to_one_hot_np(y, C):
    """One-hot encode a sequence of integer labels.

    Args:
        y: list/array of integer class ids in [0, C).
        C: number of classes.

    Returns:
        int32 ndarray of shape (len(y), C), one 1 per row.
    """
    # Bug fix: the bare `int32` only resolved through the module's
    # `from numpy import *` star import; use the namespaced dtype.
    return np.eye(C, dtype=np.int32)[y]


def get_file(file_top_dir, ratio=0.9):
    """Scan a directory-per-class image tree and split it into train/val.

    Each immediate subdirectory of `file_top_dir` is treated as one class;
    its .jpg/.jpeg/.png files become samples labeled with that class id.

    Args:
        file_top_dir: root directory of the class subdirectories.
        ratio: fraction of the shuffled samples assigned to training.

    Returns:
        (train_images, train_labels, val_images, val_labels,
         class_names_to_ids, ids_to_class_names) — image entries are full
        paths, labels are integer class ids.
    """
    class_names = []
    for entry in os.listdir(file_top_dir):  # renamed: `dir` shadowed a builtin
        if os.path.isdir(os.path.join(file_top_dir, entry)):
            class_names.append(entry)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))
    ids_to_class_names = {v: k for k, v in class_names_to_ids.items()}

    image_list = []
    label_list = []
    for class_name in class_names:
        for fname in os.listdir(os.path.join(file_top_dir, class_name)):
            path = os.path.join(file_top_dir, class_name, fname)
            # Skip hidden files (e.g. .DS_Store) and non-image extensions.
            # Bug fix: the original `"ignoring:".format(path)` had no {}
            # placeholder, so the path was silently dropped from the log.
            if fname.startswith("."):
                print("ignoring:{}".format(path))
                continue
            extension = os.path.splitext(fname)[1]
            if extension not in (".jpg", ".png", ".jpeg"):
                print("ignoring:{}".format(path))
                continue
            image_list.append(path)
            label_list.append(class_names_to_ids[class_name])

    # Pair paths with labels so a single shuffle keeps them aligned.
    # NOTE: vstack coerces the int labels to strings, hence the int()
    # casts in the return statement below.
    im_label_dict = np.vstack((image_list, label_list)).transpose()
    np.random.shuffle(im_label_dict)

    # int(np.ceil(...)) instead of math.ceil: `math` was only reachable
    # through the fragile `from numpy import *` star import.
    div_pos = int(np.ceil(len(label_list) * ratio))
    train_im_label_dict = im_label_dict[:div_pos]
    val_im_label_dict = im_label_dict[div_pos:]

    # train images, train labels, val images, val labels, name<->id maps
    return list(train_im_label_dict[:, 0]), \
           [int(i) for i in list(train_im_label_dict[:, 1])], \
           list(val_im_label_dict[:, 0]), \
           [int(i) for i in list(val_im_label_dict[:, 1])], \
           class_names_to_ids, \
           ids_to_class_names


def dump_net(ckpt_path):
    """Print every variable name and shape stored in a TF checkpoint,
    followed by the total variable count."""
    from tensorflow.python import pywrap_tensorflow

    reader = pywrap_tensorflow.NewCheckpointReader(ckpt_path)
    shape_map = reader.get_variable_to_shape_map()
    for var_name in shape_map:
        print(var_name, ':', shape_map[var_name])
    print(len(shape_map))


def get_batch_tensor(image, label, image_W, image_H, batch_size, capacity, class_num=None, gray=False):
    """Build a TF1 queue-based input pipeline from image paths and labels.

    Args:
        image: list of image file paths.
        label: list of int labels aligned with `image`.
        image_W, image_H: target size for resize_image_with_crop_or_pad.
            NOTE(review): that op's signature is (image, target_height,
            target_width), so image_W is used as the *height* here —
            confirm the caller's W/H are not swapped (harmless for squares).
        batch_size: samples per emitted batch.
        capacity: queue capacity for tf.train.batch.
        class_num: when given, labels are expanded to one-hot rows.
        gray: decode 1-channel (grayscale) instead of 3-channel RGB.

    Returns:
        (image_batch, label_batch): float32 image batch, and either an
        int32 [batch_size] label tensor or a one-hot [batch_size,
        class_num] tensor when class_num is given.
    """
    # Step 1: convert the Python lists to tensors and feed an input queue
    # that dequeues one (path, label) pair at a time.
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])  # raw file bytes

    # Step 2: decode the image. decode_jpeg covers .jpg/.jpeg; other
    # formats admitted upstream (e.g. .png) would fail at this op.
    if gray:  # idiom fix: was `if gray is True`
        image = tf.image.decode_jpeg(image_contents, channels=1)
    else:
        image = tf.image.decode_jpeg(image_contents, channels=3)

    # Step 3: crop/pad to a fixed size, then standardize per image
    # (zero mean, unit variance) so the model trains more robustly.
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)

    # Step 4: assemble batches with a multi-threaded batching queue.
    # image_batch: 4D float32 [batch_size, H, W, C]
    # label_batch: 1D int32 [batch_size]
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=16, capacity=capacity)

    # Flatten the label batch to [batch_size] before optional one-hot.
    label_batch = tf.reshape(label_batch, [batch_size])
    if class_num is not None:  # idiom fix: was `!= None`
        label_batch = to_one_hot_tensor(label_batch, class_num)
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch
    # 获取两个batch，两个batch即为传入神经网络的数据


class BatchGenerator:
    """Yields successive mini-batches from an in-memory image/label set.

    Labels are converted to one-hot int32 rows at construction time.
    `next_batch` walks the data in order, wraps around at the end, and
    counts completed passes in `m_epoch`.
    """

    def __init__(self, image, label, batch_size, class_num):
        self.m_image = image
        self.m_batch_size = batch_size
        self.m_class_num = class_num
        self.m_cur_index = 0  # position of the next batch's first sample
        self.m_epoch = 0      # number of completed passes over the data
        # One-hot encode up front (inlined np.eye row lookup).
        self.m_label = np.eye(class_num, dtype=np.int32)[label]

    def next_batch(self, verbose=False):
        """Return (images, one_hot_labels, epoch) for the next batch."""
        total = len(self.m_image)
        stop = self.m_cur_index + self.m_batch_size
        if verbose:
            print("index:{} all: {}".format(self.m_cur_index, total))
        if self.m_cur_index == 0 and self.m_batch_size >= total:
            # A single batch covers the whole set: hand everything back.
            batch_images = self.m_image
            batch_labels = self.m_label
            self.m_epoch += 1
        elif self.m_cur_index != 0 and stop >= total:
            # Tail of the data set: take what remains and wrap around.
            batch_images = self.m_image[self.m_cur_index:]
            batch_labels = self.m_label[self.m_cur_index:]
            self.m_cur_index = 0
            self.m_epoch += 1
            if verbose:
                print(" data set epoch:{}".format(self.m_epoch))
        else:
            # Ordinary interior batch: slice and advance the cursor.
            batch_images = self.m_image[self.m_cur_index:stop]
            batch_labels = self.m_label[self.m_cur_index:stop]
            self.m_cur_index = stop
        if verbose:
            print("shape label:{}".format(batch_labels.shape))
        return (batch_images, batch_labels, self.m_epoch)
