#!/usr/bin/env python
# -*- coding:utf-8 -*-
from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
path = r'flower_photos/'   # directory containing the flower image data
model_path = r'./medl/model'   # path prefix used when saving the trained model
# Resize every image to 100*100 (RGB, so 3 channels)
w=100
h=100
c=3


# Read all images under path; each sub-folder is one class and its index becomes the label
def read_img(path):
    cate=[path+x for x in os.listdir(path) if os.path.isdir(path+x)]
    imgs=[]
    labels=[]
    for idx,folder in enumerate(cate):
        for im in glob.glob(folder+'/*.jpg'):
            #print('reading the images:%s'%(im))
            img=io.imread(im)
            img=transform.resize(img,(w,h))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs,np.float32),np.asarray(labels,np.int32)
data,label=read_img(path)


# Shuffle the samples
num_example=data.shape[0]
arr=np.arange(num_example)
np.random.shuffle(arr)
data=data[arr]
label=label[arr]


# Split all data into a training set and a validation set
ratio=0.8
s = int(num_example * ratio)   # np.int is removed in recent NumPy; the builtin int is enough
x_train=data[:s]
y_train=label[:s]
x_val=data[s:]
y_val=label[s:]

#-----------------Build the network----------------------
# Placeholders
x=tf.placeholder(tf.float32,shape=[None,w,h,c],name='x')
y_=tf.placeholder(tf.int32,shape=[None,],name='y_')
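# y_ holds the integer class index for each image in x (0..4 here, one index per sub-folder found by read_img)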

def inference(input_tensor, train, regularizer):
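    # Four conv layers (32, 64, 128, 128 filters), each followed by 2x2 max pooling,
    # then three fully connected layers ending in the 5-class logits.
    # `train` toggles dropout on the FC layers; `regularizer`, if given, is applied to the FC weights.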
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight",[5,5,3,32],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize = [1,2,2,1],strides=[1,2,2,1],padding="VALID")

    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight",[5,5,32,64],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight",[3,3,64,128],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight",[3,3,128,128],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))

    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
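        # After four VALID 2x2 poolings the 100x100 input shrinks 100 -> 50 -> 25 -> 12 -> 6,
        # so pool4 has shape [batch, 6, 6, 128]; flatten it for the fully connected layers below.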
        nodes = 6*6*128
        reshaped = tf.reshape(pool4,[-1,nodes])

    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train: fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))

        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train: fc2 = tf.nn.dropout(fc2, 0.5)

    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases

    return logit

#---------------------------End of network---------------------------
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
# train=False: dropout stays off, since the same graph is also used for validation below
logits = inference(x, False, regularizer)

# (Small trick) multiply logits by 1 and give the result an explicit name, so the output tensor
# can later be fetched by name when the saved model is restored (see the commented sketch at the end of the file)
b = tf.constant(value=1,dtype=tf.float32)
logits_eval = tf.multiply(logits,b,name='logits_eval')

# The cross entropy op returns one value per example; reduce it to a scalar and add the
# L2 terms collected in the 'losses' collection so the regularizer actually takes effect
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_))
loss = cross_entropy + tf.add_n(tf.get_collection('losses'))
train_op=tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_)
acc= tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


# Generator that yields the data in mini-batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]


# Training and validation; n_epoch can be set larger for better results

n_epoch=2
batch_size=64
saver=tf.train.Saver()
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    #training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _,err,ac=sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("epoch %d took %fs" % (epoch, time.time() - start_time))
    print("   train loss: %f" % (train_loss / n_batch))
    print("   train acc: %f" % (train_acc / n_batch))

    #validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss,acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (val_loss / n_batch))
    print("   validation acc: %f" % (val_acc / n_batch))
# Make sure the checkpoint directory exists before saving
if not os.path.exists(os.path.dirname(model_path)):
    os.makedirs(os.path.dirname(model_path))
saver.save(sess,model_path)
sess.close()
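
# A minimal sketch (not executed here, typically run in a fresh script/graph) of how the
# checkpoint saved above can be restored and queried through the named 'logits_eval' tensor.
# `some_images` is a hypothetical array of preprocessed 100x100x3 images, not defined in this script:
#
#   with tf.Session() as restore_sess:
#       restorer = tf.train.import_meta_graph(model_path + '.meta')
#       restorer.restore(restore_sess, model_path)
#       graph = tf.get_default_graph()
#       x_in = graph.get_tensor_by_name('x:0')
#       out = graph.get_tensor_by_name('logits_eval:0')
#       scores = restore_sess.run(out, feed_dict={x_in: some_images})
#       predictions = np.argmax(scores, axis=1)   # index of the predicted flower class for each image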


