# -*- coding:utf-8 -*-
import  os
import cv2
import numpy as np
import glob
import tensorflow as tf
from  tensorflow.contrib.layers import *
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
import numpy as np
from random import shuffle
# Class names: 'qingchun' (innocent style) / 'xinggan' (sexy style).
age_table=['qingchun','xinggan']
# Number of output classes (== 2 here).
label_size=len(age_table)
# Global list of [filepath, label_index] pairs; filled below.
data_set=[]
# Training stops once this many epochs have completed (see training()).
max_epoch=300
batch_size=50
# Expected input image size in pixels (height x width).
IMAGE_HEIGHT=112
IMAGE_WIDTH=92
# Checkpoint path used by training() and detectsex().
modelpath='/work/liuwei/model/age.ckpt'

# Read one image from disk.
def get_image(filepath):
    """Load the image at *filepath* as a BGR numpy array.

    Raises:
        FileNotFoundError: if the file is missing or unreadable.
            (cv2.imread fails silently by returning None; failing fast
            here beats a cryptic crash later inside sess.run.)
    """
    img = cv2.imread(filepath)
    if img is None:
        raise FileNotFoundError("cannot read image: %s" % filepath)
    return img
# Build the labelled sample list for one class folder.
def parse_data(fold_x_data, area_age):
    """Walk *fold_x_data* recursively and collect every .bmp file.

    Args:
        fold_x_data: root directory containing one class's images.
        area_age: class name; must be an entry of ``age_table``.

    Returns:
        list of [filepath, label_index] pairs, where label_index is the
        position of *area_age* in ``age_table``.
    """
    label = age_table.index(area_age)  # invariant over the walk; hoisted
    samples = []
    for root, _sub_dirs, files in os.walk(fold_x_data):
        for name in files:
            # Skip macOS Finder metadata files.
            if name.startswith(".DS_Store"):
                continue
            if name.endswith('.bmp'):
                samples.append([os.path.join(root, name), label])
    return samples
# Root folders for the two classes; fill these in before training.
# NOTE(review): the original script declared ten age-bucket paths and
# called parse_data() with only one argument (a TypeError — it requires
# a label too), while age_table has just two entries.  Those look like
# leftovers from an age-estimation script; reduced to the two classes
# that actually exist.
qingchun_path = ''
xinggan_path = ''

data_set = (parse_data(qingchun_path, 'qingchun')
            + parse_data(xinggan_path, 'xinggan'))
shuffle(data_set)
# Number of complete mini-batches available per epoch.
num_batchs = len(data_set) // batch_size

# Fetch the next mini-batch of (image, label) pairs from data_set.
# Advances the module-level cursor ``pointer``; the caller resets it to
# 0 at the start of each epoch (see training()).
def get_next_batch(data_set, batch_size=50):
    """Return (batch_x, batch_y): *batch_size* images and their labels."""
    global pointer
    batch_x = []
    batch_y = []
    # BUG FIX: the original iterated range(len(data_set)), consuming the
    # whole remaining data set on the first call and then indexing past
    # the end on later calls.  One batch means batch_size items.
    for _ in range(batch_size):
        path, label = data_set[pointer]
        batch_x.append(get_image(path))
        batch_y.append(label)
        pointer += 1
    return batch_x, batch_y

# Graph input: one fixed-size batch of BGR images.
# FIX: was IMAGE_WIDHT (NameError) — the constant is IMAGE_WIDTH.
X=tf.placeholder(dtype=tf.float32,shape=[batch_size,IMAGE_HEIGHT,IMAGE_WIDTH,3])
# Graph input: the batch's integer class labels.
# FIX: was tf.placehoder (AttributeError) — tf.placeholder.
Y=tf.placeholder(dtype=tf.int32,shape=[batch_size])

def conv_net(nlabels, images, pkeep=1.0, weight_decay=0.0005):
    """Levi/Hassner-style 3-conv CNN returning classification logits.

    Args:
        nlabels: number of output classes.
        images: float32 batch tensor, [N, IMAGE_HEIGHT, IMAGE_WIDTH, 3].
        pkeep: dropout keep probability (1.0 = no dropout, for inference).
        weight_decay: L2 regularization scale.  The original called
            l2_regularizer() with no argument, which raises TypeError;
            0.0005 is a conventional default — tune as needed.

    Returns:
        [N, nlabels] logits tensor.
    """
    weight_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    # FIX: was tf.variabl_scope (typo).
    with tf.variable_scope("conv_net", "conv_net", [images]) as scope:
        # FIX: the slim layers' keyword is weights_regularizer, not
        # weight_regularizer — the original raised on an unknown kwarg.
        with tf.contrib.slim.arg_scope([convolution2d, fully_connected],
                                       weights_regularizer=weight_regularizer,
                                       biases_initializer=tf.constant_initializer(1.0),
                                       weights_initializer=tf.random_normal_initializer(stddev=0.01)):
            with tf.contrib.slim.arg_scope([convolution2d],
                                           weights_initializer=tf.random_normal_initializer(stddev=0.01)):
                conv1 = convolution2d(images, 48, [7, 7], [3, 3], padding="VALID",
                                      biases_initializer=tf.constant_initializer(0.), scope='conv1')
                pool1 = max_pool2d(conv1, 3, 3, padding='VALID', scope='pool1')
                norm1 = tf.nn.local_response_normalization(pool1, 5, alpha=0.0001, beta=0.75, name='norm1')
                conv2 = convolution2d(norm1, 96, [5, 5], [1, 1], padding="VALID", scope='conv2')
                pool2 = max_pool2d(conv2, 3, 2, padding='VALID', scope='pool2')
                norm2 = tf.nn.local_response_normalization(pool2, 5, alpha=0.0001, beta=0.75, name='norm2')
                conv3 = convolution2d(norm2, 256, [3, 3], [1, 1], padding='VALID', scope='conv3')
                # NOTE(review): 256*6*3 presumably matches 112x92 inputs
                # after the convs/pools above — confirm if the input size
                # ever changes.
                flat = tf.reshape(conv3, [-1, 256 * 6 * 3], name='reshape')
                full1 = fully_connected(flat, 256, scope='full1')
                drop1 = tf.nn.dropout(full1, pkeep, name='drop1')
                full2 = fully_connected(drop1, 256, scope='full2')
                # FIX: was name='drop1' again (copy/paste duplicate).
                drop2 = tf.nn.dropout(full2, pkeep, name='drop2')
    with tf.variable_scope('output') as scope:
        # Final linear layer: 256 -> nlabels logits.
        weights = tf.Variable(tf.random_normal([256, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(drop2, weights), biases)
        return output
def training():
    """Build the loss/optimizer graph and run the training loop.

    Consumes the module-level placeholders X/Y and the global data_set;
    writes TensorBoard summaries, checkpoints to ``modelpath`` every
    other epoch, and returns after ``max_epoch`` epochs.
    """
    logits = conv_net(label_size, X)

    def optimizer(eta, loss_fn):
        # Momentum SGD with staircase exponential LR decay (x0.97 every
        # 100 steps) and gradient clipping at 4.
        global_step = tf.Variable(0, trainable=False)
        optz = lambda lr: tf.train.MomentumOptimizer(lr, 0.9)
        lr_decay_fn = lambda lr, global_step: tf.train.exponential_decay(lr, global_step, 100, 0.97, staircase=True)
        return tf.contrib.layers.optimize_loss(loss_fn, global_step, eta, optz, clip_gradients=4.,
                                               learning_rate_decay_fn=lr_decay_fn)

    def loss(logits, labels):
        # Mean cross-entropy plus scaled L2 regularization; a moving
        # average is maintained as a control dependency for smoothing.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = cross_entropy_mean + 0.01 * sum(regularization_losses)
        tf.summary.histogram("/htotal_loss", total_loss)
        tf.summary.scalar("/stotal_loss", total_loss)
        loss_averages = tf.train.ExponentialMovingAverage(0.9)
        loss_averages_op = loss_averages.apply([cross_entropy_mean] + [total_loss])
        with tf.control_dependencies([loss_averages_op]):
            total_loss = tf.identity(total_loss)
        return total_loss

    total_loss = loss(logits, Y)
    train_op = optimizer(0.001, total_loss)
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        merged = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter('/work/tensorflow/logdir', sess.graph)
        sess.run(tf.global_variables_initializer())

        global pointer
        epoch = 0
        while True:
            pointer = 0  # rewind the batch cursor at the start of each epoch
            for batch in range(num_batchs):
                batch_x, batch_y = get_next_batch(data_set, batch_size)
                summary, _, loss_value = sess.run([merged, train_op, total_loss],
                                                  feed_dict={X: batch_x, Y: batch_y})
                # FIX: use a monotonically increasing global step; the
                # original passed `batch`, so every epoch overwrote the
                # previous epoch's summary curve.
                summary_writer.add_summary(summary, epoch * num_batchs + batch)
                print(epoch, batch, loss_value)
            if epoch % 2 == 0:
                saver.save(sess, modelpath)
            epoch += 1

            if epoch > max_epoch:
                return
training()


# Predict the class of a single image.
def detectsex(image_path):
    """Classify the image at *image_path*.

    Restores weights from ``modelpath`` and returns the argmax class
    index into ``age_table``.  The inference graph is built inside a
    fresh tf.Graph so it cannot collide with variables already created
    by training() in the same process (the original built into the
    default graph, which raises duplicate-variable errors after
    training has run).
    """
    with tf.Graph().as_default():
        X1 = tf.placeholder(dtype=tf.float32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
        logits = conv_net(label_size, X1)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, modelpath)
            softmax_output = tf.nn.softmax(logits)
            batch_results = sess.run(softmax_output, feed_dict={X1: [get_image(image_path)]})
            best = np.argmax(batch_results)
            print(best)
            return best












