#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 12 13:00:18 2017

@author: luohao
"""

import numpy as np
import os
from keras import optimizers
from keras.utils import np_utils, generic_utils
from keras.models import Sequential, Model, load_model
from keras.layers import Dropout, Flatten, Dense, Input, GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
from keras.layers.core import Lambda
from scipy.misc import imread, imresize
from sklearn.preprocessing import normalize
from keras.preprocessing.image import ImageDataGenerator
from keras.initializers import RandomNormal
import tensorflow as tf

import dao
from datasets import load_dataset
from inception_resnet_v1 import InceptionResNetV1
from models.aug import aug_nhw3
import numpy.linalg as la
from six.moves import xrange
import facenet

# Side length (pixels) every image is resized to before entering the network.
IMAGE_SIZE = 224

# Product-description keys that carry no attribute signal for training;
# get_img_info() skips any description entry whose key appears here.  Keys are
# Chinese field names from the upstream product database; the list is only used
# for membership tests, so the duplicated entries are harmless.
not_header = ['商品名称', '店铺', '商品毛重', '商品编号', '上市时间', '货号', '商品产地',
              '表盘颜色', '适用年龄', '尺码', '适用年龄', '销售渠道类型', '材质成分', '品牌', '品牌名称',
              '上市年份季节', '年份季节', '配件/备注', '产地', '洗涤说明', '面料成分', '材质', '大码女装分类', '分类', '适用季节',
              '面料', '成分含量', '质地', '尺寸', '服装款式细节', '材质1', '自定义', '材质3', '深灰色', '黑色', '深蓝', '深蓝2.0',
              '温馨提示', '藏青色', '灰色材质', '成分', '材质2', '蓝/黑色材质', '白色材质', '纤维成份', '里料', '厚薄', '厚度',
              '上市年份/季节', '年份/季节', '街头', '甜美', '主要颜色', '规格', '罩杯材质', '中老年女装分类', '穿着方式', '尺码', '品牌',
              '通勤', '流行元素', '中老年风格', '服饰工艺', '女装质地', '适用场景', '网纱', '组合形式', '毛线粗细', '花朵', '流行元素',
              '工艺', '面料分类', '风格', '中老年女装图案', '适用对象', '克重', '组合规格', '里料分类', '主要材质', '图案',
              '填充物', '牛仔面料盎司', '功能', '设计裁剪', '人群', '图案文化', '流行元素/工艺', '流行元素/工艺', '弹力', '2.0材质',
              '适用场合', '重量', '类型', '弹性', '款式', '选购热点', '适用人群', '适合人群', '面料成份'
              ]

# Catalog IDs for the 3-way catalog classifier.  The original annotation listed
# four labels (tops / skirts & dresses / trousers / specialty wear: 上装/裙装/裤装/特色服饰)
# for three IDs — presumably 93/94/95 map to the first three; verify against t_product.
catalog = [93, 94, 95]
# Attribute vocabularies: one-hot / multi-hot targets are indexed by position in these lists.
sleeve = ['长袖', '短袖', '无袖', '五分袖', '七分袖', '九分袖']  # sleeve lengths
# Superseded, larger collar vocabulary kept for reference:
# collar = ['V领', '不规则领', '圆领', '低圆领', '其它', '高领', '半高领', '翻领', '连帽', '围巾领', 'A字型', '立领', '半高圆领',
#           '一字领', '方领', '旗袍领', 'POLO领', '堆堆领', '无领', '椭圆领', '半开领', 'U型领', '八字领', '高圆领', '低领', '常规领',
#           '娃娃领', '鸡心领', '平领', '双层领', '披肩领', '荡领', '橄榄领', '海军领', '大尖领', '系带领', '无', '荷叶领', '挂脖']
collar = ['圆领', '其它', 'V领', '高领', '娃娃领', '立领', '方领', '翻领', '海军领', '半高领', 'POLO领', '连帽', '一字领', '半高圆领', '荷叶领']  # collar types
coat_length = ['超短款', '短款', '常规款', '中长款', '长款', '不对称衣长']  # top/coat lengths
dress_length = ['超短裙', '短裙', '中裙', '中长裙', '长裙']  # skirt/dress lengths
outseam = ['短裤', '长裤', '热裤', '五分裤', '七分裤', '九分裤']  # trouser lengths
waistline = ['低腰', '中腰', '高腰', '松紧腰', '调节腰']  # waist types


def get_good_ids(base_dir):
    """Return the names of all immediate subdirectories of *base_dir* (one per product id)."""
    good_ids = []
    for entry in os.listdir(base_dir):
        if os.path.isdir(os.path.join(base_dir, entry)):
            good_ids.append(entry)
    return good_ids


class ImageClass():
    """Holds the image paths plus catalog / attribute label vectors for one product."""

    def __init__(self, good_id, image_paths, catalog, attr):
        # Store everything exactly as supplied by the caller.
        self.good_id = good_id
        self.image_paths = image_paths
        self.catalog = catalog
        self.attr = attr

    def __str__(self):
        # e.g. "12345, 7 images,"
        return '{0}, {1} images,'.format(self.good_id, len(self.image_paths))

    def __len__(self):
        # Number of images available for this product.
        return len(self.image_paths)


def get_img_info(base_dir):
    """Build ImageClass metadata for every product directory under *base_dir*.

    Each subdirectory name is a product id; the product's catalog id and
    attribute description are fetched from the t_product table and encoded as
    fixed-position one-hot / multi-hot vectors (sleeve, collar, coat length,
    skirt length, trouser length, waist).  Products whose description fails to
    parse are skipped with a printed message.
    """
    images = []

    is_root = True  # NOTE(review): never used
    good_ids = get_good_ids(base_dir)
    good_num = 0  # NOTE(review): never used
    for good_id in good_ids:
        img_attr = []
        dir = os.path.join(base_dir, good_id)
        # catalog_categorical = np.zeros(len(catalog))
        # catalog_categorical = []
        # catalog: tops / skirts / trousers
        # sleeve length
        sleeve_categorical = np.zeros(len(sleeve))
        # sleeve_categorical = [0]*len(sleeve)
        # # collar type
        collar_categorical = np.zeros(len(collar))
        # # coat length
        coat_length_categorical = np.zeros(len(coat_length))
        #
        # # skirt length
        dress_length_categorical = np.zeros(len(dress_length))
        # # sleeve length
        # # trouser length
        outseam_categorical = np.zeros(len(outseam))
        # # waist type
        waistline_categorical = np.zeros(len(waistline))

        # image paths
        image_paths = [os.path.join(dir, name) for name in os.listdir(dir)]
        str_sql = "select catalogID,description from t_product where sourceProductID='%s'" % good_id
        result = dao.select_one(str_sql)
        if result:
            # product catalog (one-hot over `catalog`; raises ValueError for unknown ids)
            catalog_id = int(result['catalogID'])
            # catalog_categorical[catalog.index(catalog_id)] = 1.
            # print(catalog_categorical)
            catalog_categorical = np_utils.to_categorical(catalog.index(catalog_id), len(catalog))
            # product attributes
            # SECURITY(review): eval() executes arbitrary code stored in the DB;
            # prefer ast.literal_eval (or store JSON and use json.loads).
            try:
                attrs = eval(result['description'])
                for key in attrs.keys():
                    if key in not_header:
                        continue
                    attr = attrs.get(key)  # [0:3]
                    if key == '袖长':
                        # Normalise "mid sleeve" to the 5/10-length bucket.
                        if attr == '中袖':
                            attr = '五分袖'
                        # catalog 95 (trousers, presumably) has no sleeve attribute
                        if not catalog_id ==95:
                            sleeve_categorical[sleeve.index(attr)] = 1.
                    elif key == '领型':
                        collar_categorical[collar.index(attr)] = 1.
                    elif key == '衣长':
                        coat_length_categorical[coat_length.index(attr)] = 1.
                    elif key == '裙长':
                        dress_length_categorical[dress_length.index(attr)] = 1.
                    elif key == '裤长':
                        if catalog_id ==95:
                            outseam_categorical[outseam.index(attr)] = 1.
                    elif key == '腰型':
                        if catalog_id ==95:
                            waistline_categorical[waistline.index(attr)] = 1.
            except Exception as e:
                # NOTE(review): `key` is unbound here if eval() itself raised,
                # which would turn this handler into a NameError.
                print('catalog_id:' + str(catalog_id) + ',attr eval error:' + key + ',good_id:' + good_id)
                print(e)
                continue
            # img_attr.extend(catalog_categorical)
            # Concatenate all attribute vectors into one multi-hot row vector.
            img_attr.extend(sleeve_categorical)
            img_attr.extend(collar_categorical)
            img_attr.extend(coat_length_categorical)
            img_attr.extend(dress_length_categorical)
            img_attr.extend(outseam_categorical)
            img_attr.extend(waistline_categorical)
            images.append(
                ImageClass(good_id, image_paths, catalog_categorical, np.array(img_attr).reshape(1, len(img_attr))))
    return images


def triplet_loss(y_true, y_pred):
    """Plain triplet loss with a 0.2 margin.

    Assumes the batch predictions are stacked as [anchors | positives | negatives],
    each segment PN rows long (PN is a module-level global set by the training
    script).  y_true is ignored.
    """
    margin = 0.2
    embeddings = K.l2_normalize(y_pred, axis=1)
    n = PN
    anchors = embeddings[0:n, :]
    positives = embeddings[n:2 * n, :]
    negatives = embeddings[2 * n:3 * n, :]
    # Euclidean anchor-positive / anchor-negative distances, one per triplet.
    pos_dist = K.sqrt(K.sum(K.square(anchors - positives), axis=1, keepdims=True))
    neg_dist = K.sqrt(K.sum(K.square(anchors - negatives), axis=1, keepdims=True))
    return K.mean(K.maximum(0.0, pos_dist - neg_dist + margin))


def triplet_hard_loss(y_true, y_pred):
    """Batch-hard triplet loss over a batch laid out as PN classes x SN images,
    with all SN images of a class contiguous (SN and PN are module-level globals
    set by the training script).

    For each sample, picks the hardest (max-distance) positive and hardest
    (min-distance) negative from the pairwise L2 distance matrix, with a margin
    of 0.6.  y_true is ignored.
    """
    global SN
    global PN
    feat_num = SN * PN  # images num
    y_pred = K.l2_normalize(y_pred, axis=1)
    # Pairwise distance matrix: dis_mat[i, j] = ||f_i - f_j||.
    feat1 = K.tile(K.expand_dims(y_pred, axis=0), [feat_num, 1, 1])
    feat2 = K.tile(K.expand_dims(y_pred, axis=1), [1, feat_num, 1])
    delta = feat1 - feat2
    dis_mat = K.sum(K.square(delta), axis=2)
    dis_mat = K.sqrt(dis_mat) + 1e-8  # 1e-8 is not necessary
    # Split into same-class (positive) and cross-class (negative) blocks,
    # one stripe of SN rows per class.
    positive = dis_mat[0:SN, 0:SN]
    negetive = dis_mat[0:SN, SN:]
    for i in range(1, PN):
        positive = tf.concat([positive, dis_mat[i * SN:(i + 1) * SN, i * SN:(i + 1) * SN]], axis=0)
        if i != PN - 1:
            negs = tf.concat([dis_mat[i * SN:(i + 1) * SN, 0:i * SN], dis_mat[i * SN:(i + 1) * SN, (i + 1) * SN:]],
                             axis=1)
        else:
            # NOTE(review): the last class has a single cross-class slice, but
            # tf.concat is handed the bare tensor (not a list) with axis=0 instead
            # of axis=1.  The slice width (PN-1)*SN happens to match the stripes
            # above so shapes line up, but this call is fragile — confirm it
            # behaves as intended on the TF version in use.
            negs = tf.concat(dis_mat[i * SN:(i + 1) * SN, 0:i * SN], axis=0)
        negetive = tf.concat([negetive, negs], axis=0)
    # Hardest positive (row max) and hardest negative (row min) per sample.
    positive = K.max(positive, axis=1)
    negetive = K.min(negetive, axis=1)
    a1 = 0.6  # margin
    loss = K.mean(K.maximum(0.0, positive - negetive + a1))
    return loss


def select_triplets(embeddings, nrof_images_per_class, image_paths, people_per_batch, alpha):
    """Select (anchor, positive, negative) training triplets from a batch of embeddings.

    Embeddings of the same class are assumed contiguous, nrof_images_per_class[i]
    rows for class i.  For every intra-class pair, a negative is drawn at random
    among all samples that violate the margin *alpha* (VGG Face style semi-hard
    mining).  Returns (triplets, num_trips, len(triplets)) where num_trips counts
    all candidate pairs considered.

    Fixes vs original: uses builtin range instead of six.moves.xrange and np.nan
    instead of the np.NaN alias (removed in NumPy 2.0).
    """
    trip_idx = 0
    emb_start_idx = 0
    num_trips = 0
    triplets = []

    # VGG Face: Choosing good triplets is crucial and should strike a balance between
    #  selecting informative (i.e. challenging) examples and swamping training with examples that
    #  are too hard. This is achieve by extending each pair (a, p) to a triplet (a, p, n) by sampling
    #  the image n at random, but only between the ones that violate the triplet loss margin. The
    #  latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than
    #  choosing the maximally violating example, as often done in structured output learning.

    for i in range(people_per_batch):
        nrof_images = int(nrof_images_per_class[i])
        for j in range(1, nrof_images):
            a_idx = emb_start_idx + j - 1
            neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            for pair in range(j, nrof_images):  # For every possible positive pair.
                p_idx = emb_start_idx + pair
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                # Mask same-class distances so they can never be picked as negatives
                # (NaN comparisons are always False in np.where below).
                neg_dists_sqr[emb_start_idx:emb_start_idx + nrof_images] = np.nan
                # all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0]  # FaceNet selection
                all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]  # VGG Face selecction
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs > 0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    triplets.append((image_paths[a_idx], image_paths[p_idx], image_paths[n_idx]))
                    trip_idx += 1

                num_trips += 1

        emb_start_idx += nrof_images

    np.random.shuffle(triplets)
    return triplets, num_trips, len(triplets)


def sample_people(dataset, people_per_batch, images_per_person):
    """Sample image paths from randomly ordered classes until
    people_per_batch * images_per_person paths have been collected.

    At most images_per_person images are drawn (without replacement) from each
    class in turn.  Returns (image_paths, sampled_class_indices, num_per_class),
    where sampled_class_indices gives the class of each path and num_per_class
    the number taken from each visited class.
    """
    target_total = people_per_batch * images_per_person
    # Visit classes in random order.
    shuffled_classes = np.arange(len(dataset))
    np.random.shuffle(shuffled_classes)

    image_paths = []
    num_per_class = []
    sampled_class_indices = []
    cursor = 0
    # Keep pulling from successive classes until we have enough images.
    while len(image_paths) < target_total:
        class_index = shuffled_classes[cursor]
        available = len(dataset[class_index])
        order = np.arange(available)
        np.random.shuffle(order)
        take = min(available, images_per_person, target_total - len(image_paths))
        chosen = [dataset[class_index].image_paths[k] for k in order[:take]]
        sampled_class_indices.extend([class_index] * take)
        image_paths.extend(chosen)
        num_per_class.append(take)
        cursor += 1

    return image_paths, sampled_class_indices, num_per_class


def get_triplet_hard_data(dataset, SN, PN):
    '''
    Build one batch of images for triplet-hard training.

    SN: images per person (class) per batch (images_per_person)
    PN: persons (classes) per batch (people_per_batch)

    Returns (images, train_label): images is (SN*PN, IMAGE_SIZE, IMAGE_SIZE, 3)
    and train_label the class index for each row, as produced by sample_people().

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2; this
    function needs porting (e.g. to imageio/Pillow) before it runs on a modern
    environment.
    '''
    # dataset = facenet.get_dataset('E:/cdn/classes/useful')

    image_paths, train_label, _ = sample_people(dataset, PN, SN)
    nrof_samples = len(image_paths)
    images = np.zeros((nrof_samples, IMAGE_SIZE, IMAGE_SIZE, 3))

    for i in range(nrof_samples):
        img = imread(image_paths[i])
        H, W = img.shape[0], img.shape[1]
        # Crop so both sides are multiples of 16 before resizing.
        H_crop = H - H % 16
        W_crop = W - W % 16
        img = img[:H_crop, :W_crop]
        img = imresize(img, (IMAGE_SIZE, IMAGE_SIZE))
        images[i, :, :, :] = img

    return images, train_label


def sample_people1(dataset, PN):
    """For each of PN randomly chosen classes, sample an (anchor, positive) pair
    from the class plus one negative image from a different class.

    Returns (image_paths, sampled_class_indices): paths grouped as
    [a, p, n, a, p, n, ...] with the class index of each path in the same order.

    Fixes vs original:
    - the negative class is taken from the shuffled order with wraparound, so
      PN == len(dataset) no longer raises IndexError on the last iteration;
    - a class with a single image now contributes that image as both anchor and
      positive (the original appended only one path but two class indices,
      desynchronising the two return lists).
    """
    nrof_classes = len(dataset)
    class_indices = np.arange(nrof_classes)
    np.random.shuffle(class_indices)
    image_paths = []
    sampled_class_indices = []
    for i in range(PN):
        class_index = class_indices[i]
        nrof_images_in_class = len(dataset[class_index])
        image_indices = np.arange(nrof_images_in_class)
        np.random.shuffle(image_indices)
        # anchor / positive: two distinct images when available, otherwise the same image twice
        idx = image_indices[0:2]
        if len(idx) == 1:
            idx = [idx[0], idx[0]]
        image_paths += [dataset[class_index].image_paths[j] for j in idx]
        sampled_class_indices += [class_index] * 2
        # negative: the next class in the shuffled order (wraps around at the end)
        n_class_index = class_indices[(i + 1) % nrof_classes]
        n_idx = np.random.randint(len(dataset[n_class_index]))
        image_paths.append(dataset[n_class_index].image_paths[n_idx])
        sampled_class_indices += [n_class_index]
    return image_paths, sampled_class_indices


# NOTE(review): dead code — an earlier get_triplet_data() variant disabled by
# wrapping it in a module-level string literal.  Kept verbatim for reference;
# the active implementation is the get_triplet_data() defined further down.
'''
def get_triplet_data(dataset, PN):
    image_paths, sampled_class_indices = sample_people1(dataset, PN)
    print(image_paths)
    nrof_samples = len(image_paths)
    print('nrof_samples:' + str(nrof_samples))
    images = np.zeros((nrof_samples, IMAGE_SIZE, IMAGE_SIZE, 3))
    labels = np.zeros(nrof_samples)
    for i in range(0, PN, 3):
        img = imread(image_paths[i])
        H, W = img.shape[0], img.shape[1]
        H_crop = H - H % 16
        W_crop = W - W % 16
        img = img[:H_crop, :W_crop]
        img = imresize(img, (IMAGE_SIZE, IMAGE_SIZE))
        images[i, :, :, :] = img
        labels[i] = sampled_class_indices[i]

        img = imread(image_paths[i + 1])
        H, W = img.shape[0], img.shape[1]
        H_crop = H - H % 16
        W_crop = W - W % 16
        img = img[:H_crop, :W_crop]
        img = imresize(img, (IMAGE_SIZE, IMAGE_SIZE))
        images[i + PN, :, :, :] = img
        labels[i + PN] = sampled_class_indices[i + 1]

        img = imread(image_paths[i + 2])
        H, W = img.shape[0], img.shape[1]
        H_crop = H - H % 16
        W_crop = W - W % 16
        img = img[:H_crop, :W_crop]
        img = imresize(img, (IMAGE_SIZE, IMAGE_SIZE))
        images[i + PN * 2, :, :, :] = img
        labels[i + PN * 2] = sampled_class_indices[i + 2]
    return images, labels
'''


def get_triplet_data_from_dataset(dataset, PN):
    """Assemble a triplet batch from preloaded image arrays.

    Row layout of the returned arrays: [0, PN) anchors, [PN, 2*PN) positives,
    [2*PN, 3*PN) negatives; labels holds the class index of each row.
    Assumes each dataset entry exposes `.images` (arrays of IMAGE_SIZE side)
    and supports len().
    """
    nrof_classes = len(dataset)
    class_order = np.arange(nrof_classes)
    np.random.shuffle(class_order)

    images = np.zeros((3 * PN, IMAGE_SIZE, IMAGE_SIZE, 3))
    labels = np.zeros(3 * PN)

    for slot in range(PN):
        cls = class_order[slot]
        n_in_class = len(dataset[cls])
        picks = np.arange(n_in_class)
        np.random.shuffle(picks)

        # Anchor; the positive reuses the anchor image when the class has only one.
        images[slot] = dataset[cls].images[picks[0]]
        labels[slot] = cls
        pos_pick = picks[1] if n_in_class > 1 else picks[0]
        images[slot + PN] = dataset[cls].images[pos_pick]
        labels[slot + PN] = cls

        # Negative: draw from [0, nrof_classes-1) and bump by one on collision,
        # which keeps the draw uniform over the other classes.
        neg_cls = np.random.randint(nrof_classes - 1)
        if neg_cls == cls:
            neg_cls += 1
        images[slot + PN * 2] = dataset[neg_cls].images[0]
        labels[slot + PN * 2] = neg_cls
    return images, labels


def get_triplet_data(images_info, PN):
    """Build a triplet batch (anchor/positive/negative) of images read from disk,
    plus the catalog one-hot labels and multi-hot attribute labels for each row.

    Row layout: [0, PN) anchors, [PN, 2*PN) positives, [2*PN, 3*PN) negatives.
    *images_info* is a list of ImageClass objects as produced by get_img_info().
    """
    nrof_classes = len(images_info)
    class_order = np.arange(nrof_classes)
    np.random.shuffle(class_order)

    images = np.zeros((3 * PN, IMAGE_SIZE, IMAGE_SIZE, 3))
    catalog_labels = np.zeros((3 * PN, len(images_info[0].catalog)))
    attr_labels = np.zeros((3 * PN, len(images_info[0].attr[0])))

    for slot in range(PN):
        cls = class_order[slot]
        info = images_info[cls]
        picks = np.arange(len(info))
        np.random.shuffle(picks)

        # Anchor; the positive falls back to the anchor image for single-image classes.
        images[slot] = get_img_array(info.image_paths[picks[0]])
        attr_labels[slot] = info.attr[0]
        catalog_labels[slot] = info.catalog
        pos_pick = picks[1] if len(info) > 1 else picks[0]
        images[slot + PN] = get_img_array(info.image_paths[pos_pick])
        attr_labels[slot + PN] = info.attr[0]
        catalog_labels[slot + PN] = info.catalog

        # Negative: uniform over the other classes (draw then bump on collision),
        # always using that class's first image.
        neg_cls = np.random.randint(nrof_classes - 1)
        if neg_cls == cls:
            neg_cls += 1
        neg_info = images_info[neg_cls]
        images[slot + PN * 2] = get_img_array(neg_info.image_paths[0])
        attr_labels[slot + PN * 2] = neg_info.attr[0]
        catalog_labels[slot + PN * 2] = neg_info.catalog
    return images, catalog_labels, attr_labels


def get_random_batch(images_info, batch_num=50):
    """Return *batch_num* classes sampled uniformly without replacement from
    *images_info*, or the whole list when it is not larger than batch_num.

    Bug fixed: the original shuffled `class_indices` but then appended
    `images_info[i]` directly, so the shuffle had no effect and the first
    batch_num entries were always returned.
    """
    nrof_classes = len(images_info)
    if nrof_classes <= batch_num:
        return images_info
    class_indices = np.arange(nrof_classes)
    np.random.shuffle(class_indices)
    return [images_info[class_indices[i]] for i in range(batch_num)]


def get_img_array(img_path):
    """Read the image at *img_path* and return it as an
    (IMAGE_SIZE, IMAGE_SIZE, 3) array, resizing when necessary.

    Bug fixed: the original called `imresize(IMAGE_SIZE, IMAGE_SIZE, 3)`,
    passing the target sizes instead of the image, so any image that was not
    already IMAGE_SIZE x IMAGE_SIZE x 3 was never actually resized.

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    porting to imageio/Pillow is required on modern environments.
    """
    img = imread(img_path)
    if not img.shape == (IMAGE_SIZE, IMAGE_SIZE, 3):
        img = imresize(img, (IMAGE_SIZE, IMAGE_SIZE))
    # img = img / 255.
    return img


def droped():
    """Earlier ResNet50 identity-softmax + triplet-loss training routine.

    NOTE(review): the name ("droped" ~ dropped) and the fact that nothing calls
    it suggest retained dead code.  As written it would fail today: the current
    get_triplet_data() returns three values but is unpacked into two below, and
    `optimizers.adam` relies on the lowercase alias from old Keras releases.
    """
    SN = 3
    PN = 20
    identity_num = 100

    print('loading data...')
    dataset = facenet.get_dataset('E:/cdn/classes/useful')
    # dataset_train, dataset_test = facenet.split_dataset(dataset, 0.8, 'SPLIT_IMAGES')
    print('loading data end')
    # NOTE(review): 2-value unpack matches the old (disabled) get_triplet_data
    # variant, not the active 3-value one.
    train_img, train_label = get_triplet_data(dataset, PN)
    # load pre-trained resnet50
    base_model = ResNet50(weights='imagenet', include_top=False,
                          input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    x = base_model.output
    # x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    # feature = Dense(512, activation=None)(x)
    feature = Flatten(name='flatten')(x)
    fc1 = Dropout(0.5)(feature)
    preds = Dense(identity_num, activation='softmax', name='fc8')(fc1)  # default glorot_uniform
    # net = Model(inputs=base_model.input, outputs=preds)
    # feature_model = Model(inputs=base_model.input, outputs=feature)
    # Two heads: identity softmax and the flattened feature (for the triplet loss).
    class_triplet_model = Model(inputs=base_model.input, outputs=[preds, feature])
    # class_triplet_model = Model(inputs=base_model.input, outputs=feature)

    # print(len(base_model.layers))

    # training IDE model for all layers
    # layer_num = len(base_model.layers)
    # for layer in base_model.layers[:layer_num - 1]:
    #     layer.trainable = False
    # for layer in base_model.layers[layer_num - 1:]:
    #     layer.trainable = True

    # Freeze the whole backbone; only the new heads train.
    for layer in base_model.layers:
        layer.trainable = False

    # train
    batch_num = PN  # NOTE(review): never used
    # adam = optimizers.Adam(lr=0.00001)
    lr = 0.001
    adam = optimizers.adam(lr)  # NOTE(review): lowercase alias; use optimizers.Adam on newer Keras
    # adam = optimizers.SGD(lr=0.0001,momentum=0.9,decay=0.0005)
    # net.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    class_triplet_model.compile(optimizer=adam, loss=['categorical_crossentropy', triplet_loss],
                                loss_weights=[1.0, 1.0])
    # class_triplet_model.compile(optimizer=adam, loss=triplet_loss, metrics=['accuracy'])
    # you can load pre-trained model here
    # net.load_weights('triplet_hard_aug_save.h5')


    for i in range(1000):
        print('i:' + str(i))
        # the data in a batch: A1 B1 C1 ...PN1 A2 B2 C2 ... PN2 G K S ... Negative(PN)
        train_img, train_label = get_triplet_data(dataset, PN)
        # the data in a batch : A1 A2 A3... ASN B1 B2 B3... BSN ... PN1 PN2 PN3... PNSN
        # train_img, train_label = get_triplet_hard_data(dataset_train, SN, PN)
        train_img = aug_nhw3(train_img)
        train_img = preprocess_input(train_img)
        train_label_onehot = np_utils.to_categorical(train_label, identity_num)
        # The triplet head gets a dummy all-ones target; triplet_loss ignores y_true.
        class_triplet_model.fit(train_img,
                                y=[train_label_onehot, np.ones([PN * SN, 2048])],
                                shuffle=False, epochs=1, batch_size=PN * SN)  # for triplet loss: SN = 3

        # image data generator
        # train_datagen = ImageDataGenerator(
        #     preprocessing_function=preprocess_input,
        #     rotation_range=30,
        #     width_shift_range=0.2,
        #     height_shift_range=0.2,
        #     shear_range=0.2,
        #     zoom_range=0.2,
        #     horizontal_flip=True
        # )
        #
        # train_generator = train_datagen.flow(train_img, train_label, batch_size=PN * SN, shuffle=False)
        # ,save_to_dir='D:/generator', save_prefix='useful', save_format='jpg')

        # history = class_triplet_model.fit_generator(
        #     train_generator,
        #     epochs=1,
        #     steps_per_epoch=10,
        #     class_weight="auto")

    # save the model
    model_save = os.path.join('E:/cdn/models', 'triplet_resnet50.h5')
    class_triplet_model.save(model_save)
    class_triplet_model.summary()


if __name__ == '__main__':
    # ---- data -------------------------------------------------------------
    images = get_img_info('E:/cdn/datasets/triplet')
    random_batch = 50  # classes drawn per outer round
    SN = 3  # read (via module globals) by the dummy triplet target below; batch is 3*PN == PN*SN
    PN = 20  # triplets per batch; also read by triplet_loss via module globals
    catalog_num = len(images[0].catalog)
    attr_num = len(images[0].attr[0])
    # load pre-trained resnet50
    base_model = ResNet50(weights='imagenet', include_top=False,
                          input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    x = base_model.output
    # feature = Flatten(name='flatten')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    # 512-d embedding head used by the triplet loss.
    feature = Dense(512, use_bias=False, name='Bottleneck')(x)
    x = Dense(512, activation='relu')(x)  # new FC layer, random init
    catalog_preds = Dense(catalog_num, activation='softmax', name='fc_catalog')(x)
    preds = Dense(attr_num, activation='sigmoid', name='fc_attr')(x)  # new softmax layer

    # x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    # feature = Dense(512, activation=None)(x)

    # fc1 = Dropout(0.5)(feature)
    # preds = Dense(identity_num, activation='softmax', name='fc8')(fc1)  # default glorot_uniform
    # Three outputs: catalog softmax, attribute sigmoids, embedding for the triplet loss.
    class_triplet_model = Model(inputs=base_model.input, outputs=[catalog_preds, preds, feature])
    # class_triplet_model = Model(inputs=base_model.input, outputs=preds)
    layer_num = len(base_model.layers)
    # Freeze the whole ResNet50 backbone; only the new heads train.
    for layer in base_model.layers:
        layer.trainable = False
    # base_model.layers[layer_num - 1].trainable = True

    # adam = optimizers.Adam(lr=0.00001)
    lr = 0.001
    adam = optimizers.adam(lr)  # NOTE(review): lowercase alias from old Keras; use optimizers.Adam on newer versions
    # adam = optimizers.SGD(lr=0.0001, momentum=0.9, decay=0.0005)
    class_triplet_model.compile(optimizer=adam, loss=['categorical_crossentropy', 'binary_crossentropy', triplet_loss],
                                loss_weights=[1.0, 1.0, 1.0], metrics={'fc_catalog': 'accuracy', 'fc_attr': 'accuracy'})
    # class_triplet_model.compile(optimizer=adam, loss='binary_crossentropy',metrics=['accuracy'])

    # NOTE(review): datagen is configured but only referenced inside the
    # disabled string-literal block further down.
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)

    # train
    for i in range(60):
        print('random_batch_i:' + str(i))
        batch_images = get_random_batch(images, random_batch)
        # train_img, train_catalog_label, train_attr_label = get_triplet_data(images, PN)
        for j in range(150):  # some garments have up to several hundred images
            print('i_' + str(i) + '_sub_batch_j:' + str(j))
            train_img, train_catalog_label, train_attr_label = get_triplet_data(batch_images, PN)
            train_img = aug_nhw3(train_img)
            train_img = preprocess_input(train_img)
            # The triplet head gets a dummy all-ones target; triplet_loss ignores y_true.
            class_triplet_model.fit(train_img,
                                    y=[train_catalog_label, train_attr_label, np.ones([PN * SN, 512])],
                                    shuffle=False, epochs=1, batch_size=PN * SN)
            '''
            datagen.fit(train_img, augment=True)
            # for epoch in range(5):
            epoch = 0
            for x_train, y_train in datagen.flow(train_img, train_attr_label, batch_size=PN * SN, shuffle=False):
                print('i_' + str(i) + '_sub_batch_j_' + str(j) + '_epoch:' + str(epoch))
                # train_img = aug_nhw3(train_img)
                # train_img = preprocess_input(train_img)
                class_triplet_model.fit(x_train,
                                        y=[y_train, np.ones([PN * SN, 512])],
                                        shuffle=False, epochs=1, batch_size=PN * SN)
                epoch += 1
                if epoch > 2:
                    break
                    # class_triplet_model.fit(x_train,y=y_train,shuffle=False, epochs=1, batch_size=PN * SN)
            '''
    # save the model
    model_save = os.path.join('E:/cdn/models', 'triplet_resnet50.h5')
    class_triplet_model.save(model_save)
    class_triplet_model.summary()
