#!/usr/bin/env python
# coding: utf-8

# Model construction

# In[2]:


import tensorflow as tf
 
# Import all necessary layers
 
from tensorflow.keras.layers import Input, DepthwiseConv2D,Reshape,Dropout
from tensorflow.keras.layers import Conv2D, BatchNormalization,add, Flatten, Activation,MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import ReLU, AvgPool2D, Flatten, Dense,GlobalAveragePooling2D,ZeroPadding2D

from tensorflow.keras import Model



from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.mobilenet_v3 import MobileNetV3Large
from tensorflow.python.keras.applications.mobilenet_v3 import MobileNetV3Small


from tensorflow.keras.applications.mobilenet import MobileNet
import tensorflow
import os
import tensorflow as tf
 
# TF1-style session configuration: let the GPU allocator grow on demand
# instead of reserving all device memory up front.
config=tf.compat.v1.ConfigProto()

config.gpu_options.allow_growth = True   # enable dynamic GPU memory allocation
session = tf.compat.v1.Session(config=config)

# Number of garbage classes predicted by the final Dense layer.
class_count=46
# Build the pre-trained backbone without its classifier head.

base_model = MobileNetV2(weights='imagenet',input_shape=(96,96,3),alpha=0.5, include_top=False)

x = base_model.output

# Classification head: global pooling -> light dropout -> softmax classifier.
x = GlobalAveragePooling2D()(x)
x=Dropout(0.03)(x)
# NOTE(review): Flatten after GlobalAveragePooling2D is a no-op — GAP already
# yields a rank-2 (batch, channels) tensor.
x=Flatten()(x)

# Softmax activation: the model emits class PROBABILITIES, not logits.
predictions = Dense(class_count, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.summary()


# In[7]:


# NOTE(review): the original comment claimed these conv layers were being
# "locked" (and named InceptionV3), but trainable=True actually UN-freezes
# every layer of the MobileNetV2 base model, so the entire backbone is
# fine-tuned. Confirm full fine-tuning is intended.
for layer in base_model.layers:
    layer.trainable = True
model.summary()


# In[4]:


import numpy as np

def data_list_get(root_file,shuffle,leng):
    """Scan the class sub-directories of root_file and split each class's
    files into train/test path lists.

    Args:
        root_file: directory whose immediate sub-directories each hold the
            images of one class.
        shuffle: if truthy, shuffle each class's files before splitting.
        leng: fraction (0..1) of each class assigned to the train split
            (leng=1 puts everything in train).

    Returns:
        (train_data, test_data): lists of "dir//file" path strings; the
        split is performed per class, so class balance is preserved.
    """
    train_data = []
    test_data = []
    for entry in os.listdir(root_file):
        class_dir = root_file + "//" + entry
        if not os.path.isdir(class_dir):
            continue
        # Fresh list per class (the original reused one shared accumulator
        # and .clear()ed it each iteration).
        class_paths = [class_dir + "//" + name for name in os.listdir(class_dir)]
        indexes = np.arange(len(class_paths))
        if shuffle:
            np.random.shuffle(indexes)
        train_num = int(leng * len(class_paths))
        # Slice the (possibly shuffled) index array instead of looping.
        train_data.extend(class_paths[i] for i in indexes[:train_num])
        test_data.extend(class_paths[i] for i in indexes[train_num:])
    return train_data, test_data

# Windows-style dataset roots; each sub-directory under them is one class.
file1='D://Garbage Dataset6//while//train_96'
file2='D://Garbage Dataset6//while//test'


# leng=1 puts every file into the first (train) split of each call, so the
# second returned list is always empty and is discarded.
train_data,_=data_list_get(file1,True,1)
test_data,_=data_list_get(file2,False,1)
# Bare tuple expression: a notebook-cell echo of the split sizes; it has no
# effect when run as a plain script.
len(train_data),len(test_data)


# Dataset API

# In[9]:


from tensorflow.keras.callbacks import Callback
from    tensorflow.keras.callbacks import EarlyStopping
from    tensorflow.keras import layers,optimizers,losses
from acc_aver import evaluate_aver_accuracy
import math

class acc_back_Loss(Callback):
    """Keras callback: cumulative cosine learning-rate decay plus a
    per-epoch per-class accuracy report via evaluate_aver_accuracy.

    Relies on the module-level globals `newnet`, `test_generator` and
    `class_count` existing by the time training starts.
    """

    def __init__(self, epochs, name):
        # Bug fix: the base Callback initializer was never called, leaving
        # Callback-managed attributes (e.g. self.model) uninitialized.
        super().__init__()
        self.epochs = epochs
        self.name = name

    def on_train_begin(self, logs=None):
        self.losses = []
        print('starttrain')

    def on_epoch_end(self, epoch, logs=None):
        # Per-epoch phase step over a quarter period; the lr is then
        # CUMULATIVELY multiplied by cos((epoch+1)*step) every epoch, so it
        # decays faster than a standard cosine schedule and hits 0 on the
        # final epoch. Behavior kept exactly as the original.
        half_pi = math.pi / 2
        step = half_pi / self.epochs
        print("epoch=%d,all_epoch=%d\n" % (epoch, self.epochs))
        lr = tensorflow.keras.backend.get_value(newnet.optimizer.lr)
        print("学习率为%.7f\n" % lr)

        tensorflow.keras.backend.set_value(newnet.optimizer.lr, lr * math.cos((epoch + 1) * step))
        evaluate_aver_accuracy(1, class_count, newnet, test_generator)
        print('starttrain')

    def on_train_end(self, logs=None):
        print("endtrain")


# Wrap the functional model in a Sequential container; `newnet` is the
# object that is compiled, trained and saved below.
newnet = tensorflow.keras.Sequential([
    model
  
])

# Input edge length in pixels; must match the 96x96 backbone input above.
size=96
newnet.build(input_shape=(1, size, size, 3))
newnet.summary()

# Stop once 'accuracy' improves by less than 0.001 for 5 consecutive epochs.
# NOTE(review): early_stopping is created here but never passed to the
# fit() call below (only the acc_back_Loss callback is) — confirm whether
# it was meant to be used.
early_stopping = EarlyStopping(
    monitor='accuracy',
    min_delta=0.001,
    patience=5
)



import albumentations as A
from My_data_loader import DataGenerator
from acc_aver import evaluate_aver_accuracy


# Training-time augmentation: flips, rotation, colour jitter, Gaussian
# noise, then ImageNet-style normalisation.
trans = A.Compose([
        A.HorizontalFlip(p=0.6),
        A.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=50, p=1),
        A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, always_apply=False, p=0.5),
        A.GaussNoise(),
        A.Normalize(mean=(0.485,0.456,0.406), std=(0.229,0.224,0.225), always_apply=True, max_pixel_value=255.0, p=1.0),
        A.VerticalFlip(p=0.6)
        ])

# Evaluation pipeline: Gaussian noise plus the same normalisation only.
test = A.Compose([
        A.GaussNoise(),
        A.Normalize(mean=(0.485,0.456,0.406), std=(0.229,0.224,0.225), always_apply=True, max_pixel_value=255.0, p=1.0),
        ])

batch_size = 128
learning_rate = 0.0001
epoch = 5

# Bug fix: the model's final Dense layer applies softmax (it outputs
# probabilities), so the loss must NOT treat them as logits.
# from_logits=True on softmax outputs silently degrades training.
newnet.compile(optimizer=optimizers.SGD(learning_rate=learning_rate),
               loss=losses.CategoricalCrossentropy(from_logits=False),
               metrics=['accuracy'])

# Cosine LR decay + per-class accuracy logging (acc_back_Loss above).
# NOTE(review): the early_stopping callback defined earlier is not passed
# here — confirm whether that is intentional.
history = acc_back_Loss(epoch, "modelname")

training_generator = DataGenerator(datas=train_data, batch_size=batch_size, size=size, channel=3, Compose=trans, count=class_count)
test_generator = DataGenerator(datas=test_data, batch_size=batch_size, size=size, channel=3, Compose=test, count=class_count)

history_1 = newnet.fit(training_generator, epochs=epoch, batch_size=batch_size, callbacks=[history],
                       validation_data=test_generator)


# Model saving

# In[10]:


from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import model_from_json

import os

# Persist the trained model in TensorFlow SavedModel format under models/.
MODELS_DIR = 'models/'
MODEL_TF = MODELS_DIR + 'model'

# makedirs with exist_ok=True is equivalent to the exists()/mkdir() pair
# for this single-level directory.
os.makedirs(MODELS_DIR, exist_ok=True)
newnet.save(MODEL_TF)


# Set up the calibration dataset

# In[11]:


from My_data_loader import DataGenerator

# Calibration settings: single-image batches; scale=0 is passed through to
# the project DataGenerator (semantics defined in My_data_loader).
size = 96
batch_size = 1
scale = 0
test_generator = DataGenerator(datas=test_data, batch_size=batch_size, size=size, channel=3, Compose=test, count=class_count, scale=scale)

def representative_dataset_gen():
    """Yield float32 calibration samples for post-training int8 quantization.

    Streams up to 1578 images from test_generator, each reshaped to the
    (1, 96, 96, 3) input shape the converter expects. Labels are ignored.
    """
    count = 0
    for x, _ in test_generator:  # labels are not needed for calibration
        # Removed the original no-op `x = x` assignment.
        x = x.reshape((batch_size, size, size, 3))
        count = count + 1

        print(count)
        if count > 1578:  # cap the number of calibration samples
            break
        yield [tf.dtypes.cast(x, tf.float32)]


# Convert the model to TensorFlow Lite and save it

# In[12]:


import tensorflow as tf


# Post-training full-integer quantization: int8 weights, activations and
# model I/O, calibrated with representative_dataset_gen.
converter = tf.lite.TFLiteConverter.from_saved_model(MODEL_TF)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# Enforce integer only quantization
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
# Provide a representative dataset to ensure we quantize correctly.
converter.representative_dataset = representative_dataset_gen
model_tflite = converter.convert()

MODEL_TFLITE = MODELS_DIR + 'model.tflite'
# Bug fix: the original `open(...).write(...)` never closed the file handle;
# a context manager guarantees the buffer is flushed and the handle closed.
with open(MODEL_TFLITE, "wb") as f:
    f.write(model_tflite)

