#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CPU-only training script for a Chinese-herb image classifier.

Usage:
    python train_cpu.py
"""
import os, shutil, random, tensorflow as tf
from pathlib import Path

# -------------------------------------------------
# 1. Basic configuration
IMG_SIZE    = 96                      # square input edge length (pixels)
BATCH_SIZE  = 16
EPOCHS      = 12                      # max epochs for stage 1 (frozen backbone)
IMAGES_DIR  = Path('./data/images')   # raw images, one sub-folder per class
TRAIN_DIR   = Path('./data/train')    # 80% split destination
VAL_DIR     = Path('./data/val')      # 20% split destination
MODEL_DIR   = Path('./models')        # where .tflite exports are written
MODEL_DIR.mkdir(exist_ok=True)
CLASS_NAMES = ['baihe','dangshen','gouqi','huaihua','jinyinhua']

# Force UTF-8 I/O so TensorFlow does not choke on non-ASCII (Chinese) paths.
os.environ["PYTHONIOENCODING"] = "utf-8"

# -------------------------------------------------
# 2. 辅助：自动 80/20 切分（含中文文件名兼容）
def split_data():
    """Split raw images into train/val folders with an 80/20 ratio.

    Scans IMAGES_DIR for class sub-folders, shuffles each class's images,
    and copies 80% into TRAIN_DIR/<cls> and 20% into VAL_DIR/<cls>.

    The destination class folders are recreated from scratch each run:
    without this, re-running the script accumulates old copies, and an
    image placed in train on one run could end up in val on the next
    (train/val leakage), while the printed counts would also be inflated.

    Raises:
        RuntimeError: if IMAGES_DIR contains no class sub-folders.
    """
    exts = ('*.jpg', '*.jpeg', '*.png', '*.JPG', '*.JPEG', '*.PNG')
    classes = [p.name for p in IMAGES_DIR.iterdir() if p.is_dir()]
    if not classes:
        raise RuntimeError(f'{IMAGES_DIR} 下没有找到任何类别文件夹！')

    print('检测到类别:', classes)
    for cls in classes:
        # Recreate destination folders so the split is idempotent across runs.
        for dst in (TRAIN_DIR / cls, VAL_DIR / cls):
            if dst.exists():
                shutil.rmtree(dst)
            dst.mkdir(parents=True, exist_ok=True)

        imgs = []
        for ext in exts:
            imgs.extend((IMAGES_DIR / cls).glob(ext))

        if not imgs:
            print(f'⚠️  {IMAGES_DIR/cls} 无图片')
            continue

        random.shuffle(imgs)
        split = int(0.8 * len(imgs))
        for f in imgs[:split]:
            shutil.copy2(f, TRAIN_DIR / cls)
        for f in imgs[split:]:
            shutil.copy2(f, VAL_DIR / cls)

        trn_cnt = len(list((TRAIN_DIR / cls).glob('*')))
        val_cnt = len(list((VAL_DIR / cls).glob('*')))
        print(f'{cls}: 总 {len(imgs)} 张 → 训练 {trn_cnt} / 验证 {val_cnt}')

# -------------------------------------------------
# 3. 数据管道
def make_ds(root, training=True):
    """Build a tf.data input pipeline from a directory of class sub-folders.

    Args:
        root: directory containing one sub-folder per class.
        training: when True, shuffle files and apply light augmentation.
            When False (validation), files are read in a fixed order so
            evaluation metrics are reproducible run-to-run.

    Returns:
        A prefetched tf.data.Dataset yielding (image_batch, one_hot_labels).
    """
    ds = tf.keras.utils.image_dataset_from_directory(
        root,
        image_size=(IMG_SIZE, IMG_SIZE),
        batch_size=BATCH_SIZE,
        label_mode='categorical',
        shuffle=training)  # fix: do not shuffle validation data

    # Apply the same preprocessing used inside the model so training and
    # inference see identical inputs.
    # NOTE(review): mobilenet_v3.preprocess_input is documented as a
    # pass-through (the model embeds its own rescaling), so this is kept
    # for explicitness rather than necessity — confirm for your TF version.
    def preprocess(x, y):
        x = tf.cast(x, tf.float32)
        x = tf.keras.applications.mobilenet_v3.preprocess_input(x)
        return x, y

    ds = ds.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)

    if training:
        # Mild geometric augmentation; applied only to the training split.
        aug = tf.keras.Sequential([
            tf.keras.layers.RandomFlip('horizontal'),
            tf.keras.layers.RandomRotation(0.05),
            tf.keras.layers.RandomZoom(0.05)
        ])
        ds = ds.map(lambda x, y: (aug(x, training=True), y),
                    num_parallel_calls=tf.data.AUTOTUNE)
    return ds.prefetch(tf.data.AUTOTUNE)


# -------------------------------------------------
# 4. 构建模型
def build_model(num_classes):
    """Build the classifier: a frozen ImageNet MobileNetV3Small feature
    extractor (global-average pooled) topped with dropout and a softmax head.

    Args:
        num_classes: number of output classes.

    Returns:
        An uncompiled tf.keras.Model mapping (IMG_SIZE, IMG_SIZE, 3) images
        to class probabilities.
    """
    backbone = tf.keras.applications.MobileNetV3Small(
        input_shape=(IMG_SIZE, IMG_SIZE, 3),
        include_top=False,
        weights='imagenet',
        pooling='avg')
    backbone.trainable = False  # stage 1 trains the head only

    img_in = tf.keras.Input((IMG_SIZE, IMG_SIZE, 3))
    feats = tf.keras.applications.mobilenet_v3.preprocess_input(img_in)
    # training=False keeps BatchNorm in inference mode even after unfreezing.
    feats = backbone(feats, training=False)
    feats = tf.keras.layers.Dropout(0.2)(feats)
    probs = tf.keras.layers.Dense(num_classes, activation='softmax')(feats)
    return tf.keras.Model(img_in, probs)


def build_model_with_backbone(num_classes):
    """Build the same architecture as build_model, additionally returning the
    backbone so a caller can unfreeze it directly for fine-tuning.

    Args:
        num_classes: number of output classes.

    Returns:
        Tuple of (uncompiled tf.keras.Model, MobileNetV3Small backbone).
    """
    extractor = tf.keras.applications.MobileNetV3Small(
        input_shape=(IMG_SIZE, IMG_SIZE, 3),
        include_top=False,
        weights='imagenet',
        pooling='avg')
    extractor.trainable = False  # frozen by default; caller may unfreeze

    img = tf.keras.Input((IMG_SIZE, IMG_SIZE, 3))
    h = tf.keras.applications.mobilenet_v3.preprocess_input(img)
    # training=False keeps BatchNorm statistics fixed during fine-tuning.
    h = extractor(h, training=False)
    h = tf.keras.layers.Dropout(0.2)(h)
    logits = tf.keras.layers.Dense(num_classes, activation='softmax')(h)
    return tf.keras.Model(img, logits), extractor


# -------------------------------------------------
# 5. 训练 + 微调
def _export_tflite(model):
    """Export the trained Keras model to MODEL_DIR as TFLite.

    Writes two files:
      * herbs_cpu_no_quant.tflite — unquantized float model, for accuracy
        testing (isolates any quantization loss).
      * herbs_cpu.tflite — dynamic-range quantized model, for deployment.
    """
    print("导出不量化的TFLite模型进行测试...")
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # No converter.optimizations → full-float model.
    tflite_model_no_quant = converter.convert()
    tflite_path_no_quant = MODEL_DIR / 'herbs_cpu_no_quant.tflite'
    tflite_path_no_quant.write_bytes(tflite_model_no_quant)
    print(f'未量化模型大小: {len(tflite_model_no_quant) / 1024:.1f} KB → {tflite_path_no_quant}')

    converter_quant = tf.lite.TFLiteConverter.from_keras_model(model)
    converter_quant.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model_quant = converter_quant.convert()
    tflite_path_quant = MODEL_DIR / 'herbs_cpu.tflite'
    tflite_path_quant.write_bytes(tflite_model_quant)
    print(f'量化模型大小: {len(tflite_model_quant) / 1024:.1f} KB → {tflite_path_quant}')


def train():
    """Run the full training pipeline.

    1. Split raw images 80/20 into train/val folders.
    2. Stage 1: train only the classification head (backbone frozen).
    3. Stage 2: unfreeze the top of the backbone and fine-tune at a low LR.
    4. Export the final model to TFLite (unquantized + quantized).
    """
    split_data()
    train_ds = make_ds(TRAIN_DIR, training=True)
    val_ds = make_ds(VAL_DIR, training=False)

    # Read the class order from disk; image_dataset_from_directory assigns
    # labels by sorted folder name, so sorting here keeps the two in sync.
    classes_dirs = [d.name for d in TRAIN_DIR.iterdir() if d.is_dir()]
    class_names = sorted(classes_dirs)
    num_classes = len(class_names)
    print('最终类别顺序:', class_names)

    model = build_model(num_classes)
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Stage 1: backbone frozen, train the head only.
    history1 = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=EPOCHS,
        callbacks=[
            tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True),
            tf.keras.callbacks.ReduceLROnPlateau(patience=2, factor=0.5, verbose=1)
        ],
        verbose=2)
    val_acc = max(history1.history['val_accuracy'])
    print(f"阶段1验证准确率: {val_acc:.2%}")

    # Stage 2: fine-tune on top of the stage-1 weights (same model object).
    # Locate the MobileNetV3 backbone sub-model by layer name.
    backbone = None
    for layer in model.layers:
        if 'mobilenetv3' in layer.name.lower():
            backbone = layer
            break

    if backbone is not None:
        print(f"找到backbone: {backbone.name}")
        # FIX: build_model() left the backbone wrapper with trainable=False.
        # Keras treats every sub-layer of a non-trainable layer as frozen,
        # so the previous "set trainable=True on the last 30 sub-layers"
        # had no effect and stage 2 only re-trained the head. Make the
        # wrapper trainable first, then freeze all but the last 30 layers.
        backbone.trainable = True
        for layer in backbone.layers[:-30]:
            layer.trainable = False
        # BatchNorm layers still run in inference mode because the backbone
        # was wired with training=False inside build_model().

        # Re-compile with a much smaller learning rate so fine-tuning does
        # not destroy the pretrained features.
        model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print("开始阶段2微调...")
        history2 = model.fit(
            train_ds,
            validation_data=val_ds,
            epochs=10,
            callbacks=[tf.keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)],
            verbose=2)

        final_val_acc = max(history2.history['val_accuracy'])
        print(f"阶段2验证准确率: {final_val_acc:.2%}")
    else:
        print("未找到backbone，跳过微调阶段")
        final_val_acc = val_acc

    # -------------------------------------------------
    # Export both TFLite variants.
    _export_tflite(model)



# -------------------------------------------------
if __name__ == '__main__':
    train()  # run the full pipeline only when executed as a script