#!/usr/bin/env python3
"""
CPU-only Chinese-herb image classification; fits in 24 GB of RAM.
Run with: python train_cpu.py
"""
import os, shutil, random, tensorflow as tf
from pathlib import Path

# ---------- Hyper-parameters ----------
IMG_SIZE   = 96          # smaller input side -> faster on CPU
BATCH_SIZE = 16          # 24 GB of RAM is plenty at this size
EPOCHS     = 12
IMAGES_DIR   = Path('./data/images')   # raw images, one sub-directory per class
TRAIN_DIR  = Path('./data/train')      # populated by split_data()
VAL_DIR    = Path('./data/val')        # populated by split_data()
CLASS_NAMES = ['baihe','dangshen','gouqi','huaihua','jinyinhua']


# # One-off cleanup: strip spaces and full-width parentheses from file names
# for cls in CLASS_NAMES:
#     for img_name in os.listdir(TRAIN_DIR/cls):
#         os.rename(TRAIN_DIR/cls/img_name,
#                       TRAIN_DIR/cls/img_name.replace(' ', '').replace('（', '').replace('）', ''))

# ---------- Automatic 80/20 split ----------
def split_data():
    """Copy images from IMAGES_DIR into TRAIN_DIR / VAL_DIR with an 80/20 split.

    Creates one sub-directory per class under both targets.  Classes whose
    source directory contains no matching images are skipped with a warning.
    Side effects only (file copies + progress prints); returns None.
    """
    exts = ('*.jpg', '*.jpeg', '*.png', '*.JPG', '*.JPEG', '*.PNG')
    for cls in CLASS_NAMES:
        (TRAIN_DIR/cls).mkdir(parents=True, exist_ok=True)
        (VAL_DIR/cls).mkdir(parents=True, exist_ok=True)
        # Collect into a set: on case-insensitive file systems (macOS,
        # Windows) '*.jpg' and '*.JPG' match the same files, and the old
        # list-extend version listed every image twice, skewing the split
        # and copying duplicates.
        found = set()
        for ext in exts:
            found.update((IMAGES_DIR / cls).glob(ext))
        if not found:
            print(f'⚠️  {IMAGES_DIR/cls} 目录下没找到任何图片，请检查路径/扩展名！')
            continue
        imgs = sorted(found)   # deterministic order before shuffling
        print(cls, len(imgs))
        random.shuffle(imgs)
        split = int(0.8*len(imgs))
        for f in imgs[:split]:
            shutil.copy(f, TRAIN_DIR/cls)
        for f in imgs[split:]:
            shutil.copy(f, VAL_DIR/cls)
        # Report the resulting per-class counts immediately.
        trn_cnt = len(list((TRAIN_DIR / cls).glob('*')))
        val_cnt = len(list((VAL_DIR / cls).glob('*')))
        print(cls, 'train=', trn_cnt, 'val=', val_cnt)
    print('✅1. 数据集已自动 80/20 切分完毕！')

# ---------- Data pipeline ----------
def make_ds(root, training):
    """Build a tf.data pipeline over an image directory.

    Args:
        root: directory containing one sub-directory per class.
        training: when True, shuffle the files and apply light augmentation
            (horizontal flip, ±5% rotation, ±5% zoom).

    Returns:
        A prefetching ``tf.data.Dataset`` of (image batch, one-hot label batch).
    """
    ds = tf.keras.utils.image_dataset_from_directory(
        root, image_size=(IMG_SIZE, IMG_SIZE), batch_size=BATCH_SIZE,
        label_mode='categorical',
        # Pin the label order explicitly so label indices always match
        # CLASS_NAMES instead of relying on directory-sort order.
        class_names=CLASS_NAMES,
        # Shuffle only the training set; keep validation order deterministic.
        shuffle=training)
    if training:
        aug = tf.keras.Sequential([
            tf.keras.layers.RandomFlip('horizontal'),
            tf.keras.layers.RandomRotation(0.05),
            tf.keras.layers.RandomZoom(0.05)
        ])
        ds = ds.map(lambda x, y: (aug(x, training=True), y),
                    num_parallel_calls=tf.data.AUTOTUNE)
    return ds.prefetch(tf.data.AUTOTUNE)

# ---------- Model ----------
def build():
    """Assemble a frozen MobileNetV3-Small feature extractor topped with a
    dropout + softmax head over CLASS_NAMES and return the Keras Model."""
    backbone = tf.keras.applications.MobileNetV3Small(
        input_shape=(IMG_SIZE, IMG_SIZE, 3),
        include_top=False,
        weights='imagenet',
        pooling='avg')
    backbone.trainable = False      # freeze the backbone for transfer learning
    features = tf.keras.layers.Dropout(0.2)(backbone.output)
    probs = tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')(features)
    return tf.keras.Model(backbone.input, probs)

# ---------- Training ----------
def train():
    """End-to-end driver: split the raw images, fit the frozen-backbone
    classifier, and export a dynamic-range-quantized TFLite model.

    Side effects: populates TRAIN_DIR/VAL_DIR via split_data() and writes
    'herbs_cpu.tflite' into the current working directory.
    """
    split_data()
    train_ds = make_ds(TRAIN_DIR, True)
    val_ds   = make_ds(VAL_DIR,   False)

    model = build()
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Print the class->index mapping Keras infers from the directory names
    # so it can be eyeballed against the script's own CLASS_NAMES list.
    print('tf.keras 认为的类别顺序:',
          tf.keras.utils.image_dataset_from_directory(
              TRAIN_DIR, batch_size=1).class_names)
    print('脚本里 CLASS_NAMES:', CLASS_NAMES)

    model.fit(train_ds,
              validation_data=val_ds,
              epochs=EPOCHS,
              callbacks=[tf.keras.callbacks.EarlyStopping(patience=3,
                                                          restore_best_weights=True)])

    # Quantized export: Optimize.DEFAULT applies dynamic-range quantization,
    # shrinking the file for CPU/edge deployment.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite = converter.convert()
    Path('herbs_cpu.tflite').write_bytes(tflite)
    print('Done! model size: %.1f KB' % (len(tflite)/1024))

# Entry point: run the full pipeline when executed as a script.
if __name__ == '__main__':
    train()