import tensorflow as tf
from tensorflow.data.experimental import AutoShardPolicy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import os, math, datetime
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from loss.loss_function import AsymmetricLoss, FocalLoss
from model.model import efficientnetv2_m as create_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
from toolbox.visualization import plot_metrics
from toolbox.save_model import file_deduplication
from optimizer.CosineWarmupDecay import CosineWarmupDecay
from toolbox.image_generator import add_salt_and_pepper_noise

# --------------------------------------------
# GPU setup: enable memory growth so TensorFlow
# allocates device memory on demand instead of
# grabbing it all up front.
# --------------------------------------------
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        for device in gpus:
            tf.config.experimental.set_memory_growth(device, True)
    except RuntimeError as err:
        # Memory growth must be configured before any GPU is initialized;
        # if that already happened, report and continue.
        print(err)

# --------------------------------------------
# Basic parameters
# --------------------------------------------
input_size = (384, 384)  # target_size must be a tuple
batch_size = 90
num_classes = 15
# Multi-label CSVs: column 0 is the image filename, the remaining columns
# are one indicator per class — presumably 0/1; verify against the dataset.
train_df = pd.read_csv(r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/train/_classes.csv')
val_df = pd.read_csv(r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/valid/_classes.csv')
test_df = pd.read_csv(r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/test/_classes.csv')

train_dir = r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/train'
val_dir = r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/valid'
test_dir = r'/home/lsh2022400251/dataset/Tongue.v1i.multiclass/test'
label_cols = train_df.columns[1:]  # the 15 class column names

# --------------------------------------------
# Cosine-annealing (warmup) schedule parameters
# --------------------------------------------
n_train_tasks = 7939  # number of training samples — presumably matches the train CSV; verify
n_val_tasks = 973  # number of validation samples
meta_batch_size = 90  # duplicates batch_size above; kept as a separate knob for the schedule
one_epoch_batchs = int(n_train_tasks / meta_batch_size)  # batches per epoch
total_step = 50 * one_epoch_batchs  # schedule length: 50 epochs' worth of steps
warmup_step = int(total_step * 0.1)  # linear warmup over the first 10% of steps
multi = 0.25  # restart-period multiplier passed to CosineWarmupDecay
# NOTE(review): print_step is NEGATIVE here, while the original comment
# claimed "print the lr once per epoch" — confirm how CosineWarmupDecay
# treats negative values (this may effectively disable printing).
print_step = - one_epoch_batchs

# --------------------------------------------
# Custom augmentation hook
# --------------------------------------------
# (Removed: a stale "rotation-only" header and a commented-out
# ImageDataGenerator superseded by the configuration below.)


def custom_augmentation(image):
    """Extra per-image augmentation applied by ImageDataGenerator.

    Adds salt-and-pepper noise on top of the generator's built-in
    geometric/photometric augmentations and returns the noisy image.
    """
    # salt_prob=0.0005 -> 0.05% of pixels flipped to salt/pepper noise.
    image = add_salt_and_pepper_noise(image, salt_prob=0.0005)
    return image


# Training pipeline: rescale to [0, 1] plus heavy augmentation.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=180,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    brightness_range=(0.8, 1.2),
    preprocessing_function=custom_augmentation,
)
# Validation / test pipeline: rescaling only, no augmentation.
valid_datagen = ImageDataGenerator(
    rescale=1. / 255,
)


def _flow_from(datagen, frame, directory, shuffle):
    """Build a multi-label image flow over `frame` rooted at `directory`."""
    return datagen.flow_from_dataframe(
        dataframe=frame,
        directory=directory,
        x_col='filename',
        y_col=label_cols,
        target_size=input_size,
        batch_size=batch_size,
        class_mode='raw',  # multi-label targets taken as-is from the CSV columns
        shuffle=shuffle,
    )


# Train / validation / test generators; only training is shuffled.
train_generator = _flow_from(train_datagen, train_df, train_dir, True)
valid_generator = _flow_from(valid_datagen, val_df, val_dir, False)
test_generator = _flow_from(valid_datagen, test_df, test_dir, False)


# --------------------------------------------
# 创建 tf.data.Dataset 并应用分片策略
# --------------------------------------------

def apply_shard_policy(dataset):
    """Return `dataset` configured to auto-shard by DATA under distribution."""
    sharding_options = tf.data.Options()
    sharding_options.experimental_distribute.auto_shard_policy = (
        AutoShardPolicy.DATA
    )
    dataset = dataset.with_options(sharding_options)
    return dataset


# 创建 tf.data.Dataset
def create_tf_data_from_generator(generator):
    dataset = tf.data.Dataset.from_generator(
        generator,
        output_signature=(
            tf.TensorSpec(shape=(None, *input_size, 3), dtype=tf.float32),
            tf.TensorSpec(shape=(None, num_classes), dtype=tf.float32)
        )
    )
    return apply_shard_policy(dataset)


# Wrap each Keras generator as a tf.data.Dataset with the shard policy
# applied. The lambdas give from_generator a re-invokable factory.
train_dataset = create_tf_data_from_generator(lambda: train_generator)
valid_dataset = create_tf_data_from_generator(lambda: valid_generator)
test_dataset = create_tf_data_from_generator(lambda: test_generator)

# --------------------------------------------
# Distributed training / model compilation
# --------------------------------------------

# FIX: the original hard-coded devices=["/gpu:0", "/gpu:1", "/gpu:2"],
# which crashes on any host with fewer than 3 GPUs. With no `devices`
# argument MirroredStrategy uses every visible GPU (or the CPU if none).
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = create_model(num_classes=num_classes)
    model.build((None, *input_size, 3))

    # Load pretrained backbone weights when present; by_name/skip_mismatch
    # tolerates the replaced classification head.
    weights_path = '../efficientnetv2-m.h5'
    if os.path.exists(weights_path):
        model.load_weights(weights_path, by_name=True, skip_mismatch=True)
    else:
        print(f"预训练权重 {weights_path} 不存在")

    # Cosine decay with linear warmup; see the schedule parameters
    # defined near the top of the file.
    cosine_warmup_decay = CosineWarmupDecay(
        initial_lr=0.005,         # peak learning rate reached after warmup
        min_lr=0.0005,            # floor the cosine decay approaches
        warmup_step=warmup_step,  # steps of linear ramp-up
        total_step=total_step,    # total steps the schedule spans
        multi=multi,              # restart-period multiplier
        print_step=print_step,    # lr-printing interval — see note at its definition
    )

    model.compile(
        optimizer=tf.keras.optimizers.Adam(cosine_warmup_decay),
        loss=FocalLoss(),
        metrics=[
            tf.keras.metrics.BinaryAccuracy(name='accuracy'),
            tf.keras.metrics.Precision(name='precision'),
            tf.keras.metrics.Recall(name='recall')
        ]
    )

# --------------------------------------------
# Training setup: checkpoints, logging, callbacks
# --------------------------------------------
os.makedirs("../checkpoints", exist_ok=True)
save_path = "../checkpoints/efficientnetv2_best.h5"
save_path = file_deduplication(save_path)

# Per-epoch CSV training log.
# BUG FIX: the original called os.makedirs(save_log_path) on the CSV *file*
# path, creating a directory named `...train.csv` and breaking CSVLogger;
# create the parent log directory instead.
save_log_path = '../log/efficientNetV2_generate2_train.csv'
os.makedirs(os.path.dirname(save_log_path), exist_ok=True)
save_log_path = file_deduplication(save_log_path)

callbacks = [
    # 1) Early stopping: stop after 5 epochs without val_loss improvement
    #    and roll back to the best weights seen.
    EarlyStopping(
        monitor="val_loss",
        patience=5,
        mode="min",
        restore_best_weights=True,
        verbose=1
    ),
    # 2) Checkpoint: save weights whenever val_loss improves.
    ModelCheckpoint(
        filepath=save_path,
        monitor="val_loss",
        mode="min",
        save_best_only=True,
        save_weights_only=True,
        verbose=1
    ),
    # 3) Append per-epoch metrics to the CSV log.
    CSVLogger(save_log_path)
]

steps_per_epoch = n_train_tasks // batch_size
validation_steps = n_val_tasks // batch_size
print(f"--------------------{steps_per_epoch}----------------------")
print(f"--------------------{validation_steps}----------------------")
# Run training. Explicit steps are required because the generator-backed
# datasets have unknown cardinality from Keras' point of view.
# NOTE(review): epochs=60 here, but the cosine schedule's total_step was
# sized for 50 epochs above — confirm this mismatch is intended.
history = model.fit(
    train_dataset,
    epochs=60,
    validation_data=valid_dataset,
    steps_per_epoch=steps_per_epoch,
    validation_steps=validation_steps,
    callbacks=callbacks  # early stopping / checkpoint / CSV logging
)
print("-------------------------------训练完毕---------------------------------")

# Save training curves to disk; the second argument is presumably a
# "show interactively" flag — verify against plot_metrics' signature.
plot_metrics(history, False, "../output/efficientNetV2_pre-training.png")

# --------------------------------------------
# Evaluation
# --------------------------------------------
# Evaluate on the held-out test split; `steps` bounds iteration because the
# generator-backed dataset does not signal exhaustion on its own.
test_results = model.evaluate(test_dataset, steps=len(test_generator))
# Order matches the compiled metrics: loss, accuracy, precision, recall.
test_loss, test_accuracy, test_precision, test_recall = test_results[:4]

save_log_path = '../log/efficientNetV2_generate2_test.csv'
# FIX: ensure the log directory exists before writing (a fresh checkout
# has no ../log and open() would fail).
os.makedirs(os.path.dirname(save_log_path), exist_ok=True)

# Print the summary to stdout.
print(f"测试集结果:\n损失: {test_loss:.3f}\n准确率: {test_accuracy:.3f}\n TP:{test_precision}\n 召回率: {test_recall}")

# Persist the same summary; explicit utf-8 so the Chinese labels are
# written correctly regardless of the platform's default encoding.
with open(save_log_path, 'w', encoding='utf-8') as f:
    f.write(f"测试集结果:\n损失: {test_loss:.3f}\n准确率: {test_accuracy:.3f}\nTP:{test_precision}\n召回率: {test_recall}")
