# -*- coding: utf-8 -*-
import tensorflow as tf
from dataset import PoetryDataGenerator, poetry, tokenizer
from train_model.model import model
import settings
import utils
import os
import datetime
import matplotlib.pyplot as plt


class Evaluate(tf.keras.callbacks.Callback):
    """Keras callback that maintains a Markdown training log.

    After every epoch it appends the metrics to ``output_file``, saves the
    model whenever ``val_loss`` reaches a new minimum, and writes
    ``settings.SHOW_NUM`` randomly generated poems so training quality can
    be eyeballed. ``plot_logs`` renders the collected loss/accuracy curves
    and embeds the image in the log.
    """

    def __init__(self, output_file, model_save_path, steps=None):
        """
        Args:
            output_file: path of the Markdown log file (created with a
                header on first use, appended to otherwise).
            model_save_path: where the best model is saved.
            steps: steps-per-epoch value recorded in the log header.
                Defaults to the module-level ``data_generator.steps`` for
                backward compatibility with existing call sites.
        """
        super().__init__()
        # Sentinel larger than any realistic loss so the first epoch
        # always triggers a checkpoint.
        self.lowest = 1e10
        self.output_file = output_file
        self.model_save_path = model_save_path
        # Per-epoch metric history used later by plot_logs().
        self.logs = {"loss": [], "val_loss": [], "accuracy": [], "val_accuracy": []}
        if steps is None:
            # NOTE(review): falls back to the module-level data_generator,
            # which must exist before this callback is instantiated —
            # confirm against the script's instantiation order.
            steps = data_generator.steps
        # Only create the file (with title and parameter header) if it
        # does not already exist, so re-runs append instead of clobbering.
        if not os.path.exists(output_file):
            with open(output_file, "w", encoding="utf-8") as f:
                f.write("# 模型训练日志\n")
                f.write("## 训练开始时间: {}\n\n".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                f.write("### 训练参数\n")
                f.write(f"- 训练轮数: {settings.TRAIN_EPOCHS}\n")
                f.write(f"- 每轮步数: {steps}\n")
                f.write(f"- 显示生成古诗数量: {settings.SHOW_NUM}\n\n")
                f.write("### 每轮训练结果\n")

    def on_epoch_end(self, epoch, logs=None):
        """Called by Keras after each epoch.

        Appends the epoch's metrics to the Markdown log, checkpoints the
        model when ``val_loss`` improves, writes sample poems, and records
        the metrics for later plotting.
        """
        # Guard: Keras declares logs as Optional; the original code would
        # raise TypeError on None before anything was written.
        if not logs:
            return
        with open(self.output_file, "a", encoding="utf-8") as f:
            f.write(f"#### Epoch {epoch + 1}\n")
            f.write(f"- **Loss**: {logs['loss']:.6f}\n")
            f.write(f"- **Validation Loss**: {logs['val_loss']:.6f}\n")
            f.write(f"- **Accuracy**: {logs['accuracy']:.4f}\n")
            f.write(f"- **Validation Accuracy**: {logs['val_accuracy']:.4f}\n")
            # Checkpoint whenever val_loss ties or beats the best so far.
            if logs['val_loss'] <= self.lowest:
                self.lowest = logs['val_loss']
                # Use self.model (set by Keras on every attached callback)
                # rather than the module-level global; same object during
                # fit(), but decoupled from module state.
                self.model.save(self.model_save_path)
                f.write(f"- **最优模型已保存**: {self.model_save_path}\n")
            else:
                f.write("- **最优模型未更新**\n")
            # Sample a few poems to inspect generation quality.
            f.write("- **随机生成古诗**:\n")
            for _ in range(settings.SHOW_NUM):
                generated_poetry = utils.generate_random_poetry(tokenizer, self.model)
                f.write(f"  - {generated_poetry}\n")
            f.write("\n")
        # Keep the metric history for plot_logs().
        self.logs["loss"].append(logs["loss"])
        self.logs["val_loss"].append(logs["val_loss"])
        self.logs["accuracy"].append(logs["accuracy"])
        self.logs["val_accuracy"].append(logs["val_accuracy"])

    def plot_logs(self):
        """Plot loss and accuracy curves, save them as an image, and link
        the image from the Markdown log."""
        plt.figure(figsize=(12, 5))

        # Left panel: training vs validation loss.
        plt.subplot(1, 2, 1)
        plt.plot(self.logs["loss"], label="Training Loss")
        plt.plot(self.logs["val_loss"], label="Validation Loss")
        plt.title("Loss")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()

        # Right panel: training vs validation accuracy.
        plt.subplot(1, 2, 2)
        plt.plot(self.logs["accuracy"], label="Training Accuracy")
        plt.plot(self.logs["val_accuracy"], label="Validation Accuracy")
        plt.title("Accuracy")
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.legend()

        # Write the figure to disk and release it.
        plt.savefig("training_plots.png")
        plt.close()

        # Embed the saved figure in the Markdown log.
        with open(self.output_file, "a", encoding="utf-8") as f:
            f.write("### 训练过程曲线图\n")
            f.write("![Training Plots](training_plots.png)\n")

# Build the training data generator (shuffled every epoch).
# NOTE: the name data_generator is read by Evaluate.__init__ — keep it.
data_generator = PoetryDataGenerator(poetry, random=True)

# Paths for the Markdown training log and the best-model checkpoint.
output_file = "training_log_2.md"
model_save_path = settings.BEST_MODEL_PATH

# Callback that logs metrics, checkpoints the best weights, and writes
# sample poems after each epoch.
evaluator = Evaluate(output_file, model_save_path)

# Train the model. NOTE(review): validation data is drawn from the same
# generator as training data, so val_* metrics are not held-out metrics.
model.fit(
    data_generator.for_fit(),
    steps_per_epoch=data_generator.steps,
    epochs=settings.TRAIN_EPOCHS,
    validation_data=data_generator.for_fit(),
    validation_steps=data_generator.steps,
    callbacks=[evaluator],
)

# Append the finishing timestamp to the log.
finish_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(output_file, "a", encoding="utf-8") as f:
    f.write(f"## 训练结束时间: {finish_time}\n")

# Render the loss/accuracy curves and embed them in the log.
evaluator.plot_logs()