import os
import json

import torch

from model.transe import TransE

class CkptSaver:
    """Keep the ``max_ckpts`` best (lowest-loss) model checkpoints on disk.

    Checkpoints are written into ``outputs`` and tracked in ``self.best_k``
    as ``(loss, filename)`` tuples sorted ascending by loss (best first).
    A JSON manifest ``latest_checkpoints.json`` mirrors ``best_k`` after
    every call to :meth:`update`.
    """

    def __init__(self, model, max_ckpts=2, outputs="outputs") -> None:
        self._model = model
        self._max_ckpts = max_ckpts
        self._output_dir = outputs

        # Ensure the output location exists; exist_ok avoids the
        # check-then-create race of the old isdir()+makedirs() pair.
        os.makedirs(self._output_dir, exist_ok=True)

        # (loss, filename) tuples, kept sorted ascending by loss.
        self.best_k = []

    def update(self, loss):
        """Record ``loss``; persist a checkpoint if it ranks among the best.

        While under capacity the checkpoint is always kept.  Once full, a new
        loss replaces (and deletes from disk) the current worst checkpoint
        only if it is strictly better.  The JSON manifest is rewritten after
        every call so it always reflects the current state of ``best_k``.

        Args:
            loss: scalar loss value used to rank this checkpoint.

        Returns:
            None.
        """
        filename = self._unique_filename(loss)
        if len(self.best_k) < self._max_ckpts:
            # Under the checkpoint limit: save unconditionally.
            self.dump_ckpt(self._model, filename)
            self.best_k.append((loss, filename))
            self.best_k.sort(key=lambda x: x[0])
        elif loss < self.best_k[-1][0]:
            # Better than the current worst: evict it, then save the new one.
            _, worst_file = self.best_k.pop()
            try:
                os.remove(worst_file)
            except FileNotFoundError:
                pass  # already gone; nothing to clean up

            self.dump_ckpt(self._model, filename)
            self.best_k.append((loss, filename))
            self.best_k.sort(key=lambda x: x[0])

        # BUG FIX: the manifest used to be written only on the full-capacity
        # path, so the first ``max_ckpts`` updates never produced one (and it
        # went stale afterwards).  Write it after every update instead.
        manifest = os.path.join(self._output_dir, "latest_checkpoints.json")
        with open(manifest, "w", encoding="utf-8") as fp:
            json.dump(self.best_k, fp, ensure_ascii=False)

        return None

    def _unique_filename(self, loss):
        """Build a checkpoint path from ``loss``, avoiding collisions.

        Two different losses can share the same 8-character string prefix;
        without this check ``best_k`` would track two entries pointing at a
        single file, and evicting one would delete the other's checkpoint.
        """
        base = str(loss)[0:8]
        filename = os.path.join(self._output_dir, f"{base}.ckpt")
        taken = {f for _, f in self.best_k}
        suffix = 1
        while filename in taken:
            filename = os.path.join(self._output_dir, f"{base}_{suffix}.ckpt")
            suffix += 1
        return filename

    @staticmethod
    def dump_ckpt(model: "TransE", filename):
        """Save the model's entity/relation embedding weights to ``filename``.

        The tensors are detached and copied to CPU before saving; the model
        itself is left untouched.  (The old code additionally called
        ``.to(device)`` afterwards as if restoring the tensors, but
        ``Tensor.to`` returns a new tensor and mutates nothing — those lines
        were no-ops and have been removed.)
        """
        data = {
            "entities_embedding": model.entities_embedding.weight.data.detach().cpu(),
            "relations_embedding": model.relations_embedding.weight.data.detach().cpu(),
        }
        torch.save(data, filename)

        return None

        