import os
import json
import logging
import pickle
from datetime import datetime

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import random_split

from .dataset import WindowDataSet, SeqDataSet, BearingSampler, PartSampler
from .loss import get_loss_fn
from .seq2seq import Seq2Seq, Encoder, Decoder
from .convTransformer import ForcastConvTransformer
from .pyraformer import Pyraformer
from .informer import Informer
from .rnns import MyLSTM, MyGRU
from .utils import plot_results, calculate_score
from .transformer import Transformer
from .crate import CRATE_tiny
from .MTCT import T_Transformer


class RUL:
    """Base class for remaining-useful-life (RUL) model training.

    Subclasses implement :meth:`train` and :meth:`predict`; this base class
    provides the shared epoch loop with checkpointing (:meth:`train_epochs`),
    batch evaluation (:meth:`evaluate`) and a scoring helper (:meth:`cal_score`).
    """

    def __init__(self, data_path, model_config_path, save_path, meta_information, model_config, train_config, data=None,
                 log_path=None):
        """Store paths/configs; subclasses read them lazily in train/predict.

        Args:
            data_path: root directory of the prepared dataset.
            model_config_path: JSON file where the trained model's config is written.
            save_path: directory for checkpoints and plots.
            meta_information: dataset metadata dict (feature_size, names, ...).
            model_config: model hyper-parameter dict (may be partial; .get with defaults).
            train_config: training hyper-parameter dict (may be partial).
            data: optional pre-loaded data object (unused by the base class).
            log_path: optional file to also log to.
        """
        self.data_path = data_path
        self.model_config_path = model_config_path
        self.save_path = save_path
        self.meta_information = meta_information
        self.model_config = model_config
        self.train_config = train_config
        self.logger = logging.getLogger(self.__class__.__name__)
        if log_path:
            self.logger.addHandler(logging.FileHandler(log_path))
        self.logger.setLevel(logging.INFO)
        self.data = data

    def train(self):
        raise NotImplementedError

    def predict(self):
        raise NotImplementedError

    def train_epochs(self, num_epochs, model, optimizer, loss_fn, train_loader, val_loader, save_path,
                     learning_rate, lr_warmup, grad_clip, device, early_stop=False):
        """Shared training loop: train, validate, checkpoint on best val loss.

        Args:
            num_epochs: number of epochs to run.
            model: torch module to optimize (moved to ``device`` by the caller /
                default-tensor-type mechanism).
            optimizer: optimizer wrapping ``model.parameters()``.
            loss_fn: callable ``loss_fn(pred, target) -> scalar tensor``.
            train_loader: iterable of ``(batch, tokens_idx, labels)`` triples.
            val_loader: iterable of ``(batch, tokens_idx, labels)`` triples.
            save_path: checkpoint file path (overwritten on each improvement).
            learning_rate: base learning rate (see NOTE below).
            lr_warmup: warm-up multiplier used by the diagnostic lr schedule.
            grad_clip: max gradient norm; falsy or <= 0 disables clipping.
            device: "cuda" or "cpu".
            early_stop: stop once both train and val losses stagnate
                (>= 20 resp. >= 30 epochs without improvement).
        """
        warm_step = min(int(0.1 * num_epochs), 300)
        best_train_loss = 1e8
        best_val_loss = 1e8
        nonprogress_train_step = 0
        nonprogress_val_step = 0
        # Counter for a (currently disabled) train-loss-based checkpoint rule.
        train_loss_save = 0

        for epoch in range(num_epochs):

            # NOTE(review): this schedule only computes ``lr`` for logging; the
            # optimizer's learning rate is never actually updated (the
            # assignment below was left commented out upstream) — confirm
            # whether that is intentional.
            if epoch <= warm_step:
                lr = learning_rate * (epoch + 1) * lr_warmup
            elif lr > learning_rate:
                lr = lr ** (1.0001)
            else:
                lr = learning_rate
                # optimizer.lr = lr

            count = 0
            epoch_loss = 0
            model.train()
            for i, (train_batch, tokens_idx, labels_batch) in enumerate(train_loader):
                optimizer.zero_grad()
                train_batch = train_batch.to(torch.float32).to(device)
                labels_batch = labels_batch.to(torch.float32).to(device)
                tokens_idx = tokens_idx.to(device)  # currently unused by the models

                y_train = model(train_batch)
                loss = loss_fn(y_train, labels_batch)
                # .item() (not .cpu()) so each batch's autograd graph is freed
                # instead of being kept alive by the running sum.
                epoch_loss += loss.item()
                count += 1
                loss.backward()
                if grad_clip and grad_clip > 0:
                    clip_grad_norm_(model.parameters(), grad_clip)
                optimizer.step()

            torch.cuda.empty_cache()
            if count > 0:
                epoch_loss = epoch_loss / count

            if epoch_loss < best_train_loss:
                train_loss_save += 1
                best_train_loss = epoch_loss
                nonprogress_train_step = 0
            else:
                nonprogress_train_step += 1

            val_count = 0
            val_loss = 0
            model.eval()
            with torch.no_grad():
                for i, (val_batch, tokens_idx, labels_batch) in enumerate(val_loader):
                    test_data = val_batch.to(torch.float32).to(device)
                    test_label = labels_batch.to(torch.float32).to(device)
                    y_test = model(test_data)
                    loss = loss_fn(y_test, test_label)

                    val_loss += loss.item()
                    val_count += 1

            if val_count > 0:
                val_loss = val_loss / val_count

            self.logger.info(
                f"Epoch number: {epoch} -- train loss {epoch_loss:.5} -- val loss {val_loss:.5} -- best train loss {best_train_loss:.5} -- best val loss {best_val_loss:.5} -- lr {lr:.5}")
            print(
                f"Epoch number: {epoch} -- train loss {epoch_loss:.4} -- val loss {val_loss:.5} -- best train loss {best_train_loss:.5} -- best val loss {best_val_loss:.5} -- lr {lr:.5}")
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                nonprogress_val_step = 0
                if epoch >= 0:
                    print(f"save model at epoch {epoch} due to val_loss")
                    torch.save({"epoch": epoch,
                                "model_state_dict": model.state_dict(),
                                "optimizer_state_dict": optimizer.state_dict(),
                                "loss": loss}, save_path)

                # Prevent a train-loss-triggered save right after a val save.
                train_loss_save = 0
            else:
                nonprogress_val_step += 1

            if early_stop and nonprogress_val_step >= 30 and nonprogress_train_step >= 20:
                break

    @staticmethod
    def evaluate(model, data, device, batch_size=256):
        """Run ``model`` over ``data`` and return ``(predictions, labels)``.

        ``data`` is an iterable of ``(batch, tokens_idx, labels)`` triples
        (e.g. a DataLoader). The previous implementation called
        ``model(data).item()`` on the loader itself and returned one scalar,
        which crashed every caller that unpacks two values — fixed here.

        Args:
            model: trained torch module.
            data: iterable of ``(batch, tokens_idx, labels)`` triples.
            device: "cuda" or "cpu".
            batch_size: kept for interface compatibility; batching is decided
                by the loader, so this value is unused.

        Returns:
            Tuple of CPU tensors ``(predictions, labels)`` concatenated along
            the batch dimension.
        """
        model.eval()
        preds = []
        labels = []
        with torch.no_grad():
            for batch, tokens_idx, labels_batch in data:
                batch = batch.to(torch.float32).to(device)
                y = model(batch)
                preds.append(y.detach().cpu())
                labels.append(labels_batch.detach().cpu())
        return torch.cat(preds), torch.cat(labels)

    @staticmethod
    def cal_score(er):
        """Asymmetric score on the relative error ``er``.

        Decays exponentially with |er|; the slope is 20 for er < 0 and 5 for
        er > 0, i.e. under-estimation is penalized harder. Returns 1.0 at er == 0.
        """
        # Alternative formula (kept for reference, not used):
        # np.exp(np.log(0.5) * er / (np.sign(er) * 7.5 + 12.5))
        return np.exp(np.log(0.5) * er * (np.sign(er) * 12.5 - 7.5))

class MTCTRUL(RUL):
    """RUL trainer/predictor built around the MTCT ``T_Transformer`` model."""

    def train(self):
        """Train a T_Transformer on windowed data and persist model + config."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        tcfg = self.train_config.get
        batch_size = tcfg("batch_size", 320)
        predict_batch_size = tcfg("predict_batch_size", 64)

        # NOTE: meta_information["num_tokens"] is ignored here; pinned to 1.
        num_tokens = 1
        feature_size = self.meta_information["feature_size"]

        mcfg = self.model_config.get
        input_size = mcfg("input_size", 40)
        stride_size = mcfg("stride_size", 5)
        d_model = mcfg("d_model", 128)
        n_layer = mcfg("n_layer", 1)
        d_inner_hid = mcfg("d_inner_hid", 128)
        n_head = mcfg("n_head", 4)
        dropout_prob = mcfg("dropout", 0.05)

        # Per-head key/value width derived from the model width.
        d_k = d_model // n_head
        d_v = d_k

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # Both splits use the full range [0, 1) of each bearing's windows.
        train_loader = DataLoader(train_set, batch_size, sampler=PartSampler(train_set, 0, 1, True), num_workers=0)
        val_loader = DataLoader(test_set, predict_batch_size, sampler=PartSampler(test_set, 0, 1, True),
                                num_workers=0)

        model = T_Transformer(1, 1)

        # Timestamped checkpoint file inside save_path.
        PATH = os.path.join(self.save_path, datetime.now().strftime("%Y%m%d%H%M%S")) + ".pth"

        created_model_config = {
            "d_model": d_model,
            "n_head": n_head,
            "n_layer": n_layer,
            "d_k": d_k,
            "d_v": d_v,
            "feature_size": feature_size,
            "d_inner_hid": d_inner_hid,
            "input_size": input_size,
            "stride_size": stride_size,
            "path": PATH,
        }

        # Training hyper-parameters.
        num_epochs = tcfg("num_epochs", 60)
        lr_warmup = tcfg("lr_warmup", 10)
        learning_rate = tcfg("learning_rate", 4e-4)
        loss_kind = tcfg("loss_kind", 'c_mse')
        grad_clip = tcfg("grad_clip", 2.0)

        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        loss_fn = get_loss_fn(loss_kind, 1, 2, 2.0, 1)
        self.train_epochs(num_epochs, model, optimizer, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, False)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Restore the checkpointed T_Transformer and plot predictions for the
        train and test bearings."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]
        # NOTE: meta_information["num_tokens"] is ignored here; pinned to 1.
        num_tokens = 1

        cfg = self.model_config
        input_size = cfg["input_size"]
        d_model = cfg["d_model"]
        d_k = cfg["d_k"]
        d_v = cfg["d_v"]
        d_inner_hid = cfg["d_inner_hid"]
        n_head = cfg["n_head"]
        n_layer = cfg["n_layer"]

        # Dense (stride-1) sliding window at inference time.
        stride_size = 1

        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)
        train_loader = DataLoader(train_set, batch_sampler=BearingSampler(train_set))
        test_loader = DataLoader(test_set, batch_sampler=BearingSampler(test_set))

        model = T_Transformer(1, 1)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])
        y_train, train_labels = self.evaluate(model, train_loader, device)
        plot_results(y_train, train_labels, stride_size, train_names, self.save_path, 0)

        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_test, test_labels, stride_size, test_names, self.save_path, 0)



class TransformerRUL(RUL):
    """RUL trainer/predictor using the project's plain ``Transformer`` model."""

    def train(self):
        """Train a Transformer on windowed data (60/40 part split) and persist
        the checkpoint path plus hyper-parameters as JSON."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        tcfg = self.train_config.get
        batch_size = tcfg("batch_size", 64)
        predict_batch_size = tcfg("predict_batch_size", 64)

        # NOTE: meta_information["num_tokens"] is ignored here; pinned to 1.
        num_tokens = 1
        feature_size = self.meta_information["feature_size"]

        mcfg = self.model_config.get
        input_size = mcfg("input_size", 32)
        stride_size = mcfg("stride_size", 1)
        d_model = mcfg("d_model", 256)
        n_layer = mcfg("n_layer", 3)
        d_inner_hid = mcfg("d_inner_hid", 128)
        n_head = mcfg("n_head", 8)
        dropout_prob = mcfg("dropout", 0.05)

        # Per-head key/value width derived from the model width.
        d_k = d_model // n_head
        d_v = d_k

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # First 60% of each bearing for training, remaining 40% for validation.
        train_loader = DataLoader(train_set, batch_size, sampler=PartSampler(train_set, 0, 0.6, True), num_workers=0)
        val_loader = DataLoader(test_set, predict_batch_size, sampler=PartSampler(test_set, 0.6, 1, True),
                                num_workers=0)

        model = Transformer(d_model, input_size, feature_size, n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens,
                            dropout_prob, device)

        # Timestamped checkpoint file inside save_path.
        PATH = os.path.join(self.save_path, datetime.now().strftime("%Y%m%d%H%M%S")) + ".pth"

        created_model_config = {
            "d_model": d_model,
            "n_head": n_head,
            "n_layer": n_layer,
            "d_k": d_k,
            "d_v": d_v,
            "feature_size": feature_size,
            "d_inner_hid": d_inner_hid,
            "input_size": input_size,
            "stride_size": stride_size,
            "path": PATH,
        }

        # Training hyper-parameters.
        num_epochs = tcfg("num_epochs", 100)
        lr_warmup = tcfg("lr_warmup", 10)
        learning_rate = tcfg("learning_rate", 1e-4)
        loss_kind = tcfg("loss_kind", 'mse')
        grad_clip = tcfg("grad_clip", 2.0)

        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        loss_fn = get_loss_fn(loss_kind, 1, 2, 2.0, 1)
        self.train_epochs(num_epochs, model, optimizer, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, False)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Rebuild the Transformer from the saved config, load the checkpoint
        and plot per-bearing predictions for both splits."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]
        # NOTE: meta_information["num_tokens"] is ignored here; pinned to 1.
        num_tokens = 1

        cfg = self.model_config
        input_size = cfg["input_size"]
        d_model = cfg["d_model"]
        d_k = cfg["d_k"]
        d_v = cfg["d_v"]
        d_inner_hid = cfg["d_inner_hid"]
        n_head = cfg["n_head"]
        n_layer = cfg["n_layer"]

        # Dense (stride-1) sliding window at inference time.
        stride_size = 1

        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)
        train_loader = DataLoader(train_set, batch_sampler=BearingSampler(train_set))
        test_loader = DataLoader(test_set, batch_sampler=BearingSampler(test_set))

        # Dropout set to 0 for inference.
        model = Transformer(d_model, input_size, feature_size, n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens, 0,
                            device)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])
        y_train, train_labels = self.evaluate(model, train_loader, device)
        plot_results(y_train, train_labels, stride_size, train_names, self.save_path, 0)

        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_test, test_labels, stride_size, test_names, self.save_path, 0)


class ConvTransformerRUL(RUL):
    """RUL trainer/predictor using ``ForcastConvTransformer``."""

    def train(self):
        """Train the convolutional Transformer with random batch sampling and
        early stopping, then persist checkpoint path + hyper-parameters."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        tcfg = self.train_config.get
        batch_size = tcfg("batch_size", 64)
        predict_batch_size = tcfg("predict_batch_size", 64)

        meta = self.meta_information
        num_tokens = meta["num_tokens"]
        full_life = meta["full_life"]
        feature_size = meta["feature_size"]

        mcfg = self.model_config.get
        input_size = mcfg("input_size", 32)
        stride_size = mcfg("stride_size", 1)
        headers = mcfg("headers", 4)
        number_of_vars = mcfg("k", 256)
        depth = mcfg("depth", 1)
        kernel_size = mcfg("kernel_size", 8)
        hidden_size = mcfg("hidden_size", 200)
        dropout_prob = mcfg("dropout", 0.2)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # Random order for training, deterministic order for validation.
        train_loader = DataLoader(train_set, batch_size,
                                  sampler=RandomSampler(train_set, generator=torch.Generator(device=device)),
                                  num_workers=0)
        test_loader = DataLoader(test_set, predict_batch_size, sampler=SequentialSampler(test_set), num_workers=0)

        print(len(train_loader))
        print(len(test_loader))

        model = ForcastConvTransformer(feature_size, number_of_vars, headers, depth, input_size, hidden_size,
                                       kernel_size, num_tokens=num_tokens, dropout_prob=dropout_prob)

        # Timestamped checkpoint file inside save_path.
        PATH = os.path.join(self.save_path, datetime.now().strftime("%Y%m%d%H%M%S")) + ".pth"

        created_model_config = {
            "k": number_of_vars,
            "headers": headers,
            "depth": depth,
            "input_size": input_size,
            "stride_size": stride_size,
            "kernel_size": kernel_size,
            "num_tokens": num_tokens,
            "feature_size": feature_size,
            "hidden_size": hidden_size,
            "dropout": dropout_prob,
            "path": PATH,
        }

        # Training hyper-parameters.
        num_epochs = tcfg("num_epochs", 50)
        lr_warmup = tcfg("lr_warmup", 10)
        learning_rate = tcfg("learning_rate", 4e-5)
        loss_kind = tcfg("loss_kind", 'mse')
        grad_clip = tcfg("grad_clip", 5.0)

        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        loss_fn = get_loss_fn(loss_kind, full_life)
        self.train_epochs(num_epochs, model, optimizer, loss_fn, train_loader, test_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, True)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Rebuild the ForcastConvTransformer from the saved config, load the
        checkpoint and plot per-bearing predictions for both splits."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]

        cfg = self.model_config
        number_of_vars = cfg["k"]
        num_tokens = cfg["num_tokens"]
        input_size = cfg["input_size"]
        stride_size = cfg["stride_size"]

        headers = cfg["headers"]
        depth = cfg["depth"]
        kernel_size = cfg["kernel_size"]
        hidden_size = cfg["hidden_size"]
        dropout_prob = cfg["dropout"]

        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        train_loader = DataLoader(train_set, batch_sampler=BearingSampler(train_set))
        test_loader = DataLoader(test_set, batch_sampler=BearingSampler(test_set))

        # Dropout left at the model's default for inference.
        model = ForcastConvTransformer(feature_size, number_of_vars, headers, depth, input_size, hidden_size,
                                       kernel_size, num_tokens=num_tokens)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_train, train_labels = self.evaluate(model, train_loader, device)
        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_train, train_labels, 1, train_names, self.save_path)
        plot_results(y_test, test_labels, 1, test_names, self.save_path)


class LSTMRUL(RUL):
    """RUL trainer/predictor using the ``MyLSTM`` sequence model."""

    def train(self):
        """Train an LSTM on full sequences (one sequence per batch) and
        persist checkpoint path + hyper-parameters."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        # One full sequence per batch for both training and validation.
        train_loader = DataLoader(train_set, 1)
        val_loader = DataLoader(test_set, 1)

        print(len(train_loader))
        print(len(val_loader))

        feature_size = self.meta_information["feature_size"]
        num_tokens = self.meta_information["num_tokens"]

        mcfg = self.model_config.get
        d_model = mcfg("d_model", 200)
        num_layers = mcfg("num_layers", 3)
        hidden_size = mcfg("hidden_size", 200)
        dropout_prob = mcfg("dropout", 0.2)

        model = MyLSTM(feature_size, d_model, hidden_size, num_layers, num_tokens, dropout_prob)

        # Timestamped checkpoint file inside save_path.
        PATH = os.path.join(self.save_path, datetime.now().strftime("%Y%m%d%H%M%S")) + ".pth"
        created_model_config = {
            "feature_size": feature_size,
            "hidden_size": hidden_size,
            "num_layers": num_layers,
            "dropout": dropout_prob,
            "d_model": d_model,
            "path": PATH,
        }

        tcfg = self.train_config.get
        num_epochs = tcfg("num_epochs", 100)
        lr_warmup = tcfg("lr_warmup", 10)
        learning_rate = tcfg("learning_rate", 1e-4)
        loss_kind = tcfg("loss_kind", "mse")
        grad_clip = tcfg("grad_clip", 2.0)

        loss_fn = get_loss_fn(loss_kind, 1, 2, 2.0, 1)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)

        self.train_epochs(num_epochs, model, optimizer, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, True)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Reload the checkpointed LSTM, plot predictions for both splits and
        print the test score."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        num_tokens = meta["num_tokens"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]

        cfg = self.model_config
        d_model = cfg["d_model"]
        num_layers = cfg["num_layers"]
        hidden_size = cfg["hidden_size"]

        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        # Dropout set to 0 for inference.
        model = MyLSTM(feature_size, d_model, hidden_size, num_layers, num_tokens, 0)
        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        train_loader = DataLoader(train_set, 1)
        test_loader = DataLoader(test_set, 1)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_train, train_labels = self.evaluate(model, train_loader, device)
        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_train, train_labels, 1, train_names, self.save_path)
        plot_results(y_test, test_labels, 1, test_names, self.save_path)
        ers, score = calculate_score(y_test, test_labels)
        print(f"Score: {score:.4f} -- ERS: {ers}")


class GRURUL(RUL):
    """RUL trainer/predictor using the ``MyGRU`` sequence model."""

    def train(self):
        """Train a GRU on full sequences (one sequence per batch) and persist
        checkpoint path + hyper-parameters."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        # One full sequence per batch for both training and validation.
        train_loader = DataLoader(train_set, 1)
        val_loader = DataLoader(test_set, 1)

        feature_size = self.meta_information["feature_size"]
        num_tokens = self.meta_information["num_tokens"]

        mcfg = self.model_config.get
        d_model = mcfg("d_model", 200)
        num_layers = mcfg("num_layers", 4)
        hidden_size = mcfg("hidden_size", 200)
        dropout_prob = mcfg("dropout", 0.2)

        model = MyGRU(feature_size, d_model, hidden_size, num_layers, num_tokens, dropout_prob)

        # Timestamped checkpoint file inside save_path.
        PATH = os.path.join(self.save_path, datetime.now().strftime("%Y%m%d%H%M%S")) + ".pth"
        created_model_config = {
            "feature_size": feature_size,
            "hidden_size": hidden_size,
            "num_layers": num_layers,
            "dropout": dropout_prob,
            "d_model": d_model,
            "path": PATH,
        }

        tcfg = self.train_config.get
        num_epochs = tcfg("num_epochs", 3000)
        lr_warmup = tcfg("lr_warmup", 10)
        learning_rate = tcfg("learning_rate", 1e-5)
        loss_kind = tcfg("loss_kind", "mse")
        grad_clip = tcfg("grad_clip", 2.0)

        loss_fn = get_loss_fn(loss_kind, 1, 2, 2.0, 1)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)

        self.train_epochs(num_epochs, model, optimizer, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, True)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Reload the checkpointed GRU and plot predictions for both splits."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        num_tokens = meta["num_tokens"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]

        cfg = self.model_config
        d_model = cfg["d_model"]
        num_layers = cfg["num_layers"]
        hidden_size = cfg["hidden_size"]

        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        # Dropout set to 0 for inference.
        model = MyGRU(feature_size, d_model, hidden_size, num_layers, num_tokens, 0)
        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        train_loader = DataLoader(train_set, 1)
        test_loader = DataLoader(test_set, 1)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_train, train_labels = self.evaluate(model, train_loader, device)
        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_train, train_labels, 1, train_names, self.save_path)
        plot_results(y_test, test_labels, 1, test_names, self.save_path)


class Seq2SeqRUL(RUL):
    """RUL estimation with an encoder/decoder (Seq2Seq) model trained on whole
    degradation sequences, with scheduled teacher-forcing decay.

    Fixes vs. the previous revision:

    * the per-epoch learning rate is now written into the optimizer's
      ``param_groups`` — the old ``opt.lr = lr`` only created an unused
      attribute on the optimizer, so the LR schedule never took effect;
    * running losses accumulate ``loss.item()`` instead of the loss tensor,
      so the autograd graph is not retained across all batches of an epoch.
    """

    def train(self):
        """Train the Seq2Seq model, saving the best checkpoint (by validation
        loss) to a timestamped ``.pth`` file and the model configuration to
        ``self.model_config_path`` so ``predict`` can rebuild the model."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"
        batch_size = 1
        predict_size = 1

        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        train_loader = DataLoader(train_set, batch_size)
        test_loader = DataLoader(test_set, predict_size)

        num_tokens = self.meta_information["num_tokens"]
        full_life = self.meta_information["full_life"]
        feature_size = self.meta_information["feature_size"]

        hidden_size = self.model_config.get("hidden_size", 200)
        cnn_ks = self.model_config.get("cnn_ks", 8)
        stride = self.model_config.get("stride", 5)
        number_of_vars = self.model_config.get("k", 64)
        encoder_layers = self.model_config.get("encoder_layers", 1)
        decoder_layers = self.model_config.get("decoder_layers", 1)
        dropout_prob = self.model_config.get("dropout", 0.2)
        teacher_forcing_ratio = self.model_config.get("teacher_forcing_ratio", 0.3)

        PATH = os.path.join(self.save_path, datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")) + ".pth"

        encoder = Encoder(feature_size, number_of_vars, hidden_size, cnn_ks, stride, num_tokens,
                          n_layers=encoder_layers, dropout=dropout_prob)
        decoder = Decoder(hidden_size, 1, n_layers=decoder_layers, dropout=dropout_prob)
        model = Seq2Seq(encoder, decoder, teacher_forcing_ratio)

        # Everything predict() needs to rebuild exactly this model.
        created_model_config = {
            "feature_size": feature_size,
            "hidden_size": hidden_size,
            "cnn_ks": cnn_ks,
            "stride": stride,
            "k": number_of_vars,
            "encoder_layers": encoder_layers,
            "decoder_layers": decoder_layers,
            "dropout": dropout_prob,
            "teacher_forcing_ratio": teacher_forcing_ratio,
            "path": PATH,
        }

        num_epochs = self.train_config.get("num_epochs", 10000)
        lr_warmup = self.train_config.get("lr_warmup", 10)
        learning_rate = self.train_config.get("learning_rate", 1e-5)
        gamma = self.train_config.get("gamma", 0.7)
        loss_kind = self.train_config.get("loss_kind", "mse")
        grad_clip = self.train_config.get("grad_clip", 2.0)

        opt = torch.optim.Adam(model.parameters(), lr=learning_rate)

        train_times = 0   # optimizer steps since the last teacher-forcing decay
        better_times = 0  # epochs where train loss ran far ahead of val loss
        best_test_loss = 10000000
        loss_fn = get_loss_fn(loss_kind, full_life)
        warm_step = int(0.2 * num_epochs)

        for epoch in range(num_epochs):
            model.train(True)
            count = 0
            train_loss = 0.0
            # NOTE(review): the warmup ramp `learning_rate * (epoch+1) * lr_warmup`
            # can overshoot `learning_rate` by a large factor before the
            # exponential decay `lr ** 1.0004` pulls it back down — confirm
            # this schedule is intended.
            if epoch <= warm_step:
                lr = learning_rate * (epoch + 1) * lr_warmup
            elif lr > learning_rate:
                lr = lr ** (1.0004)
            else:
                lr = learning_rate
            # Write the scheduled LR into the optimizer. The previous
            # `opt.lr = lr` was a no-op attribute assignment: Adam reads its
            # learning rate from param_groups, not from an `lr` attribute.
            for group in opt.param_groups:
                group["lr"] = lr
            for i, (train_batch, tokens_idx, labels_batch) in enumerate(train_loader):
                opt.zero_grad()
                train_batch = train_batch.to(torch.float32).to(device)
                labels_batch = labels_batch.to(torch.float32).to(device)
                tokens_idx = tokens_idx.to(device)

                y_train = model(train_batch, labels_batch, tokens_idx)
                loss = loss_fn(y_train, labels_batch)

                # .item() detaches: keeps the running sum from retaining the
                # computation graph of every batch in the epoch.
                train_loss += loss.item()
                loss.backward()
                clip_grad_norm_(model.parameters(), grad_clip)
                opt.step()
                train_times += 1
                count += 1

            train_loss = train_loss / count

            model.eval()
            with torch.no_grad():

                count = 0
                test_loss = 0.0
                for i, (test_batch, tokens_idx, labels_batch) in enumerate(test_loader):
                    test_batch = test_batch.to(torch.float32).to(device)
                    labels_batch = labels_batch.to(torch.float32).to(device)
                    tokens_idx = tokens_idx.to(device)

                    y_test = model(test_batch, labels_batch, tokens_idx)
                    loss = loss_fn(y_test, labels_batch)
                    test_loss += loss.item()
                    count += 1
                if count > 0:
                    test_loss = test_loss / count
                print(f"Epoch number: {epoch} -- train loss {train_loss:.4} -- val loss {test_loss:.4} -- lr {lr:.6}")
            if test_loss < best_test_loss or epoch == 0:
                best_test_loss = test_loss
                torch.save({"epoch": epoch, "model_state_dict": model.state_dict(),
                            "optimizer_state_dict": opt.state_dict(), "loss": loss}, PATH)
            if train_loss < test_loss * 0.2:
                better_times += 1

            # Anneal teacher forcing by `gamma` once training loss runs far
            # ahead of validation loss often enough, or enough steps elapse.
            if better_times >= 3 or train_times >= 100:
                model.teacher_forcing_ratio *= gamma
                better_times -= 1
                train_times = 0

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Rebuild the Seq2Seq model from the saved config, load the best
        checkpoint and plot predictions for every train/test sequence.

        Teacher forcing is disabled (ratio 0) so the decoder runs purely on
        its own outputs.
        """
        feature_size = self.meta_information["feature_size"]
        num_tokens = self.meta_information["num_tokens"]
        train_names = self.meta_information["train_name"]
        test_names = self.meta_information["test_name"]
        full_life = self.meta_information["full_life"]

        # model_config's feature_size overrides the meta value when present.
        feature_size = self.model_config["feature_size"]
        hidden_size = self.model_config["hidden_size"]
        num_of_vars = self.model_config["k"]
        cnn_ks = self.model_config["cnn_ks"]
        stride = self.model_config["stride"]
        encoder_layers = self.model_config["encoder_layers"]
        decoder_layers = self.model_config["decoder_layers"]
        dropout_prob = self.model_config["dropout"]
        PATH = self.model_config["path"]

        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        train_set = SeqDataSet(self.data_path, "train")
        test_set = SeqDataSet(self.data_path, "test")

        train_loader = DataLoader(train_set, batch_size=1)
        test_loader = DataLoader(test_set, batch_size=1)

        encoder = Encoder(feature_size, num_of_vars, hidden_size, cnn_ks, stride, num_tokens,
                          n_layers=encoder_layers, dropout=dropout_prob)
        decoder = Decoder(hidden_size, 1, n_layers=decoder_layers, dropout=dropout_prob)
        model = Seq2Seq(encoder, decoder, 0)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        train_results, train_labels = self._run_inference(model, train_loader, device)
        plot_results(train_results, train_labels, 1, train_names, full_life, self.save_path)

        test_results, test_labels = self._run_inference(model, test_loader, device)
        plot_results(test_results, test_labels, 1, test_names, full_life, self.save_path)

    @staticmethod
    def _run_inference(model, loader, device):
        """Run `model` over `loader` with teacher forcing off; return
        (predictions, labels) as lists of flat numpy arrays, one per sequence."""
        results = []
        labels = []
        model.eval()
        with torch.no_grad():
            for batch, token_idx, label_batch in loader:
                batch = batch.to(torch.float32).to(device)
                label_batch = label_batch.to(torch.float32).to(device)
                token_idx = token_idx.to(device)
                pred = model(batch, label_batch, token_idx, 0)
                results.append(pred.cpu().numpy().reshape(-1))
                labels.append(label_batch.cpu().numpy().reshape(-1))
        return results, labels


class PyraformerRUL(RUL):
    """RUL estimation with the Pyraformer architecture over sliding-window
    samples, trained through the shared ``train_epochs`` loop."""

    def train(self):
        """Assemble a Pyraformer from ``model_config``, train it and persist
        both the best checkpoint path and the model configuration."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        batch_size = self.train_config.get("batch_size", 32)
        predict_batch_size = self.train_config.get("predict_batch_size", 64)

        num_tokens = 1  # hard-coded override of meta_information["num_tokens"]
        feature_size = self.meta_information["feature_size"]

        cfg = self.model_config
        input_size = cfg.get("input_size", 10)
        stride_size = cfg.get("stride_size", 1)
        decoder_type = cfg.get("decoder_type", "Attention")
        d_model = cfg.get("d_model", 256)
        window_set = cfg.get("window_set", [4])
        n_layer = cfg.get("n_layer", 3)
        d_inner_hid = cfg.get("d_inner_hid", 512)
        n_head = cfg.get("n_head", 4)
        inner_size = cfg.get("inner_size", 6)
        dropout_prob = cfg.get("dropout", 0.0)

        d_k = d_model // n_head  # per-head key dim
        d_v = d_k                # value dim mirrors key dim

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # First 60% of the training windows (shuffled) for fitting, last 40%
        # of the test windows for validation.
        train_loader = DataLoader(train_set, batch_size,
                                  sampler=PartSampler(train_set, 0, 0.6, True), num_workers=0)
        val_loader = DataLoader(test_set, predict_batch_size,
                                sampler=PartSampler(test_set, 0.6, 1), num_workers=0)

        print(len(train_loader))
        print(len(val_loader))
        model = Pyraformer(d_model, window_set, input_size, feature_size, inner_size,
                           n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens, decoder_type,
                           dropout_prob, device)

        PATH = os.path.join(self.save_path, datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")) + ".pth"

        # Everything predict() needs to rebuild exactly this model.
        created_model_config = {
            "decoder_type": decoder_type,
            "d_model": d_model,
            "n_head": n_head,
            "n_layer": n_layer,
            "window_set": window_set,
            "d_k": d_k,
            "d_v": d_v,
            "feature_size": feature_size,
            "d_inner_hid": d_inner_hid,
            "inner_size": inner_size,
            "input_size": input_size,
            "stride_size": stride_size,
            "path": PATH,
        }

        num_epochs = self.train_config.get("num_epochs", 200)
        lr_warmup = self.train_config.get("lr_warmup", 10)
        learning_rate = self.train_config.get("learning_rate", 1e-5)
        loss_kind = self.train_config.get("loss_kind", 'mse')
        grad_clip = self.train_config.get("grad_clip", 2.0)

        opt = torch.optim.Adam(lr=learning_rate, params=model.parameters())
        loss_fn = get_loss_fn(loss_kind, 1, 2, 2.0, 1)

        self.train_epochs(num_epochs, model, opt, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, True)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Rebuild the Pyraformer from the stored config, load its checkpoint,
        plot per-bearing predictions and print the score for the test set."""
        meta = self.meta_information
        cfg = self.model_config

        feature_size = meta["feature_size"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]  # read for parity with other models; unused here
        num_tokens = 1  # hard-coded override, mirroring train()

        input_size = cfg["input_size"]
        d_model = cfg["d_model"]
        window_set = cfg["window_set"]
        d_k = cfg["d_k"]
        d_v = cfg["d_v"]
        d_inner_hid = cfg["d_inner_hid"]
        n_head = cfg["n_head"]
        n_layer = cfg["n_layer"]
        inner_size = cfg["inner_size"]
        decoder_type = cfg["decoder_type"]

        stride_size = 1  # dense stride at inference time
        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)
        train_loader = DataLoader(train_set, batch_sampler=BearingSampler(train_set))
        test_loader = DataLoader(test_set, batch_sampler=BearingSampler(test_set))

        # Dropout 0 for inference.
        model = Pyraformer(d_model, window_set, input_size, feature_size, inner_size,
                           n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens, decoder_type,
                           0, device)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_train, train_labels = self.evaluate(model, train_loader, device)
        plot_results(y_train, train_labels, stride_size, train_names, self.save_path, 0)

        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_test, test_labels, stride_size, test_names, self.save_path, 0)
        ers, score = calculate_score(y_test, test_labels)
        print(f"Score: {score:.4f} -- ERS: {ers}")


class CrateRUL(RUL):
    """RUL estimation with the CRATE-tiny transformer over sliding windows."""

    def train(self):
        """Train ``CRATE_tiny`` on windowed features through the shared
        ``train_epochs`` loop, then write the model configuration (including
        the checkpoint path) to ``self.model_config_path``."""
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"

        feature_size = self.meta_information["feature_size"]

        batch_size = self.train_config.get("batch_size", 256)
        prediction_batch_size = self.train_config.get("prediction_batch_size", 256)
        input_size = self.model_config.get("input_size", 40)
        stride_size = self.model_config.get("stride_size", 5)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # All training windows (shuffled) for fitting, all test windows
        # (in order) for validation.
        train_loader = DataLoader(train_set, batch_size, sampler=PartSampler(train_set, 0, 1, True), num_workers=0)
        val_loader = DataLoader(test_set, prediction_batch_size, sampler=PartSampler(test_set, 0, 1, False), num_workers=0)
        print(len(train_loader))
        print(len(val_loader))

        model = CRATE_tiny(1, feature_size, input_size)

        PATH = os.path.join(self.save_path, datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")) + ".pth"

        # NOTE(review): dim/depth/heads/dropout below describe CRATE_tiny's
        # internals but are not passed to its constructor here — confirm they
        # stay in sync with crate.CRATE_tiny's defaults.
        created_model_config = {
            "num_classes": 1,
            "dim": 192,
            "depth": 3,
            "heads": 4,
            "dropout": 0,
            "emb_dropout": 0.2,
            "dim_head": 192 // 4,
            "path": PATH,
            "input_size": input_size,
            "stride_size": stride_size,
        }

        num_epochs = self.train_config.get("num_epochs", 60)
        lr_warmup = self.train_config.get("lr_warmup", 10)
        learning_rate = self.train_config.get("learning_rate", 8e-4)
        loss_kind = self.train_config.get("loss_kind", "weibull_mse")
        grad_clip = self.train_config.get("grad_clip", 2.0)

        opt = torch.optim.Adam(lr=learning_rate, params=model.parameters())

        loss_fn = get_loss_fn(loss_kind, 1, 2.28, 4.8, 2)

        self.train_epochs(num_epochs, model, opt, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device, True)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Predict a single RUL value for ``self.data``.

        Builds an input batch by tiling the raw window, reloads the trained
        model from its checkpoint and returns the value produced by
        ``self.evaluate``.

        Raises:
            ValueError: if no input data was provided to the instance.
        """
        feature_size = self.meta_information["feature_size"]
        train_names = self.meta_information["train_name"]
        test_names = self.meta_information["test_name"]
        full_life = self.meta_information["full_life"]

        path = self.model_config["path"]

        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            device = "cuda"
        else:
            device = "cpu"
        input_size = self.model_config["input_size"]
        stride_size = self.model_config["stride_size"]

        data = self.data
        if data is None:
            # Fail fast with a clear message instead of an opaque numpy/torch
            # error further down.
            raise ValueError("CrateRUL.predict requires `data` to be set on the instance")

        # NOTE(review): repeats the window 40x along axis 1 to form the model
        # input — presumably to match the training window length; confirm.
        data = torch.tensor(np.expand_dims(data, axis=0).repeat(40, axis=1), dtype=torch.float)

        model = CRATE_tiny(1, feature_size=feature_size, input_size=input_size)
        checkpoint = torch.load(path, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_pred = self.evaluate(model, data, device)
        # Fixed typo in the user-facing message ("predcition" -> "prediction").
        print(f"prediction is {y_pred:.4}")
        return y_pred


class InformerRUL(RUL):
    """RUL estimation with the Informer model over sliding windows of
    features, trained through the shared ``train_epochs`` loop."""

    def train(self):
        """Assemble an Informer from ``model_config``, train it and write the
        model configuration (including the checkpoint path) to disk."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
        batch_size = self.train_config.get("batch_size", 32)
        predict_batch_size = self.train_config.get("predict_batch_size", 32)

        meta = self.meta_information
        num_tokens = meta["num_tokens"]
        full_life = meta["full_life"]  # read for parity with other models; unused here
        feature_size = meta["feature_size"]

        cfg = self.model_config
        input_size = cfg.get("input_size", 64)
        stride_size = cfg.get("stride_size", 1)
        factor = cfg.get("factor", 5)
        d_model = cfg.get("d_model", 256)
        d_ff = cfg.get("d_ff", 384)
        n_layer = cfg.get("n_layer", 4)
        n_head = cfg.get("n_head", 4)
        dropout_prob = cfg.get("dropout", 0.1)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        # First 60% of the training windows for fitting, last 40% of the test
        # windows for validation.
        train_loader = DataLoader(train_set, batch_size,
                                  sampler=PartSampler(train_set, 0, 0.6), num_workers=0)
        val_loader = DataLoader(test_set, predict_batch_size,
                                sampler=PartSampler(test_set, 0.6), num_workers=0)

        model = Informer(factor, d_model, input_size, feature_size, n_head, n_layer, d_ff, num_tokens,
                         device, dropout_prob)
        PATH = os.path.join(self.save_path, datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")) + ".pth"

        # Everything predict() needs to rebuild exactly this model.
        created_model_config = {
            "d_model": d_model,
            "n_head": n_head,
            "n_layer": n_layer,
            "factor": factor,
            "d_ff": d_ff,
            "input_size": input_size,
            "stride_size": stride_size,
            "feature_size": feature_size,
            "path": PATH,
        }

        num_epochs = self.train_config.get("num_epochs", 10000)
        lr_warmup = self.train_config.get("lr_warmup", 10)
        learning_rate = self.train_config.get("learning_rate", 1e-5)
        loss_kind = self.train_config.get("loss_kind", 'mse')
        grad_clip = self.train_config.get("grad_clip", 2.0)

        opt = torch.optim.Adam(lr=learning_rate, params=model.parameters())
        loss_fn = get_loss_fn(loss_kind)

        # Early stopping is left at its default (False) here.
        self.train_epochs(num_epochs, model, opt, loss_fn, train_loader, val_loader,
                          PATH, learning_rate, lr_warmup, grad_clip, device)

        with open(self.model_config_path, "w") as file:
            json.dump(created_model_config, file)

    def predict(self):
        """Reload the trained Informer from its checkpoint and plot
        per-bearing predictions for the train and test splits."""
        meta = self.meta_information
        feature_size = meta["feature_size"]
        train_names = meta["train_name"]
        test_names = meta["test_name"]
        full_life = meta["full_life"]  # read for parity with other models; unused here
        num_tokens = meta["num_tokens"]

        cfg = self.model_config
        input_size = cfg["input_size"]
        d_model = cfg["d_model"]
        factor = cfg["factor"]
        d_ff = cfg["d_ff"]
        n_head = cfg["n_head"]
        n_layer = cfg["n_layer"]

        stride_size = 1  # dense stride at inference time
        PATH = cfg["path"]

        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

        train_set = WindowDataSet(self.data_path, "train", input_size, stride_size)
        test_set = WindowDataSet(self.data_path, "test", input_size, stride_size)

        train_loader = DataLoader(train_set, batch_sampler=BearingSampler(train_set))
        test_loader = DataLoader(test_set, batch_sampler=BearingSampler(test_set))

        # Dropout 0 for inference.
        model = Informer(factor, d_model, input_size, feature_size, n_head, n_layer, d_ff, num_tokens,
                         device, 0)

        checkpoint = torch.load(PATH, map_location=torch.device(device))
        model.load_state_dict(checkpoint["model_state_dict"])

        y_train, train_labels = self.evaluate(model, train_loader, device)
        plot_results(y_train, train_labels, stride_size, train_names, self.save_path)

        y_test, test_labels = self.evaluate(model, test_loader, device)
        plot_results(y_test, test_labels, stride_size, test_names, self.save_path)
