import os
import sys

import numpy as np
from scipy.stats import multivariate_normal
from tqdm import tqdm
from keras.callbacks import ModelCheckpoint


class TA:
    """Template attack: fits one multivariate Gaussian per class and
    scores samples by their per-class probability density."""

    def __init__(self):
        # Per-class mean vectors, {label: 1-D ndarray}; populated by set_para().
        self.mean_class_x = None
        # Per-class covariance matrices, {label: 2-D ndarray}; populated by set_para().
        self.covMatrix = {}
        # Training samples grouped by label, {label: 2-D ndarray}.
        self.classes = {}
        # Frozen scipy distributions, {label: multivariate_normal}; populated by set_para().
        self.multivariate_normal_templates = None

    def set_para(self, data):
        """Fit one Gaussian template per class from data.train_x / data.train_y.

        Classes with at most n_features samples are dropped, because their
        covariance estimate would be rank-deficient.  A ValueError (e.g. a
        covariance matrix scipy rejects) is caught and reported, leaving the
        object partially initialized — same best-effort behavior as before.
        """
        try:
            # Group training traces by their label.
            grouped = {label: [] for label in np.unique(data.train_y)}
            for trace, label in zip(data.train_x, data.train_y):
                grouped[label].append(trace)

            # Keep only classes with more samples than features so the
            # covariance matrix is full rank.
            n_features = data.train_x.shape[1]
            self.classes = {
                label: np.array(traces)
                for label, traces in grouped.items()
                if len(traces) > n_features
            }

            # Per-class mean vector.
            self.mean_class_x = {
                label: np.mean(samples, axis=0)
                for label, samples in self.classes.items()
            }

            # Per-class covariance matrix (np.cov expects variables as rows,
            # hence the transpose).
            self.covMatrix = {
                label: np.cov(samples.T)
                for label, samples in self.classes.items()
            }

            # Frozen per-class probability density functions.
            self.multivariate_normal_templates = {
                label: multivariate_normal(
                    mean=self.mean_class_x[label],
                    cov=self.covMatrix[label],
                )
                for label in self.classes
            }
        except ValueError:
            # Too many points of interest relative to the number of traces;
            # report and fall through (message intentionally kept as-is).
            print("兴趣点数太多，能量迹少于兴趣点，进行截断")

    def get_score(self, x):
        """Return an (n_samples, n_classes) array of per-class pdf values."""
        proba = np.zeros((len(x), len(self.classes)))
        for i, sample in enumerate(tqdm(x, desc="预测概率")):
            proba[i] = [
                self.multivariate_normal_templates[label].pdf(sample)
                for label in self.classes
            ]
        return proba


class DL:
    """Thin wrapper around a Keras model: builds and compiles it, trains
    with best-model checkpointing, and exposes raw prediction scores."""

    def __init__(self):
        self.model = None       # compiled Keras model, set by train()
        self.loss = None        # instantiated loss object
        self.optimizer = None   # instantiated optimizer object
        self.epochs = None
        self.batch_size = None

    def train(self, data, get_model, loss, learning_rate, optimizer, epochs, batch_size, output_shape, embedding=False):
        """Build, compile and fit the model on data.train_x / data.train_y.

        Args:
            data: project object exposing train_x/train_y, test_x/test_y,
                output_path and model_id (assumed shape — confirm with caller).
            get_model: factory called as
                get_model(input_shape=..., output_shape=..., embedding=...).
            loss: loss class, instantiated with no arguments.
            learning_rate: forwarded to the optimizer class.
            optimizer: optimizer class, instantiated with learning_rate.
            epochs: number of training epochs.
            batch_size: minibatch size for fit().
            output_shape: forwarded to get_model.
            embedding: selects the checkpoint subdirectory
                ("model_E" vs "model_C") and is forwarded to get_model.
        """
        self.model = get_model(input_shape=data.train_x[0].shape,
                               output_shape=output_shape, embedding=embedding)
        self.loss = loss()
        self.optimizer = optimizer(learning_rate)
        self.epochs = epochs
        self.batch_size = batch_size
        self.model.compile(loss=self.loss, optimizer=self.optimizer,
                           metrics=["accuracy"])
        # Best-model checkpoint path; embedding models go to a separate dir.
        subdir = "model_E" if embedding else "model_C"
        file = os.path.join(data.output_path, subdir, data.model_id + ".h5")
        self.model.fit(data.train_x, data.train_y,
                       batch_size=self.batch_size, epochs=self.epochs,
                       validation_data=(data.test_x, data.test_y), verbose=1,
                       callbacks=[ModelCheckpoint(file, save_best_only=True)])

    def get_score(self, x):
        """Return the model's raw predictions for x."""
        return self.model.predict(x)