import os
import struct
import time
from scipy.fftpack import fftshift
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import tensorflow as tf
# import prior_factory as prior
from models import spec_aae
from models import plot_utils
import matplotlib
from tensorflow.python.framework import graph_util
plt.rcParams['font.sans-serif'] = ['SimHei']  # 显示中文
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号
font = {'size': 20}
matplotlib.rc('font', **font)
from PyQt5.Qt import *
from ExtendWidgets.notifycation import NotificationWindow

# TensorFlow session configuration shared by the training thread below
config = tf.ConfigProto()
# Allocate GPU memory on demand (grow as needed) rather than grabbing it all
config.gpu_options.allow_growth = True
# Cap the fraction of total GPU memory this process may use
config.gpu_options.per_process_gpu_memory_fraction = 0.8
import scipy
from  scipy.fftpack import fftshift
from scipy.signal import stft
# # 在创建session的时候把config作为参数传进去

class Args:
    """Configuration container for AAE training; fields are filled in by the caller/UI."""
    picture_save_path = ""   # directory where result images are written
    model_save_path = ""     # directory where model checkpoints are saved
    model_name = ""          # base name used for the exported model
    train_data_path = ""     # path to the training data file
    train_label_path = ""    # path to the training label file

    prior_type = "normal"    # latent prior distribution type
    encoder_type = "CNN"     # encoder architecture identifier
    decoder_type = "CNN"     # decoder architecture identifier
    dimz = 50                # latent-space dimensionality
    n_hidden = 256           # hidden units per layer
    num_epochs = 0           # number of training epochs (set by UI)
    batch_size = 0           # minibatch size (set by UI)
    learning_rate = 0        # optimizer learning rate (set by UI)
    PRR = True               # Plot Reproduce Result (reconstruction canvas)
    PRR_n_img_x = 5          # number of images along x-axis in a canvas
    PRR_n_img_y = 5          # number of images along y-axis in a canvas
    PRR_resize_factor = 1.0  # resize factor for each image in a canvas
    PMLR = True  # Plot Manifold Learning Result
    PMLR_n_img_x = 10  # number of images along x-axis in a canvas
    PMLR_n_img_y = 10  # number of images along y-axis in a canvas
    PMLR_resize_factor = 1.0  # resize factor for each image in a canvas
    PMLR_z_range = 3.0  # range for random latent vector
    PMLR_n_samples = 500  # number of samples used for latent-space probes


def save_subimages(res, name):
    """Save a comparison grid of image groups to `name`.

    Parameters
    ----------
    res : list of array-like
        Image groups, one per column (e.g. [originals, reconstructions]);
        each `res[i]` is a stack of 2-D spectrograms of identical shape.
        The suptitle assumes at least two groups.
    name : str
        Output image file path.
    """
    ncols = len(res)
    nrows = len(res[0])
    # one shared color scale across all panels
    vmax = np.max(res)
    vmin = np.min(res)
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=(15, 15))
    ax_flattern = ax.flatten()
    for i in range(ncols):
        for j in range(nrows):
            # FIX: the flat index was hard-coded as `2 * j + i`, which is only
            # correct for exactly two groups; use the actual column count so
            # the grid is filled correctly for any number of groups.
            im = ax_flattern[ncols * j + i].imshow(fftshift(res[i][j], axes=1), interpolation='none', aspect='auto', norm=norm)
            ax_flattern[ncols * j + i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
    fig.colorbar(im, ax=ax.ravel().tolist(), orientation='vertical')
    plt.suptitle("重构前数据范围:[{:.1f},{:.1f}],重构后数据范围：[{:.1f},{:.1f}]".format(res[0].min(), res[0].max(), res[1].min(),
                                                                          res[1].max()))
    fig.savefig(name)
    plt.close(fig)


def shuffle_in_unison_inplace(a, b, c=None):
    """Shuffle two (or three) equal-length arrays with one shared permutation.

    Parameters
    ----------
    a, b : numpy arrays of equal length.
    c : optional third array of the same length.
        FIX: the original default was the mutable `c=[]`, a classic Python
        pitfall (one list shared across calls); a `None` sentinel keeps the
        same observable behavior without the hazard.

    Returns
    -------
    (a[p], b[p]) or (a[p], b[p], c[p]) where p is one random permutation.
    """
    assert len(a) == len(b)
    p = np.random.permutation(len(a))
    if c is not None and len(c):
        assert len(c) == len(b)
        return a[p], b[p], c[p]
    return a[p], b[p]


class TimeUtils():
    """Helpers for formatting elapsed wall-clock time for the UI."""

    @classmethod
    def convert(cls, second):
        """Format `second` (seconds, int or float) as a human-readable string:
        '42.00s', '1m30.00s' or '1h1m1.00s'.

        FIX: the format strings carried a pointless `f` prefix on top of
        %-formatting (e.g. f"%.2fs" % second) — misleading and a latent bug
        if a brace ever appears; use plain %-format strings.
        """
        if second < 60:
            return "%.2fs" % second
        elif 60 <= second < 3600:
            return "%dm%.2fs" % (second // 60, second % 60)
        elif 3600 <= second:
            return "%dh%dm%.2fs" % (second // 3600, second % 3600 // 60, second % 3600 % 60)

# CSS styles plus opening <body>/<table> tags for the HTML report; GetHtml()
# appends the rows and the closing tags.
html_str = """
<style type="text/css">
table.gridtable {
    font-family: verdana,arial,sans-serif;
    font-size:13px;
    color:#333333;
    border-width: 1px;
    border-color: #666666;
    border-collapse: collapse;

}
table.gridtable th {
    border-width: 1px;
    padding: 8px;
    border-style: solid;
    border-color: #666666;
    background-color: #dedede;
}
table.gridtable td {
    border-width: 1px;
    padding: 8px;
    border-style: solid;
    border-color: #666666;
    background-color: #ffffff;
    text-align: center;
}
</style>
<body>
<table class="gridtable">
"""

def GetHtml(header, data):
    """Render `header` (column titles) and `data` (rows of cell values) as a
    complete styled HTML table fragment.

    FIX: the original indexed every row with `range(len(data[0]))`, which
    raises IndexError on rows shorter than the first and silently truncates
    longer ones; iterating each row directly handles ragged data and is
    identical for rectangular data. String building uses join instead of
    repeated concatenation.
    """
    parts = ["<tr>"]
    parts.extend("<th>{}</th>".format(title) for title in header)
    parts.append("</tr>")
    for row in data:
        parts.append("<tr>")
        parts.extend('<td><b>{}</b></td>'.format(cell) for cell in row)
        parts.append("</tr>")
    parts.append("</table></body>")
    return html_str + "".join(parts)




class TrainProcess(QThread):
    """Worker thread that trains a semi-supervised adversarial autoencoder
    (AAE) on spectrogram data and streams progress back to the owning dock
    widget through Qt signals.

    Supports pause/resume via an internal mutex and cooperative cancellation
    via QThread.requestInterruption().
    """

    updateProcessbar = pyqtSignal(float)          # finished-epoch count for the progress bar
    updatePlotOutput = pyqtSignal(list)           # [test recon loss, train recon loss]
    updateThreshold = pyqtSignal(list)            # [mean, std] of per-sample recon error
    requestShowPromption = pyqtSignal(list)       # [level, message] toast notification
    requestShowTrainImage = pyqtSignal(list)      # [input image, reconstructed image]
    requestAppendToTextBrower = pyqtSignal(str)   # HTML fragment for the log panel

    def __init__(self, parent=None, args: Args = None, train_data_num=128, train_data=None, MultiTrain=False,
                 noverlap=None, time_segment=10, nfft=2048, Fs=100e6, nperseg=None, F0=None):
        """Store training configuration; no heavy work happens until run()."""
        super(TrainProcess, self).__init__()
        self.__mutex = QMutex()  # pause() locks it so the epoch loop stalls
        self.MultiTrain = MultiTrain
        self.train_data_num = train_data_num
        # NOTE: imported locally to break a circular import with the dock widget module
        from windows.traindockwidget import DockWidget
        self.parent: DockWidget = parent
        self.locked = False  # True while training is paused
        self.args = args
        # STFT parameters carried along for callers (not used directly in run())
        self.noverlap = noverlap
        self.time_segment = time_segment
        self.nfft = nfft
        self.Fs = Fs
        self.nperseg = nperseg
        self.F0 = F0
        self.train_data = train_data

        self.setTerminationEnabled(True)

    def recover(self):
        """Resume a paused run by releasing the pause mutex."""
        if self.locked:
            self.__mutex.unlock()
            self.locked = False
            self.requestShowPromption.emit(["info", "已恢复训练"])

    def pause(self):
        """Pause training: the mutex stays locked until recover() is called."""
        if not self.locked:
            self.__mutex.lock()
            self.locked = True
            self.requestShowPromption.emit(["info", "已暂停训练"])

    def printToParentTextBrowser(self, text, color='white', font_size='3', font_family='Times New Roman'):
        """Append `text` to the parent's log browser wrapped in a styled <font> tag."""
        content = f"""
        <font size={font_size} color={color} style= "font-family:{font_family}; margin:2">{text}</font>
        """
        self.requestAppendToTextBrower.emit(content)

    def _reconstruction_errors(self, sess, y, x_hat, keep_prob, all_train_data):
        """Run the whole training set through the autoencoder in chunks of 100
        samples and return the flat array of per-sample squared reconstruction
        errors, or None if interruption was requested mid-way.
        """
        errors = []
        for k in range(all_train_data.shape[0] // 100):
            if self.isInterruptionRequested():
                self.requestShowPromption.emit(["info", f'已终止模型训练'])
                self.parent.training = False
                return None
            batch = all_train_data[k * 100:(k + 1) * 100]
            recon = sess.run(y, feed_dict={x_hat: batch, keep_prob: 1})
            errors.append(np.sum((recon - batch) ** 2, axis=(1, 2)))
        return np.array(errors).reshape([-1])

    def run(self):
        """Train the semi-supervised AAE.

        Builds the TensorFlow graph from self.args, runs the epoch/minibatch
        loops (reconstruction, discriminator, generator and supervised
        categorical phases), checkpoints whenever the test loss improves, and
        finally emits an anomaly threshold computed from reconstruction errors
        over the full training set.
        """
        # --- parameters -----------------------------------------------------
        RESULTS_DIR = self.args.picture_save_path
        model_save_path = self.args.model_save_path  # checkpoint directory
        model_name = self.args.model_name
        n_hidden = self.args.n_hidden
        n_epochs = self.args.num_epochs

        if not os.path.exists(model_save_path):
            os.makedirs(model_save_path)

        # TODO: verify the parent widgets are still valid before touching them
        self.parent.plot_loss_widget.setXRange(min=1, max=n_epochs)
        self.parent.progress_bar.setMinimum(0)
        self.parent.progress_bar.setMaximum(n_epochs)

        batch_size = self.args.batch_size
        learn_rate = self.args.learning_rate

        PRR = self.args.PRR  # Plot Reproduce Result
        PRR_n_img_x = self.args.PRR_n_img_x  # number of images along x-axis in a canvas
        PRR_n_img_y = self.args.PRR_n_img_y  # number of images along y-axis in a canvas
        PRR_resize_factor = self.args.PRR_resize_factor  # resize factor for each image in a canvas

        PMLR_n_samples = self.args.PMLR_n_samples  # labeled samples used for the per-epoch loss probes
        if self.isInterruptionRequested():
            print("train 进程 isInterruptionRequested")
            self.requestShowPromption.emit(["info", f'已终止模型训练'])
            self.parent.training = False
            return

        train_data = self.train_data
        print("训练数据范围:", train_data.min(), train_data.max())
        all_train_data = train_data
        train_labels = np.zeros([train_data.shape[0], 1])  # placeholder labels (all zero)

        # shuffle, then split 50/50 into train and test sets
        train_data, train_labels = shuffle_in_unison_inplace(train_data, train_labels)
        splitval = int(train_data.shape[0] * 0.5)
        test_data = train_data[:splitval]
        test_labels = train_labels[:splitval]
        train_data = train_data[splitval:]
        train_labels = train_labels[splitval:]

        # semi-supervised split: 20% of the remaining training set keeps labels
        splitval = int(train_data.shape[0] * 0.2)

        train_data_sup = train_data[:splitval]
        train_data = train_data[splitval:]

        train_labels_sup = train_labels[:splitval]
        train_labels = train_labels[splitval:]

        n_samples = train_data.shape[0]
        tsamples = train_data.shape[1]
        fsamples = train_data.shape[2]

        dim_img = [tsamples, fsamples]  # image dimensions (time x frequency)
        nlabels = train_labels.shape[1]  # number of label columns

        encoder = "CNN"
        dim_z = self.args.dimz  # latent-space dimensionality

        # --- build graph ----------------------------------------------------
        # input placeholders
        x_hat = tf.placeholder(tf.float32, shape=[None, tsamples, fsamples], name='input_img')
        x = tf.placeholder(tf.float32, shape=[None, tsamples, fsamples], name='target_img')
        x_id = tf.placeholder(tf.float32, shape=[None, nlabels], name='input_img_label')

        # dropout keep-probability
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        # samples drawn from the prior distribution
        z_sample = tf.placeholder(tf.float32, shape=[None, dim_z], name='prior_sample')
        cat_sample = tf.placeholder(tf.float32, shape=[None, nlabels], name='prior_sample_label')

        # network architecture
        y, z, neg_marginal_likelihood, D_loss, G_loss, cat_gen_loss, cat = spec_aae.adversarial_autoencoder_semsup_cat_nodimred(
            x_hat, x, x_id, z_sample, cat_sample, dim_img, dim_z, n_hidden, keep_prob, nlabels=nlabels, vdim=2)

        # --- optimization ---------------------------------------------------
        # FIX: the original filters used `"a" or "b" in var.name`, which is
        # always truthy because of operator precedence, so every optimizer
        # trained ALL variables. Filter each sub-network's variables properly.
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars
                  if "discriminator" in var.name or "discriminator_cat" in var.name]
        g_vars = [var for var in t_vars if encoder + "_encoder_cat" in var.name]
        ae_vars = [var for var in t_vars
                   if encoder + "_encoder_cat" in var.name or "CNN_decoder" in var.name]

        train_op_ae = tf.train.AdamOptimizer(learn_rate).minimize(neg_marginal_likelihood, var_list=ae_vars)
        train_op_d = tf.train.AdamOptimizer(learn_rate / 2.0).minimize(D_loss, var_list=d_vars)
        train_op_g = tf.train.AdamOptimizer(learn_rate).minimize(G_loss, var_list=g_vars)
        train_op_cat = tf.train.AdamOptimizer(learn_rate).minimize(cat_gen_loss, var_list=g_vars)

        # --- training -------------------------------------------------------
        if self.isInterruptionRequested():
            print("train 进程 isInterruptionRequested")
            self.requestShowPromption.emit(["info", f'已终止模型训练'])
            self.parent.training = False
            return

        # snapshot of reconstruction performance on fixed test images
        if PRR:
            PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x, PRR_n_img_y, tsamples, fsamples,
                                                        PRR_resize_factor)

            x_PRR = test_data[0:PRR.n_tot_imgs, :]  # first PRR_n_img_x * PRR_n_img_y test inputs

            x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, tsamples, fsamples)
            PRR.save_images(x_PRR_img, name='input.jpg')

        # fixed probe batches used to report losses every epoch
        # test-set probe
        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]

        # training-set probe
        x_train = train_data[0:PMLR_n_samples, :]
        # FIX: originally sliced test_labels here (copy/paste slip). Labels are
        # all-zero so results are unchanged, but use the matching array.
        x_train_id_PMLR = train_labels[0:PMLR_n_samples, :]

        # --- model training -------------------------------------------------
        total_batch = int(n_samples / batch_size)
        prev_loss = 1e99  # lowest test loss seen so far (checkpoint criterion)
        self.printToParentTextBrowser("初始化完毕, 开始训练", font_family='微软雅黑', font_size='3')

        self.requestShowPromption.emit(["info", '初始化完毕, 开始训练'])

        self.parent.training = True
        saver = tf.train.Saver()
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})
            time_lst = []  # per-epoch wall-clock durations
            for epoch in range(n_epochs):
                start = time.time()
                QMetaObject.invokeMethod(self.parent.lineEdit_train_epoch, "setText",
                                         Q_ARG(str, f"{epoch + 1} / {n_epochs}"))
                if epoch == 0:
                    # no timing data yet on the first epoch
                    QMetaObject.invokeMethod(self.parent.lineEdit_avg_train_time, "setText",
                                             Q_ARG(str, f"评估中"))
                    QMetaObject.invokeMethod(self.parent.lineEdit_current_total_time, "setText",
                                             Q_ARG(str, f"评估中"))
                    QMetaObject.invokeMethod(self.parent.lineEdit_rest_time, "setText",
                                             Q_ARG(str, f"评估中"))
                else:
                    QMetaObject.invokeMethod(self.parent.lineEdit_avg_train_time, "setText",
                                             Q_ARG(str, f"{TimeUtils.convert(float(np.mean(time_lst)))}"))
                    QMetaObject.invokeMethod(self.parent.lineEdit_current_total_time, "setText",
                                             Q_ARG(str, f"{TimeUtils.convert(float(np.sum(time_lst)))}"))
                    QMetaObject.invokeMethod(self.parent.lineEdit_rest_time, "setText",
                                             Q_ARG(str,
                                                   f"{TimeUtils.convert(float(np.mean(time_lst) * n_epochs - np.sum(time_lst)))}"))

                if self.isInterruptionRequested():
                    print("train 进程 isInterruptionRequested")
                    self.requestShowPromption.emit(["info", f'已终止模型训练'])
                    self.parent.training = False
                    return

                self.__mutex.lock()  # blocks here while pause() holds the mutex
                # reshuffle both data splits every epoch
                train_data_, train_label_ = shuffle_in_unison_inplace(train_data, train_labels)
                train_data_sup_, train_labels_sup_ = shuffle_in_unison_inplace(train_data_sup, train_labels_sup)

                # loop over all minibatches
                for k in range(total_batch):
                    QMetaObject.invokeMethod(self.parent.lineEdit_train_bacth, "setText",
                                             Q_ARG(str, f"{k + 1} / {total_batch}"))
                    if self.isInterruptionRequested():
                        self.requestShowPromption.emit(["info", f'已终止模型训练'])
                        print("train 进程 isInterruptionRequested")
                        self.parent.training = False
                        return
                    # offset of the current minibatch in the shuffled data
                    offset = (k * batch_size) % (n_samples)
                    offset_sup = (k * batch_size) % (train_data_sup.shape[0])
                    batch_xs_input = train_data_[offset:(offset + batch_size), :]
                    batch_ids_input = train_label_[offset:(offset + batch_size), :]
                    batch_xs_sup_input = train_data_sup_[offset_sup:(offset_sup + batch_size), :]
                    batch_ids_sup_input = train_labels_sup_[offset_sup:(offset_sup + batch_size), :]
                    batch_xs_target = batch_xs_input
                    batch_xs_sup_target = batch_xs_sup_input

                    # draw samples from the prior distribution
                    z_id_ = np.random.randint(0, nlabels, size=[batch_size])
                    samples = np.random.normal(0.0, 1, (batch_size, dim_z)).astype(np.float32)
                    z_id_one_hot_vector = np.zeros((batch_size, nlabels))
                    z_id_one_hot_vector[np.arange(batch_size), z_id_] = 1  # one-hot encode

                    # reconstruction loss (unsupervised batch, then supervised batch)
                    _, loss_likelihood0 = sess.run(
                        (train_op_ae, neg_marginal_likelihood),
                        feed_dict={x_hat: batch_xs_input, x: batch_xs_target, z_sample: samples,
                                   cat_sample: z_id_one_hot_vector, keep_prob: 0.9})

                    _, loss_likelihood1 = sess.run(
                        (train_op_ae, neg_marginal_likelihood),
                        feed_dict={x_hat: batch_xs_sup_input, x: batch_xs_sup_target, z_sample: samples,
                                   cat_sample: batch_ids_sup_input, keep_prob: 0.9})
                    loss_likelihood = (loss_likelihood0 + loss_likelihood1) / 2
                    # discriminator loss
                    _, d_loss = sess.run(
                        (train_op_d, D_loss),
                        feed_dict={x_hat: batch_xs_input, x: batch_xs_target, z_sample: samples,
                                   cat_sample: z_id_one_hot_vector, keep_prob: 0.9})
                    # generator loss: two generator steps per discriminator step
                    for _ in range(2):
                        _, g_loss = sess.run(
                            (train_op_g, G_loss),
                            feed_dict={x_hat: batch_xs_input, x: batch_xs_target, z_sample: samples,
                                       cat_sample: z_id_one_hot_vector, keep_prob: 0.9})
                        # supervised phase
                        _, cat_loss = sess.run(
                            (train_op_cat, cat_gen_loss),
                            feed_dict={x_hat: batch_xs_sup_input, x: batch_xs_sup_target, x_id: batch_ids_sup_input,
                                       keep_prob: 0.9})
                # d_loss: discriminator loss; loss_likelihood: reconstruction
                # loss; g_loss: generator loss; cat_loss: supervised-phase loss
                tot_loss = loss_likelihood + d_loss + g_loss + cat_loss

                # print cost every epoch
                print("epoch %d: L_tot %03.2f L_likelihood %03.4f d_loss %03.2f g_loss %03.2f " % (
                    epoch, tot_loss, loss_likelihood, d_loss, g_loss))
                self.printToParentTextBrowser("EPOCH: %d  训练集: [总损失: %03.2f 重构损失: %03.4f 鉴别器损失: %03.2f 生成器损失: %03.2f] " % (
                    epoch + 1, tot_loss, loss_likelihood, d_loss, g_loss), font_size='3')

                # save a reconstruction comparison figure for the fixed test batch
                y_PRR = sess.run(y, feed_dict={x_hat: x_PRR, keep_prob: 1})
                save_subimages([x_PRR[:10], y_PRR[:10]], f"{RESULTS_DIR}/Reco_%02d" % (epoch))

                self.requestShowTrainImage.emit([x_PRR[0], y_PRR[0]])

                # test-set losses and category predictions
                retcat, test_cat_loss, test_ll = sess.run((cat, cat_gen_loss, neg_marginal_likelihood),
                                                          feed_dict={x_hat: x_PMLR, x_id: id_PMLR, x: x_PMLR,
                                                                     keep_prob: 1})
                print("Accuracy: ",
                      100.0 * np.sum(np.argmax(retcat, 1) == np.argmax(id_PMLR, 1)) / retcat.shape[0],
                      test_cat_loss, test_ll)
                self.printToParentTextBrowser(f"EPOCH: {epoch + 1} 测试集: [重构损失: %.3f]" % test_ll, font_size='3')

                # training-set reconstruction loss
                train_rec_loss = sess.run(neg_marginal_likelihood,
                                          feed_dict={x_hat: x_train, x_id: x_train_id_PMLR, x: x_train,
                                                     keep_prob: 1})
                # emit both losses for the UI loss plot
                self.updatePlotOutput.emit([test_ll, train_rec_loss])

                save_loss = test_cat_loss + test_ll
                if prev_loss > save_loss:  # checkpoint only when the test loss improves
                    prev_loss = save_loss
                    print("保存模型")
                    saver.save(sess, "%s/model" % model_save_path)
                self.updateProcessbar.emit(epoch + 1)
                end = time.time()
                time_lst.append(end - start)
                if epoch % 20 == 19:
                    # periodically refresh the anomaly-threshold estimate
                    lst = self._reconstruction_errors(sess, y, x_hat, keep_prob, all_train_data)
                    if lst is None:
                        return  # interrupted
                    self.updateThreshold.emit([np.mean(lst), np.std(lst)])

                self.__mutex.unlock()

            # final timing read-outs
            QMetaObject.invokeMethod(self.parent.lineEdit_avg_train_time, "setText",
                                     Q_ARG(str, f"{TimeUtils.convert(float(np.mean(time_lst)))}"))
            QMetaObject.invokeMethod(self.parent.lineEdit_current_total_time, "setText",
                                     Q_ARG(str, f"{TimeUtils.convert(float(np.sum(time_lst)))}"))
            QMetaObject.invokeMethod(self.parent.lineEdit_rest_time, "setText",
                                     Q_ARG(str,
                                           f"{TimeUtils.convert(float(np.mean(time_lst) * n_epochs - np.sum(time_lst)))}"))

            # final anomaly threshold over the whole training set
            self.printToParentTextBrowser("训练完毕, 计算阈值中...", font_family='微软雅黑', font_size='3')
            lst = self._reconstruction_errors(sess, y, x_hat, keep_prob, all_train_data)
            if lst is None:
                return  # interrupted
            self.updateThreshold.emit([np.mean(lst), np.std(lst)])
            self.printToParentTextBrowser("阈值计算完毕", font_family='微软雅黑', font_size='3')
        self.parent.training = False
        self.requestShowPromption.emit(["success", f'模型训练完毕'])
