import os.path
import re

import numpy as np
import time
from threading import Thread
from keras.layers import Input, Dense, Flatten, Dropout, multiply
from keras.layers import BatchNormalization, Embedding
from keras.layers import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from PyQt5.QtGui import QIntValidator,QDoubleValidator,QTextCursor
from PyQt5.QtWidgets import QWidget,QFileDialog
from PyQt5.QtCore import pyqtSignal
from keras.models import load_model
from gui.windows.CGAN import Ui_CGAN
from data.h5_wr import get_profile_data,save_profile_data
from config.network import custom_comp

class CGANWindow(QWidget, Ui_CGAN):
    """Window for training a Conditional GAN on profiling traces and for
    generating synthetic traces with a trained generator model."""

    # Emitted from the training worker thread as (message, override) so the
    # GUI text box is only modified from the main (GUI) thread.
    signal_update_text = pyqtSignal(str,bool)
    # Emitted when the training thread ends, to restore the run-button text.
    signal_finish_train = pyqtSignal()

    def __init__(self,startWindow):
        """Build the CGAN window.

        :param startWindow: application start window; supplies the shared
            param_dict (directory configuration) and navigation targets.
        """
        super(CGANWindow, self).__init__()
        self.setupUi(self)
        self.setFixedSize(646, 774)
        self.startWindow=startWindow
        self.param_dict=startWindow.param_dict
        # State must exist before widgets are initialised and slots connected.
        self.initial_property()
        self.initial_widget()
        self.connect_slot()

    def initial_property(self):
        """Reset all runtime state held by the window."""
        self.text_string = ''
        # Profiling dataset used for training (set by the dataset browser).
        self.traces_profile = self.label_profile = None
        # Most recently generated traces and their labels.
        self.traces_gen = self.label_gen = None
        self.generator_model = None
        self.stop_train = False
        # Maps the combo-box text to a (generator builder, discriminator
        # builder) pair of bound methods.
        builders = {
            'MLP': (self.Generator_MLP, self.Discriminator_MLP),
            'CNN': (self.Generator_CNN, self.Discriminator_CNN),
            'LSTM': (self.Generator_LSTM, self.Discriminator_LSTM),
        }
        self.network_type = builders

    def initial_widget(self):
        """Populate widgets with default values and apply the stylesheet."""
        self.network.addItems(self.network_type)   # combo box gets dict keys
        self.network.setCurrentText(list(self.network_type.keys())[0])
        # Default hyper-parameters for training / generation.
        self.epochs.setText(str(50000))
        self.batch_size.setText(str(100))
        self.learning_rate.setText(str(0.00001))
        self.gen_traces_num.setText(str(1000))
        self.latent_dim.setText(str(200))
        self.merge_raw_dataset.setChecked(True)
        # Sync widget visibility with the default choices.
        self.set_func_choose_status()
        # NOTE(review): path is relative to the process working directory.
        with open("resource/style.qss", encoding='UTF-8') as f:
            self.setStyleSheet(f.read())
        self.set_validator()

    def connect_slot(self):
        """Wire widget and cross-thread signals to their handlers."""
        # The function radio buttons / merge checkbox drive widget visibility.
        self.gen_model.clicked.connect(self.set_func_choose_status)
        self.gen_traces.clicked.connect(self.set_func_choose_status)
        self.merge_raw_dataset.clicked.connect(self.set_func_choose_status)
        self.original_dataset_path_browser.clicked.connect(self.browse_oringinal_dataset_path)
        self.generator_path_browser.clicked.connect(self.browser_generator_path)
        self.start_run.clicked.connect(self.start_process)
        # Signals emitted by the training worker thread.
        self.signal_update_text.connect(self.update_text)
        self.signal_finish_train.connect(lambda:self.start_run.setText("开始执行"))
        # Keep the newest log output scrolled into view.
        self.related_info.textChanged.connect(lambda: self.related_info.moveCursor(QTextCursor.End))

    def start_process(self):
        """Handle the run button: stop a running training, start training,
        or generate traces, depending on the current UI state."""
        try:
            # While training, the run button reads '结束训练' and acts as a
            # stop request for the worker thread.
            if self.start_run.text() == '结束训练':
                self.stop_train = True
                return
            if self.gen_model.isChecked():
                # Training mode: profiling data must have been loaded.
                if self.traces_profile is None or self.label_profile is None:
                    raise ValueError("未加载用于训练的数据！")
                self.train_CGAN()
                return
            # Generation mode: a generator model must be available.
            if self.generator_model is None:
                raise ValueError("请先加载或训练生成器模型！")
            self.start_run.setEnabled(False)
            self.generate_traces()
            self.start_run.setEnabled(True)
        except Exception as e:
            self.update_text(repr(e))
            self.update_text('处理失败！')
    def train_th(self,generator,discriminator,combined,batch_size,epochs,latent_dim,label_cls=None):
        """Worker-thread training loop for the CGAN.

        Each epoch trains the discriminator on one real and one generated
        batch, then trains the generator through the combined model. Progress
        is reported via signal_update_text so the GUI is only touched from
        the main thread.

        :param generator: Keras generator taking [noise, label].
        :param discriminator: compiled Keras discriminator taking [traces, label].
        :param combined: stacked generator->discriminator model for G updates.
        :param batch_size: samples per training step.
        :param epochs: number of training steps.
        :param latent_dim: size of the generator noise vector.
        :param label_cls: number of label classes to sample for generator
            training; when None it is inferred from the loaded labels.
        """
        if label_cls is None:
            # BUGFIX: labels were hard-coded to randint(0, 9), which is wrong
            # whenever the dataset has a different class count. Infer the
            # class count from the profiling labels (labels assumed 0-based,
            # matching the label_cls guess in browse_oringinal_dataset_path).
            label_cls = int(np.max(self.label_profile)) + 1
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        for epoch in range(epochs):
            # --- discriminator step: real batch vs generated batch ---
            idx = np.random.randint(0, self.traces_profile.shape[0], batch_size)
            traces, labels = self.traces_profile[idx], self.label_profile[idx]
            noise = np.random.normal(0, 1, (batch_size, latent_dim))
            gen_traces = generator.predict([noise, labels],verbose=0)
            d_loss_real = discriminator.train_on_batch([traces, labels], valid)
            d_loss_fake = discriminator.train_on_batch([gen_traces, labels], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # --- generator step: try to make the frozen D output "valid" ---
            sampled_labels = np.random.randint(0, label_cls, batch_size).reshape(-1, 1)
            g_loss = combined.train_on_batch([noise, sampled_labels], valid)
            self.signal_update_text.emit(f"epoch-{epoch}: [Discriminator loss: {d_loss[0]:.2f}, acc: {d_loss[1]:.2f}] [Generator loss: {g_loss:.2f}]",False)
            if self.stop_train:
                self.stop_train=False
                break
        self.generator_model = generator
        self.signal_update_text.emit("训练完成.",False)
        # NOTE(review): save_generator opens a QFileDialog from this worker
        # thread; Qt widgets should only be used from the GUI thread — TODO
        # move the save behind a signal handled on the main thread.
        self.save_generator(generator)
        self.signal_finish_train.emit()

    def train_CGAN(self):
        """Build generator/discriminator/combined models and start training
        in a background thread.

        The compile order below is significant (standard Keras GAN pattern):
        the discriminator is compiled while trainable, then frozen *before*
        the combined model is compiled, so generator updates through
        `combined` leave the discriminator weights untouched.
        """
        epochs = int(self.epochs.text())
        batch_size = int(self.batch_size.text())
        traces_point = self.traces_profile.shape[1]   # samples per trace
        latent_dim = int(self.latent_dim.text())
        label_cls = int(self.label_cls.text())
        learning_rate = float(self.learning_rate.text())
        # Builder pair for the architecture selected in the combo box.
        generator_func,discriminator_func=self.network_type[self.network.currentText()]
        generator=generator_func(traces_point,latent_dim,label_cls)
        discriminator=discriminator_func(traces_point,label_cls)
        optimizer = Adam(learning_rate, 0.9)   # second positional arg is beta_1
        discriminator.compile(
            loss=['binary_crossentropy'],
            optimizer=optimizer,
            metrics=['accuracy'])
        # Freeze D inside the combined (generator-training) model.
        discriminator.trainable = False
        noise = Input(shape=(latent_dim,))
        label = Input(shape=(1,))
        gen_traces = generator([noise, label])
        valid = discriminator([gen_traces, label])
        combined = Model([noise, label], valid)
        combined.compile(
            loss=['binary_crossentropy'],
            optimizer=optimizer)
        th_train = Thread(target=self.train_th,args=(generator,discriminator,combined,batch_size,epochs,latent_dim))
        # The run button doubles as a stop button while training runs.
        self.start_run.setText('结束训练')
        th_train.start()
    @staticmethod
    def _patch_shape_in_description(description, key, shape):
        """Rewrite the parenthesized shape recorded after `key` in `description`.

        Commas inside the shape tuple are stored as full-width '，' so they do
        not clash with the comma-separated description format. Returns the
        description unchanged when `key` is absent.
        """
        if key not in description:
            return description
        shape_str = str(shape).replace(',', '，')
        pattern = re.escape(key)
        start = re.search(pattern + r'.*?\(', description).end() - 1
        end = re.search(pattern + r'.*?\)', description).end()
        return description[:start] + shape_str + description[end:]

    def generate_traces(self):
        """Generate traces with the loaded generator, then either merge them
        into the original dataset or save them to a new .h5 file.

        All failures are reported through update_text; nothing propagates.
        """
        try:
            gen_traces_num = int(self.gen_traces_num.text())
            latent_dim = int(self.latent_dim.text())
            label_cls = int(self.label_cls.text())
            noise = np.random.normal(0, 1, (gen_traces_num, latent_dim))
            # Labels drawn uniformly from [0, label_cls); shape (n, 1) for the
            # embedding input.
            sampled_labels = np.random.randint(0, label_cls, gen_traces_num).reshape(-1, 1)
            gen_traces = self.generator_model.predict([noise, sampled_labels])
            sampled_labels = sampled_labels.reshape(-1)
            if self.merge_raw_dataset.isChecked():
                path = self.original_dataset_path.text()
                if not os.path.exists(path):
                    raise RuntimeError("待合并建模数据集路径不存在！")
                traces, label, description = get_profile_data(path)
                traces = np.vstack((traces, gen_traces))
                label = np.hstack((label, sampled_labels))
                # Keep the recorded shapes in the description consistent with
                # the merged arrays (duplicated regex logic now shared).
                description = self._patch_shape_in_description(description, "traces_shape", traces.shape)
                description = self._patch_shape_in_description(description, "midvalue_shape", label.shape)
                save_profile_data(path, traces, label, description)
                self.update_text(f"生成的{gen_traces_num}条建模数据已合并到{path}.")
            else:
                # Save into a per-day directory under the profile file root.
                ctime = time.localtime()
                save_dir = self.param_dict['profile_file_dir'] + f"{ctime.tm_year}-{ctime.tm_mon}-{ctime.tm_mday}/"
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                hms = f'{ctime.tm_hour:02d}{ctime.tm_min:02d}{ctime.tm_sec:02d}'
                file_name = f"Generator_{self.network.currentText()}_{gen_traces_num} traces_{hms}.h5"
                fname, _ = QFileDialog.getSaveFileName(self, "Save file", save_dir + file_name, "*.h5")
                if fname:
                    description = {}
                    description["dataset_type"] = "profile dataset"
                    description["description"] = f'profile dataset generated by generator {self.generator_path.text()}.'
                    description["traces_shape"] = str(gen_traces.shape).replace(',', '，')
                    description["midvalue_shape"] = str(sampled_labels.shape).replace(',', '，')
                    save_profile_data(fname, gen_traces, sampled_labels, description=description)
                    self.update_text(f"生成的建模数据迹已保存到{fname}.")
        except Exception as e:
            self.update_text(repr(e))
            self.update_text("生成能量迹失败！")
    def save_generator(self,generator):
        """Ask the user for a path and save the trained generator model.

        Results are reported via signal_update_text because this is called
        from the training thread.

        :param generator: the Keras generator model to persist as .h5.
        """
        try:
            ctime = time.localtime()
            # Group saved models into one directory per day. Renamed from
            # `dir`, which shadowed the builtin.
            save_dir = self.param_dict['model_file_dir'] + f"{ctime.tm_year}-{ctime.tm_mon}-{ctime.tm_mday}/"
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            hms = f'{ctime.tm_hour:02d}{ctime.tm_min:02d}{ctime.tm_sec:02d}'
            file_name = f"Generator_{self.network.currentText()}_{hms}.h5"
            fname, _ = QFileDialog.getSaveFileName(self, "Save file", save_dir + file_name, "*.h5")
            if fname:
                generator.save(fname)
                self.signal_update_text.emit(f"生成器模型已保存到{fname}.",False)
            else:
                self.signal_update_text.emit("模型未保存！", False)
        except Exception as e:
            self.signal_update_text.emit(repr(e),False)
            self.signal_update_text.emit("模型保存失败！",False)

    def set_func_choose_status(self):
        """Show/hide parameter widgets to match the selected function.

        Training mode (gen_model) shows the training hyper-parameters and the
        original dataset path; generation mode (gen_traces) shows the
        generator path and trace count, plus the dataset path when merging
        into an existing dataset is enabled.
        """
        generate_model = self.gen_model.isChecked()
        generate_traces = self.gen_traces.isChecked()
        merge_dataset=self.merge_raw_dataset.isChecked()
        # The original dataset is needed both for training and for merging.
        need_original_dataset=generate_model or merge_dataset
        self.merge_raw_dataset.setHidden(generate_model)
        self.original_dataset_path_L.setHidden(not need_original_dataset)
        self.original_dataset_path.setHidden(not need_original_dataset)
        self.original_dataset_path_browser.setHidden(not need_original_dataset)
        # Generator-model widgets only matter in generation mode.
        self.generator_path_L.setHidden(generate_model)
        self.generator_path.setHidden(generate_model)
        self.generator_path_browser.setHidden(generate_model)
        # Training hyper-parameter widgets only matter in training mode.
        self.network_L.setHidden(generate_traces)
        self.network.setHidden(generate_traces)
        self.epochs_L.setHidden(generate_traces)
        self.epochs.setHidden(generate_traces)
        self.batch_size_L.setHidden(generate_traces)
        self.batch_size.setHidden(generate_traces)
        self.learning_rate_L.setHidden(generate_traces)
        self.learning_rate.setHidden(generate_traces)
        self.gen_traces_num_L.setHidden(generate_model)
        self.gen_traces_num.setHidden(generate_model)

    def browse_oringinal_dataset_path(self):
        """Pick an original profiling dataset (*.h5) and load it.

        Loads traces/labels, guesses the label class count from the maximum
        label value, and echoes the stored dataset description.
        NOTE(review): the method name has a typo ("oringinal") but is
        referenced by connect_slot, so it is kept for compatibility.
        """
        try:
            fname,_ = QFileDialog.getOpenFileName(self, "Open file", self.param_dict['profile_file_dir'], "*.h5")
            if fname:
                # Convert to a forward-slash path relative to the parent
                # directory; presumably the app runs one level below the
                # project root — TODO confirm.
                relative_path = '../' + os.path.relpath(fname, start='../').replace('\\', '/')
                self.traces_profile, self.label_profile,description=get_profile_data(relative_path)
                # (n, 1) shape as expected by the Keras embedding inputs.
                self.label_profile=self.label_profile.reshape(-1,1)
                self.original_dataset_path.setText(relative_path)
                # Guess class count as max label + 1 (labels assumed 0-based).
                label_cls_guess = np.max(self.label_profile) + 1
                self.label_cls.setText(str(label_cls_guess))
                # Pretty-print the description dict string, one item per line.
                description = description.replace(',', '\n').replace('{', '{\n').replace('}', '\n}')
                self.update_text(f"原建模数据集信息：\n{description}")
                self.update_text("加载原建模数据成功.", False)
        except Exception as e:
            self.update_text(repr(e))
            self.update_text("加载原建模数据失败！")

    def browser_generator_path(self):
        """Pick a saved generator model (*.h5), load and sanity-check it.

        A valid CGAN generator has exactly two inputs — (noise, label) —
        where the noise input has shape (batch, latent_dim); latent_dim is
        copied into the corresponding line edit on success.
        """
        try:
            fname, _ = QFileDialog.getOpenFileName(self, "Open file", self.param_dict['model_file_dir'], "*.h5")
            if not fname:
                return
            self.generator_model = load_model(fname, custom_objects=custom_comp)
            input_shapes = self.generator_model.input_shape
            if len(input_shapes) != 2 or len(input_shapes[0]) != 2:
                # Wrong topology: discard the loaded model before reporting.
                self.generator_model = None
                raise RuntimeError("请选择正确的生成器模型！")
            self.latent_dim.setText(str(input_shapes[0][1]))
            self.generator_path.setText(fname)
            self.update_text("加载生成器模型成功.")
        except Exception as e:
            self.update_text(repr(e))
            self.update_text("加载生成器模型失败(可能选择了错误的文件)！")

    def update_text(self, data, override=False):
        """Append a message to the related_info text box.

        Message suffix conventions:
          - trailing '#'  : append raw, '#' stripped, no newline;
          - trailing '...': prefixed "(Please Wait)";
          - trailing '.'  : prefixed "(Execution Success)";
          - trailing '!' or '！': prefixed "(Execution Fail)";
          - a dict is rendered as a "trs file head" key/value listing.

        :param data: message string (or a dict of header fields).
        :param override: when True, clear the accumulated text first.
        """
        if override:
            self.text_string = ''
        if isinstance(data, dict):
            self.text_string += 'trs file head:\n'
            for k in data:
                self.text_string += f"  {k}: {data[k]}\n"
            self.text_string += '\n'
        else:
            text = str(data)
            if not text:
                pass  # BUGFIX: empty message used to raise IndexError on text[-1]
            elif text[-1] == '#':
                self.text_string += text[:-1]
            elif len(text) > 3 and text[-3:] == '...':
                self.text_string += "(Please Wait)" + text + '\n'
            elif text[-1] == '.':
                self.text_string += "(Execution Success)" + text + '\n'
            elif text[-1] == '!' or text[-1] == '！':
                self.text_string += "(Execution Fail)" + text + '\n'
            else:
                self.text_string += text + '\n'
        self.related_info.setText(self.text_string)
        # Bound the backlog: keep only the newest part once it grows too big.
        if len(self.text_string) > 10240:
            self.text_string = self.text_string[7680:]

    def set_validator(self):
        """Restrict the numeric line edits to sane input ranges."""
        epochs_validator = QIntValidator(10, 1000000, self)
        small_int_validator = QIntValidator(20, 1000, self)
        count_validator = QIntValidator(0, 100000, self)
        rate_validator = QDoubleValidator(0, 1, 6, self)
        rate_validator.setNotation(QDoubleValidator.StandardNotation)
        self.epochs.setValidator(epochs_validator)
        # batch size, latent dimension and class count share one range.
        self.batch_size.setValidator(small_int_validator)
        self.latent_dim.setValidator(small_int_validator)
        self.label_cls.setValidator(small_int_validator)
        self.learning_rate.setValidator(rate_validator)
        self.gen_traces_num.setValidator(count_validator)

    def closeEvent(self, a0):
        """On window close: clear the log and return to the data window."""
        self.text_string = ''
        self.update_text("#", True)   # '#' appends nothing; just refreshes the box
        self.startWindow.dataWindow.update_process_tips("CGAN窗口已关闭.")
        self.startWindow.dataWindow.show()

    def Generator_MLP(self,traces_point,latent_dim,label_cls):
        """Build an MLP generator mapping (noise, label) -> synthetic trace.

        The label is embedded into the latent space and multiplied with the
        noise vector before being fed through the MLP stack; output is tanh
        over traces_point values.
        """
        net = Sequential()
        net.add(Dense(128, input_dim=latent_dim))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.9))
        # Two more Dense->LeakyReLU->BatchNorm stages of growing width.
        for width in (256, 512):
            net.add(Dense(width))
            net.add(LeakyReLU(alpha=0.2))
            net.add(BatchNormalization(momentum=0.9))
        net.add(Dense(traces_point, activation='tanh'))
        noise = Input(shape=(latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(label_cls, latent_dim)(label))
        merged_input = multiply([noise, label_embedding])
        return Model([noise, label], net(merged_input))

    def Discriminator_MLP(self,traces_point,label_cls):
        """Build an MLP discriminator scoring (trace, label) pairs real/fake.

        The label is embedded to the trace length and multiplied with the
        trace; output is a single sigmoid validity score.
        """
        net = Sequential()
        net.add(Dense(512, input_dim=traces_point))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dropout(0.4))
        # Two further identical Dense->LeakyReLU->Dropout stages.
        for _ in range(2):
            net.add(Dense(512))
            net.add(LeakyReLU(alpha=0.2))
            net.add(Dropout(0.4))
        net.add(Dense(1, activation='sigmoid'))
        traces = Input(shape=(traces_point,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(label_cls, traces_point)(label))
        merged_input = multiply([Flatten()(traces), label_embedding])
        return Model([traces, label], net(merged_input))

    def Generator_CNN(self,traces_point,latent_dim,label_cls):
        """Generator used for the 'CNN' network choice.

        NOTE(review): the original body was a byte-for-byte copy of
        Generator_MLP (it contains no convolutional layers); delegate to it
        to remove the duplication. TODO: implement a real CNN architecture.
        """
        return self.Generator_MLP(traces_point, latent_dim, label_cls)

    def Discriminator_CNN(self,traces_point,label_cls):
        """Discriminator used for the 'CNN' network choice.

        NOTE(review): the original body was a byte-for-byte copy of
        Discriminator_MLP (no convolutional layers); delegate to it to remove
        the duplication. TODO: implement a real CNN architecture.
        """
        return self.Discriminator_MLP(traces_point, label_cls)

    def Generator_LSTM(self,traces_point,latent_dim,label_cls):
        """Generator used for the 'LSTM' network choice.

        NOTE(review): the original body was a byte-for-byte copy of
        Generator_MLP (no recurrent layers); delegate to it to remove the
        duplication. TODO: implement a real LSTM architecture.
        """
        return self.Generator_MLP(traces_point, latent_dim, label_cls)

    def Discriminator_LSTM(self,traces_point,label_cls):
        """Discriminator used for the 'LSTM' network choice.

        NOTE(review): the original body was a byte-for-byte copy of
        Discriminator_MLP (no recurrent layers); delegate to it to remove the
        duplication. TODO: implement a real LSTM architecture.
        """
        return self.Discriminator_MLP(traces_point, label_cls)