from threading import Thread
from torchvision.transforms import Compose, ToPILImage, Normalize
from torch.autograd import Variable
import torch
from NNserver.myNN import ZFNet
from torch.nn import CrossEntropyLoss
from torch.nn.parallel import DataParallel
from copy import deepcopy


# GPU device ids passed to DataParallel when wrapping the network (see FC_Para).
DEVICE_IDS = [0, 1]


class FC_Para():
    """Shared state container for the NN server.

    Holds dataset handles, sample-display state, weight-file flags, run
    options, the live network objects (model / loss / optimizer) and the
    statistics produced by the two run modes.  A single instance is shared
    between the controller and the background ``FC_Thread`` worker, which
    reads and writes these fields; the controller polls the ``*_ok`` and
    ``nn_workon`` flags for progress.
    """

    def __init__(self):
        # State - dataset loading
        self.dset_on = False         # True once a dataset has been loaded
        self.dset_path = ""          # path of the loaded dataset
        self.dset_train = None       # training Dataset
        self.dset_test = None        # test Dataset
        self.dload_train = None      # training DataLoader
        self.dload_test = None       # test DataLoader
        # State - sample display
        self.sam_index = None        # index of the currently displayed sample
        self.sam_count = None        # number of available samples
        # Undoes the dataset normalization, then converts to a PIL image:
        # Normalize(mean=-2.5, std=5.0) maps x -> x/5 + 0.5, i.e. it inverts
        # a forward Normalize(mean=0.5, std=0.2) -- NOTE(review): confirm
        # against the dataset's actual forward transform.
        self.sam_trans = Compose(
            [Normalize([-2.5, -2.5, -2.5], [5.0, 5.0, 5.0]), ToPILImage()])
        self.sam_tsr = None          # sample tensor
        self.sam_img = None          # sample PIL image (after sam_trans)
        self.sam_tar = None          # sample target/label
        self.sam_str = ''            # sample description string
        # State - weight file
        self.nn_wsave = False        # save-weights request flag
        self.nn_wfile = None         # weight-file handle/path
        self.nn_wfok = False         # weight-file operation finished
        # State - network run options (mode 1)
        self.nno1_dset = 0           # dataset selector: 0 = train, 1 = test
        self.nno1_back = False       # run backprop (training) when True
        self.nno1_epoch = 1          # number of epochs to run
        self.nno1_acur = False       # collect top-1/top-5 accuracy per epoch
        self.nno1_loss = False       # collect mean loss per epoch
        # State - network status
        self.nn_workon = False       # worker thread is running
        self.nn_ostate = 1           # operation state code -- NOTE(review): semantics not visible here
        self.nn_nowepoch = 0         # current epoch (written by the worker)
        self.nn_nowbatch = 0         # current batch (written by the worker)
        self.nn_sn1 = False          # abort signal for mode 1
        self.nn_a1_ok = False        # mode-1 accuracy results ready
        self.nn_l1_ok = False        # mode-1 loss results ready
        self.nn_sn2 = False          # abort signal for mode 2
        self.nn_a2_ok = False        # mode-2 accuracy results ready
        self.nn_l2_ok = False        # mode-2 loss results ready
        # State - network objects
        self.nn_net = DataParallel(ZFNet().cuda(), device_ids=DEVICE_IDS)
        # reduction='sum': the worker divides by the dataset size itself.
        self.nn_loss = CrossEntropyLoss(reduction='sum')
        self.nn_lr = 0.0005          # SGD learning rate
        self.nn_opt = torch.optim.SGD(self.nn_net.parameters(), lr=self.nn_lr)
        self.nn_inputs = None        # current input batch
        self.nn_inlabs = None        # current label batch
        self.nn_outputs = None       # current network output
        # State - network statistics (mode 1)
        self.a1_top1 = None          # per-batch top-1 predictions
        self.a1_top5 = None          # per-batch top-5 predictions
        self.a1_ctop1 = None         # per-batch top-1 correctness mask
        self.a1_lab5 = None          # labels reshaped for top-5 comparison
        self.a1_num = None           # [top1, top5] sample totals
        self.a1_cor = None           # [top1, top5] correct counts (current epoch)
        self.a1_rate = None          # [top1, top5] accuracy (current epoch)
        self.a1_nums = None          # per-epoch history of a1_num
        self.a1_cors = None          # per-epoch history of a1_cor
        self.a1_rates = None         # per-epoch history of a1_rate
        self.a1_loss = None          # mean loss (current epoch)
        self.a1_losses = None        # per-epoch history of a1_loss
        # State - network statistics (mode 2)
        self.a2_top1 = None          # per-batch top-1 predictions
        self.a2_top5 = None          # per-batch top-5 predictions
        self.a2_ctop1 = None         # per-batch top-1 correctness mask
        self.a2_lab5 = None          # labels reshaped for top-5 comparison
        self.a2_num = None           # [train-1, train-5, test-1, test-5] totals
        self.a2_cor = None           # [train-1, train-5, test-1, test-5] correct counts
        self.a2_rate = None          # corresponding accuracies (current epoch)
        self.a2_nums = None          # per-epoch history of a2_num
        self.a2_cors = None          # per-epoch history of a2_cor
        self.a2_rates = None         # per-epoch history of a2_rate
        self.a2_loss = None          # mean training loss (current epoch)
        self.a2_losses = None        # per-epoch history of a2_loss
        self.a2_maxcor = None        # best test top-1 correct count so far
        self.a2_maxepo = 0           # epoch index of the best test result
        self.a2_maxwf = None         # snapshot of best net/optimizer state dicts
        # Thread scheduling
        self.th = None               # the running FC_Thread, if any

    def Th_run(self, ths):
        """Start a background ``FC_Thread`` worker in mode ``ths`` (1 or 2)."""
        self.th = FC_Thread(self, ths)
        self.th.start()


class FC_Thread(Thread):
    """Worker thread that runs the neural network in one of two modes.

    Mode 1 (``ths == 1``): a configurable pass over the train or test set,
    optionally with backprop and accuracy/loss statistics.
    Mode 2 (``ths == 2``): full training with per-epoch test evaluation,
    best-weights tracking and early stopping.

    All state lives on the shared ``FC_Para`` object ``obj``; the controller
    polls its flags (``nn_workon``, ``nn_a1_ok``, ...) for progress and sets
    ``nn_sn1`` / ``nn_sn2`` to abort a run between batches.
    """

    def __init__(self, obj, ths):
        super(FC_Thread, self).__init__()
        # Thread control: shared parameter object and requested run mode.
        self.p = obj
        self.thr_state = ths

    def run(self):
        # 1: run the network in mode 1 (single configurable pass)
        # 2: run the network in mode 2 (train with early stopping)
        if self.thr_state == 1:
            self.NN_opa1()
        elif self.thr_state == 2:
            self.NN_opa2()

    def NN_opa1(self):
        """Mode 1: iterate ``nno1_epoch`` epochs over the selected dataset.

        Honors the option flags on ``self.p``:
          * ``nno1_dset`` -- 0: train set, otherwise: test set
          * ``nno1_back`` -- run backprop (training) when True
          * ``nno1_acur`` -- collect top-1/top-5 accuracy per epoch
          * ``nno1_loss`` -- collect mean loss per epoch
        Per-epoch results are appended to ``a1_nums`` / ``a1_cors`` /
        ``a1_rates`` / ``a1_losses``.
        """
        self.p.nn_a1_ok = False
        self.p.nn_l1_ok = False
        self.p.a1_nums = []
        self.p.a1_cors = []
        self.p.a1_rates = []
        self.p.a1_losses = []
        self.p.nn_net.train(self.p.nno1_back)
        # FIX: use else so any non-zero selector falls back to the test set
        # instead of leaving ds_len/dl_iter unbound (NameError).
        if self.p.nno1_dset == 0:
            ds_len = len(self.p.dset_train)
        else:
            ds_len = len(self.p.dset_test)
        if self.p.nno1_acur:
            self.p.a1_num = torch.zeros(2, dtype=torch.int).cuda()
            # Both top-1 and top-5 totals equal the dataset size.
            self.p.a1_num[0] = self.p.a1_num[1] = ds_len
        # Epoch loop
        for self.p.nn_nowepoch in range(self.p.nno1_epoch):
            self.p.nn_nowbatch = 0
            if self.p.nno1_dset == 0:
                dl_iter = enumerate(self.p.dload_train, 0)
            else:
                dl_iter = enumerate(self.p.dload_test, 0)
            # Reset per-epoch statistics.
            if self.p.nno1_acur:
                self.p.a1_cor = torch.zeros(2, dtype=torch.int).cuda()
            if self.p.nno1_loss:
                self.p.a1_loss = torch.zeros(1).cuda()
            for self.p.nn_nowbatch, (self.p.nn_inputs, self.p.nn_inlabs) in dl_iter:
                # Move the batch to the GPU (Variable wrapper dropped: it is
                # a deprecated no-op in modern PyTorch).
                self.p.nn_inputs = self.p.nn_inputs.cuda()
                self.p.nn_inlabs = self.p.nn_inlabs.cuda()
                # Forward (gradients only when training) and optional backward.
                with torch.set_grad_enabled(self.p.nno1_back):
                    if self.p.nno1_back:
                        self.p.nn_opt.zero_grad()
                    self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                    if self.p.nno1_back:
                        b_loss = self.p.nn_loss(self.p.nn_outputs, self.p.nn_inlabs)
                        b_loss.backward()
                        self.p.nn_opt.step()
                    elif self.p.nno1_loss:
                        # FIX: was hard-coded to torch.tensor(0.0), so
                        # evaluation-only runs always reported zero loss.
                        b_loss = self.p.nn_loss(self.p.nn_outputs, self.p.nn_inlabs)
                # Batch statistics.
                if self.p.nno1_acur:
                    self.p.a1_top1 = torch.max(self.p.nn_outputs, dim=1).indices
                    self.p.a1_ctop1 = (self.p.a1_top1 == self.p.nn_inlabs)
                    self.p.a1_cor[0] += self.p.a1_ctop1.sum()
                    self.p.a1_top5 = torch.topk(self.p.nn_outputs, 5, dim=1, largest=True).indices
                    self.p.a1_lab5 = self.p.nn_inlabs.view(-1, 1)
                    self.p.a1_cor[1] += torch.eq(self.p.a1_top5, self.p.a1_lab5).sum()
                if self.p.nno1_loss:
                    # FIX: detach so the running sum does not retain every
                    # batch's autograd graph for the whole epoch (memory leak).
                    self.p.a1_loss += b_loss.detach()
                # Check for the abort signal.
                if self.p.nn_sn1:
                    self.p.nn_sn1 = False
                    self.p.nn_workon = False
                    return
            # End of epoch: convert statistics to plain Python lists/floats.
            if self.p.nno1_acur:
                self.p.a1_rate = self.p.a1_cor / self.p.a1_num
                self.p.a1_cor = self.p.a1_cor.cpu().numpy().tolist()
                self.p.a1_rate = self.p.a1_rate.cpu().numpy().tolist()
                self.p.a1_nums.append(self.p.a1_num.cpu().numpy().tolist())
                self.p.a1_cors.append(self.p.a1_cor)
                self.p.a1_rates.append(self.p.a1_rate)
            if self.p.nno1_loss:
                # nn_loss uses reduction='sum', so divide by dataset size.
                self.p.a1_loss /= ds_len
                self.p.a1_loss = self.p.a1_loss.cpu().item()
                self.p.a1_losses.append(self.p.a1_loss)
        # Work finished: publish the result flags.
        if self.p.nno1_acur:
            self.p.nn_a1_ok = True
        if self.p.nno1_loss:
            self.p.nn_l1_ok = True
        self.p.nn_workon = False

    def NN_opa2(self):
        """Mode 2: train for up to 2000 epochs with early stopping.

        After every training epoch the net is evaluated on the test set.
        The weights with the best test top-1 correct count are snapshotted
        and restored at the end; training stops early once the best epoch
        is 20 or more epochs old.
        """
        # Initialize result containers and the best-weights snapshot.
        self.p.nn_a2_ok = False
        self.p.nn_l2_ok = False
        self.p.a2_nums = []
        self.p.a2_cors = []
        self.p.a2_rates = []
        self.p.a2_losses = []
        self.p.a2_maxcor = 0
        self.p.a2_maxepo = -1
        self.p.a2_maxwf = {'s_net': deepcopy(self.p.nn_net.state_dict()),
                           's_opt': deepcopy(self.p.nn_opt.state_dict())}
        ds_lens = [len(self.p.dset_train), len(self.p.dset_test)]
        # a2_num holds [train-top1, train-top5, test-top1, test-top5] totals.
        self.p.a2_num = torch.zeros(4, dtype=torch.int).cuda()
        self.p.a2_num[0] = self.p.a2_num[1] = ds_lens[0]
        self.p.a2_num[2] = self.p.a2_num[3] = ds_lens[1]
        # Epoch loop
        for self.p.nn_nowepoch in range(2000):
            self.p.nn_nowbatch = 0
            dl_iter = enumerate(self.p.dload_train, 0)
            self.p.a2_cor = torch.zeros(4, dtype=torch.int).cuda()
            self.p.a2_loss = torch.zeros(1).cuda()
            self.p.nn_net.train(True)
            # Training batch loop
            for self.p.nn_nowbatch, (self.p.nn_inputs, self.p.nn_inlabs) in dl_iter:
                # One training step.
                self.p.nn_inputs = self.p.nn_inputs.cuda()
                self.p.nn_inlabs = self.p.nn_inlabs.cuda()
                self.p.nn_opt.zero_grad()
                self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                b_loss = self.p.nn_loss(self.p.nn_outputs, self.p.nn_inlabs)
                b_loss.backward()
                self.p.nn_opt.step()
                # Training batch statistics (slots 0/1 of a2_cor).
                self.p.a2_top1 = torch.max(self.p.nn_outputs, dim=1).indices
                self.p.a2_ctop1 = (self.p.a2_top1 == self.p.nn_inlabs)
                self.p.a2_cor[0] += self.p.a2_ctop1.sum()
                self.p.a2_top5 = torch.topk(self.p.nn_outputs, 5, dim=1, largest=True).indices
                self.p.a2_lab5 = self.p.nn_inlabs.view(-1, 1)
                self.p.a2_cor[1] += torch.eq(self.p.a2_top5, self.p.a2_lab5).sum()
                # FIX: detach so the running loss sum does not retain every
                # batch's autograd graph for the whole epoch (memory leak).
                self.p.a2_loss += b_loss.detach()
                # Check for the abort signal.
                if self.p.nn_sn2:
                    self.p.nn_sn2 = False
                    self.p.nn_workon = False
                    return
            self.p.nn_net.train(False)
            # Test batch loop (slots 2/3 of a2_cor); no gradients needed.
            with torch.no_grad():
                for self.p.nn_inputs, self.p.nn_inlabs in self.p.dload_test:
                    self.p.nn_inputs = self.p.nn_inputs.cuda()
                    self.p.nn_inlabs = self.p.nn_inlabs.cuda()
                    self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                    self.p.a2_top1 = torch.max(self.p.nn_outputs, dim=1).indices
                    self.p.a2_ctop1 = (self.p.a2_top1 == self.p.nn_inlabs)
                    self.p.a2_cor[2] += self.p.a2_ctop1.sum()
                    self.p.a2_top5 = torch.topk(self.p.nn_outputs, 5, dim=1, largest=True).indices
                    self.p.a2_lab5 = self.p.nn_inlabs.view(-1, 1)
                    self.p.a2_cor[3] += torch.eq(self.p.a2_top5, self.p.a2_lab5).sum()
            # End of epoch: convert statistics to plain Python lists/floats.
            self.p.a2_rate = self.p.a2_cor / self.p.a2_num
            self.p.a2_cor = self.p.a2_cor.cpu().numpy().tolist()
            self.p.a2_rate = self.p.a2_rate.cpu().numpy().tolist()
            self.p.a2_nums.append(self.p.a2_num.cpu().numpy().tolist())
            self.p.a2_cors.append(self.p.a2_cor)
            self.p.a2_rates.append(self.p.a2_rate)
            # nn_loss uses reduction='sum', so divide by the train-set size.
            self.p.a2_loss /= ds_lens[0]
            self.p.a2_loss = self.p.a2_loss.cpu().item()
            self.p.a2_losses.append(self.p.a2_loss)
            # Best-weights tracking and early-stopping check.
            if self.p.a2_cor[2] > self.p.a2_maxcor:
                self.p.a2_maxcor = self.p.a2_cor[2]
                self.p.a2_maxepo = self.p.nn_nowepoch
                self.p.a2_maxwf = {'s_net': deepcopy(self.p.nn_net.state_dict()),
                                   's_opt': deepcopy(self.p.nn_opt.state_dict())}
            elif self.p.nn_nowepoch >= (self.p.a2_maxepo + 20):
                break
        # Wrap up: restore the best weights and publish the result flags.
        self.p.nn_net.load_state_dict(self.p.a2_maxwf['s_net'])
        self.p.nn_opt.load_state_dict(self.p.a2_maxwf['s_opt'])
        self.p.nn_a2_ok = True
        self.p.nn_l2_ok = True
        self.p.nn_workon = False
