from threading import Thread
from torchvision.transforms import Compose, ToPILImage, Normalize
from torch.autograd import Variable
import torch
from NNserver.myNN import CapsNet
from torch.nn.parallel import DataParallel


DEVICE_IDS = [0]


class FC_Para:
    """Shared state container for the NN server.

    Groups the dataset handles, sample-preview helpers, weight-file status,
    run-time control flags, the network/optimizer pair, and the statistics
    that the worker thread writes back for polling.
    """

    def __init__(self):
        # -- state: dataset loading --
        self.dset_on = False
        self.cfp_path = ""
        self.dset_rcfp = None
        self.dset_vcfp = None
        self.dload_rcfp = None
        self.dload_vcfp = None
        # -- state: sample preview --
        self.sam_idx = 0
        # Invert the training-time normalisation, then convert to a PIL image.
        self.sam_trans = Compose([
            Normalize([-2.5] * 3, [5.0] * 3),
            ToPILImage(),
        ])
        self.sam_tsr = None
        self.sam_img = None
        # -- state: weight file --
        self.nn_wsave = False
        self.nn_wfile = None
        self.nn_wfok = False
        # -- state: run parameters --
        self.nnt_epoch = 1
        # -- state: network status flags / progress counters --
        self.nn_workon = False
        self.nn_mode = 1
        self.nn_nowepoch = 0
        self.nn_nowbatch = 0
        self.nn_sn = False
        self.nn_aok = False
        self.nn_lok = False
        # -- state: network objects and per-batch tensors --
        self.nn_net = DataParallel(CapsNet().cuda(), device_ids=DEVICE_IDS)
        self.nn_lr = 0.0001
        self.nn_opt = torch.optim.Adam(self.nn_net.parameters(), lr=self.nn_lr)
        self.nn_inputs = None
        self.nn_flags = None
        self.nn_blocks = None
        self.nn_outputs = None
        self.nn_outcor = None
        # -- state: accumulated statistics --
        self.a_num = None
        self.a_cor = None
        self.a_rate = None
        self.a_loss = None
        self.a_losses = None
        # -- worker thread handle --
        self.th = None

    def Th_run(self, ths):
        """Launch a worker thread executing job *ths* (see FC_Thread.run)."""
        worker = FC_Thread(self, ths)
        self.th = worker
        worker.start()


class FC_Thread(Thread):
    """Worker thread that executes one long-running NN job.

    The job reads from, and writes all progress/result state back to, the
    shared parameter object ``self.p`` (an ``FC_Para``), so the owning side
    can poll progress and fetch results without joining the thread.
    """

    def __init__(self, obj, ths):
        """
        obj: shared FC_Para-style state object (net, loaders, flags, stats).
        ths: job selector consumed by run() -- 1 = train, 2 = CFP validation.
        """
        super(FC_Thread, self).__init__()
        # Thread control
        self.p = obj
        self.thr_state = ths

    def run(self):
        # 1: neural-network training
        # 2: neural-network CFP validation
        if self.thr_state == 1:
            self.NN_train()
        elif self.thr_state == 2:
            self.NN_vcfp()

    def NN_train(self):
        """Train self.p.nn_net for self.p.nnt_epoch epochs over dload_rcfp.

        Appends each epoch's mean loss to self.p.a_losses, sets nn_lok on
        normal completion and always clears nn_workon.  Honours the stop
        signal self.p.nn_sn between batches.
        """
        self.p.nn_lok = False
        self.p.a_losses = []
        self.p.nn_net.train()
        ds_len = len(self.p.dset_rcfp)
        # Epoch loop (nn_nowepoch lives on self.p so progress can be polled).
        for self.p.nn_nowepoch in range(self.p.nnt_epoch):
            self.p.nn_nowbatch = 0
            dl_iter = enumerate(self.p.dload_rcfp, 0)
            # Per-epoch loss accumulator.
            self.p.a_loss = torch.zeros(1).cuda()
            for self.p.nn_nowbatch, self.p.nn_inputs in dl_iter:
                # Move the batch onto the GPU (Variable is a no-op wrapper on
                # torch >= 0.4, kept only for backward compatibility).
                self.p.nn_inputs = Variable(self.p.nn_inputs.view(-1, 3, 112, 112).cuda())
                # Forward / backward / optimizer step.
                self.p.nn_opt.zero_grad()
                self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                b_loss = self.p.nn_net.module.MyLoss(self.p.nn_outputs)
                b_loss.backward()
                print(b_loss)
                self.p.nn_opt.step()
                # Batch statistics: detach so the accumulator does not keep
                # every batch's autograd graph alive (memory leak otherwise).
                self.p.a_loss += b_loss.detach()
                # Check the stop signal.
                if self.p.nn_sn:
                    self.p.nn_sn = False
                    self.p.nn_inputs = None
                    self.p.nn_outputs = None
                    self.p.nn_workon = False
                    return
            # End of epoch: record the mean loss as a plain float.
            # NOTE(review): this divides the summed per-batch losses by the
            # sample count, not the batch count -- confirm the scaling is
            # intended.
            self.p.a_loss /= ds_len
            self.p.a_loss = self.p.a_loss.cpu().item()
            self.p.a_losses.append(self.p.a_loss)
        # Normal completion.
        self.p.nn_inputs = None
        self.p.nn_outputs = None
        self.p.nn_lok = True
        self.p.nn_workon = False

    def NN_vcfp(self):
        """Validate self.p.nn_net over dload_vcfp, tallying per-block accuracy.

        Fills a_num / a_cor / a_rate (indices 0-9 = per block, index 10 =
        total) as plain Python lists, sets nn_aok on normal completion and
        always clears nn_workon.  Honours the stop signal self.p.nn_sn
        between batches.
        """
        # Reset status flag and progress counters.
        self.p.nn_aok = False
        self.p.nn_nowepoch = 0
        self.p.nn_nowbatch = 0
        ds_len = len(self.p.dset_vcfp)
        # Sample counts: indices 0-9 hold the per-block size, index 10 the
        # total.  NOTE(review): assumes the validation set splits evenly into
        # 10 equal blocks -- confirm ds_len is a multiple of 10.
        self.p.a_num = torch.zeros(11, dtype=torch.int).cuda()
        self.p.a_num[:10] = ds_len // 10
        self.p.a_num[10] = ds_len
        dl_iter = enumerate(self.p.dload_vcfp, 0)
        self.p.a_cor = torch.zeros(11, dtype=torch.int).cuda()
        self.p.nn_net.eval()
        # Validation batch loop; no_grad avoids building autograd graphs
        # (and paying their memory cost) during inference.
        with torch.no_grad():
            for self.p.nn_nowbatch, (self.p.nn_inputs, self.p.nn_flags, self.p.nn_blocks) in dl_iter:
                print('epoch:{:0>4d},batch:{:0>4d}'.format(self.p.nn_nowepoch + 1, self.p.nn_nowbatch + 1))
                # Move the batch onto the GPU.
                self.p.nn_inputs = Variable(self.p.nn_inputs.view(-1, 3, 112, 112).cuda())
                self.p.nn_flags = Variable(self.p.nn_flags.cuda())
                self.p.nn_blocks = Variable(self.p.nn_blocks.cuda())
                self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                # Batch statistics: count correct predictions per block.
                self.p.nn_outcor = self.p.nn_net.module.MyCor(self.p.nn_outputs, self.p.nn_flags)
                for i in range(10):
                    self.p.a_cor[i] += ((self.p.nn_blocks == i) & self.p.nn_outcor).sum()
                # Check the stop signal.
                if self.p.nn_sn:
                    self.p.nn_sn = False
                    self.p.nn_inputs = None
                    self.p.nn_flags = None
                    self.p.nn_blocks = None
                    self.p.nn_outputs = None
                    self.p.nn_outcor = None
                    self.p.nn_workon = False
                    return
        # Aggregate statistics.  Explicit float division: plain `/` on int
        # tensors truncated to zero on older torch and changed semantics in
        # torch 1.6, so cast first to get the accuracy rate on any version.
        self.p.a_cor[10] = self.p.a_cor.sum()
        self.p.a_rate = self.p.a_cor.float() / self.p.a_num.float()
        self.p.a_num = self.p.a_num.cpu().numpy().tolist()
        self.p.a_cor = self.p.a_cor.cpu().numpy().tolist()
        self.p.a_rate = self.p.a_rate.cpu().numpy().tolist()
        # Normal completion.
        self.p.nn_inputs = None
        self.p.nn_flags = None
        self.p.nn_blocks = None
        self.p.nn_outputs = None
        self.p.nn_outcor = None
        self.p.nn_aok = True
        self.p.nn_workon = False
