from threading import Thread
from torchvision.transforms import Compose, ToPILImage, Normalize
from NNserver.myDset import BF75_Dset
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch
from NNserver.myFCNN import AlexNet
from torch.nn.functional import softmax
from torch.nn import CrossEntropyLoss
from torch.nn.parallel import DataParallel


# DataLoader batch size, shared by all three split loaders; the optimizer's
# learning rate is also scaled by this value (see FC_Para.__init__).
DL_BSIZE = 512
# GPU ids handed to DataParallel (the server has two 1080Ti cards).
DEVICE_IDS = [0, 1]


class FC_Para():
    """Shared state container for the NN server.

    Holds the datasets, the model, the optimizer, runtime flags and the
    collected statistics.  A client-driven protocol reads and writes these
    attributes; background work is performed by spawning a fresh FC_Thread
    through Th_run().
    """

    def __init__(self):
        # State - dataset loading
        self.dset_on = False        # whether the datasets have been loaded
        self.dset_path = ""         # dataset root directory (received from the client)
        self.dset_train = None      # training set
        self.dset_test = None       # test set
        self.dset_valid = None      # validation set
        self.dload_train = None     # training-set DataLoader
        self.dload_test = None      # test-set DataLoader
        self.dload_valid = None     # validation-set DataLoader
        # State - sample display
        self.sam_index = None       # which split the sample comes from (received from the client)
        self.sam_count = None       # sample index within the split (received from the client)
        # After a sample is taken from the dataset, map it back to a PILImage
        # (the inverse of the dataset's transform).  Normalize(-2.5, 5.0) is
        # the inverse of a Normalize(mean=0.5, std=0.2)-style forward step --
        # presumably matching BF75_Dset's preprocessing; TODO confirm.
        self.sam_trans = Compose(
            [Normalize([-2.5, -2.5, -2.5], [5.0, 5.0, 5.0]), ToPILImage()])
        self.sam_tsr = None         # sample image (tensor form)
        self.sam_img = None         # sample image (PILImage form)
        self.sam_tar = None         # sample label (0~74)
        self.sam_str = ''           # sample label (class-name string)
        # State - weight file
        self.nn_wsave = False       # True = save the weights, False = load them
        self.nn_wfile = None        # weight file (dict with 's_net' / 's_opt' state_dicts)
        self.nn_lw_ok = False       # whether loading the weight file succeeded
        # State - network status
        self.nn_workon = False      # whether AlexNet is currently running
        self.nn_nowepoch = 0        # current epoch
        self.nn_nowbatch = 0        # current batch
        self.nn_stopnow = False     # stop-request signal for the running loop
        self.nn_acur_ok = False     # whether accuracy data is ready to read
        self.nn_loss_ok = False     # whether loss data is ready to read
        self.nn_one_ok = False      # whether the single-sample output is ready to read
        # State - work parameters
        self.nn_input = 0           # which dataset to run on (received from the client)
        self.nn_needback = False    # whether to backpropagate (train)
        self.nn_ifstatis = False    # whether to collect accuracy statistics
        self.nn_epoch = 1           # number of epochs to execute
        self.nn_needloss = False    # whether to collect loss statistics
        # State - network data
        self.nn_net = AlexNet().cuda()      # the network itself
        self.nn_net = DataParallel(self.nn_net, device_ids=DEVICE_IDS)  # dual-GPU training (server has two 1080Ti)
        self.nn_loss = CrossEntropyLoss()   # cross-entropy loss
        self.nn_lr = 0.0005     # base (per-sample) learning rate
        self.nn_opt = torch.optim.SGD(self.nn_net.parameters(), lr=self.nn_lr * DL_BSIZE)   # optimizer lr scales with batch size
        self.nn_inputs = None       # one batch of images (n*3*224*224)
        self.nn_inlabs = None       # one batch of labels (n*75)
        self.nn_outputs = None      # network output (n*75)
        self.nn_onelabs = None      # single-sample one-hot label as a plain list (internal)
        self.nn_oneouts = None      # single-sample softmax output as a plain list (internal)
        # State - statistics
        self.sta_top1 = None        # scratch: top-1 predictions
        self.sta_top5 = None        # scratch: top-5 predictions
        self.sta_correct = None     # scratch: per-sample correctness mask
        self.sta_epo_num = None     # sample counts in one epoch, split into [top1, top5]
        self.sta_epo_cor = None     # correct counts in one epoch
        self.sta_epo_rate = None    # accuracy in one epoch
        self.sta_nums = None        # list of per-epoch sample counts
        self.sta_cors = None        # list of per-epoch correct counts
        self.sta_rates = None       # list of per-epoch accuracies
        self.sta_bth_loss = None    # loss of the current batch
        self.sta_epo_loss = None    # mean loss over one epoch
        self.sta_losses = None      # list of per-epoch losses
        # Thread scheduling
        self.th = None      # worker thread object

    def Th_run(self, ths):
        """Start a fresh worker thread executing task `ths` (see FC_Thread.run)."""
        self.th = FC_Thread(self, ths)
        self.th.start()

# A Python Thread object can only be run once, so a fresh instance is
# created for every task.
class FC_Thread(Thread):
    """Worker thread that performs one task against the shared FC_Para state.

    The task id is fixed at construction time; results are communicated back
    through the attributes of the FC_Para instance (`self.p`).
    """

    def __init__(self, obj, ths):
        super(FC_Thread, self).__init__()
        # Thread control
        self.p = obj            # back-reference to the FC_Para() instance
        self.thr_state = ths    # which task to execute (see run())
        self.thr_ok = False     # True once the task has finished

    def run(self):
        # Task ids:
        # 0: initial state (nothing to do)
        # 1: load the datasets
        # 2: fetch a sample for display
        # 3: save/load the weight file
        # 4: run the neural network
        if self.thr_state == 1:
            self.Dset_init()
        elif self.thr_state == 2:
            self.Sam_get()
        elif self.thr_state == 3:
            self.Wfile_sl()
        elif self.thr_state == 4:
            self.NN_opa()
        self.thr_ok = True

    def Dset_init(self):
        """Build the train/test/valid datasets and their DataLoaders.

        On any failure, p.dset_on stays False so the client can retry.
        """
        try:
            self.p.dset_train = BF75_Dset(self.p.dset_path, 'train')
            self.p.dset_test = BF75_Dset(self.p.dset_path, 'test')
            self.p.dset_valid = BF75_Dset(self.p.dset_path, 'valid')
        except Exception as e:
            print(e)
            self.p.dset_on = False
            return
        # Only the training loader shuffles; evaluation order stays fixed.
        self.p.dload_train = DataLoader(
            self.p.dset_train, batch_size=DL_BSIZE, shuffle=True, drop_last=False)
        self.p.dload_test = DataLoader(
            self.p.dset_test, batch_size=DL_BSIZE, shuffle=False, drop_last=False)
        self.p.dload_valid = DataLoader(
            self.p.dset_valid, batch_size=DL_BSIZE, shuffle=False, drop_last=False)
        self.p.dset_on = True

    def Sam_get(self):
        """Fetch one sample (tensor, PILImage, label) from the split
        selected by p.sam_index (0=train, 1=test, 2=valid)."""
        if self.p.sam_index == 0:
            self.p.sam_tsr, self.p.sam_tar = self.p.dset_train[self.p.sam_count]
        elif self.p.sam_index == 1:
            self.p.sam_tsr, self.p.sam_tar = self.p.dset_test[self.p.sam_count]
        elif self.p.sam_index == 2:
            self.p.sam_tsr, self.p.sam_tar = self.p.dset_valid[self.p.sam_count]
        self.p.sam_img = self.p.sam_trans(self.p.sam_tsr)
        self.p.sam_tsr = self.p.sam_tsr.cuda()
        self.p.sam_tar = torch.tensor(self.p.sam_tar).cuda()
        # Class names are looked up on the train split — assumes all splits
        # share the same class list; TODO confirm against BF75_Dset.
        self.p.sam_str = self.p.dset_train.classes[self.p.sam_tar]

    def Wfile_sl(self):
        """Save the model+optimizer state into p.nn_wfile, or load it back,
        depending on p.nn_wsave.  p.nn_lw_ok reports the load outcome."""
        if self.p.nn_wsave:
            self.p.nn_wfile = {'s_net': self.p.nn_net.state_dict(),
                                's_opt': self.p.nn_opt.state_dict()}
        else:
            try:
                self.p.nn_net.load_state_dict(self.p.nn_wfile['s_net'])
                self.p.nn_opt.load_state_dict(self.p.nn_wfile['s_opt'])
            except Exception as e:
                print(e)
                self.p.nn_lw_ok = False
            else:
                self.p.nn_lw_ok = True

    def NN_opa(self):
        """Run the network for p.nn_epoch epochs on the selected input.

        Flags control the work done: nn_needback (train with SGD),
        nn_ifstatis (top-1/top-5 accuracy statistics) and nn_needloss
        (per-epoch mean loss).  Results are appended to the sta_* lists and
        the *_ok flags are raised when the data is ready for the client.
        """
        self.p.nn_workon = True
        self.p.nn_stopnow = False
        self.p.nn_acur_ok = False
        self.p.nn_loss_ok = False
        self.p.nn_one_ok = False
        self.p.sta_nums = []
        self.p.sta_cors = []
        self.p.sta_rates = []
        self.p.sta_losses = []
        # If not backpropagating, leave train mode off for speed.
        self.p.nn_net.train(self.p.nn_needback)
        for self.p.nn_nowepoch in range(self.p.nn_epoch):
            self.p.nn_nowbatch = 0
            print('epoch:{:0>4d}'.format(self.p.nn_nowepoch))
            # Pick the iterator: 0 = single display sample, 1/2/3 = train/test/valid.
            if self.p.nn_input == 0:
                loader_iter = enumerate([0], 0)   # dummy one-element iterable
            elif self.p.nn_input == 1:
                loader_iter = enumerate(self.p.dload_train, 0)
            elif self.p.nn_input == 2:
                loader_iter = enumerate(self.p.dload_test, 0)
            elif self.p.nn_input == 3:
                loader_iter = enumerate(self.p.dload_valid, 0)
            # Reset the per-epoch statistics.
            if self.p.nn_ifstatis:
                self.p.sta_epo_num = torch.zeros(2, dtype=torch.int).cuda()
                self.p.sta_epo_cor = torch.zeros(2, dtype=torch.int).cuda()
            if self.p.nn_needloss:
                self.p.sta_epo_loss = torch.zeros(1).cuda()
            for self.p.nn_nowbatch, datas in loader_iter:
                # Load the batch onto the GPU.
                if self.p.nn_input == 0:
                    self.p.nn_inputs = self.p.sam_tsr.unsqueeze(0)
                    self.p.nn_inlabs = self.p.sam_tar.unsqueeze(0)
                else:
                    self.p.nn_inputs, self.p.nn_inlabs = datas
                    self.p.nn_inputs = self.p.nn_inputs.cuda()
                    self.p.nn_inlabs = self.p.nn_inlabs.cuda()
                # Forward pass (and backward when training).
                if self.p.nn_needback:
                    # drop_last=False: the final batch may be smaller, so the
                    # batch-size-proportional lr is rescaled every batch.
                    self.p.nn_opt.param_groups[0]['lr'] = self.p.nn_lr * self.p.nn_inlabs.size(dim=0)
                    self.p.nn_opt.zero_grad()
                self.p.nn_outputs = self.p.nn_net(self.p.nn_inputs)
                # BUGFIX: compute the loss whenever it is needed — previously
                # it was computed only under nn_needback, so running with
                # nn_needloss=True and nn_needback=False accumulated a
                # stale/None sta_bth_loss below.
                if self.p.nn_needback or self.p.nn_needloss:
                    self.p.sta_bth_loss = self.p.nn_loss(self.p.nn_outputs, self.p.nn_inlabs)
                if self.p.nn_needback:
                    loss_val = self.p.sta_bth_loss.cpu().detach().item()
                    print(loss_val)
                    # Divergence guard: abort the run if the loss explodes.
                    if loss_val >= 10000.0:
                        print(self.p.nn_outputs[0])
                        self.p.nn_stopnow = True
                    self.p.sta_bth_loss.backward()
                    self.p.nn_opt.step()
                # Per-batch accuracy statistics.
                if self.p.nn_ifstatis:
                    self.p.sta_epo_num[0] += self.p.nn_inlabs.size(dim=0)
                    self.p.sta_epo_num[1] += self.p.nn_inlabs.size(dim=0)
                    self.p.sta_top1 = torch.max(self.p.nn_outputs, dim=1).indices
                    self.p.sta_correct = (self.p.sta_top1 == self.p.nn_inlabs)
                    self.p.sta_epo_cor[0] += self.p.sta_correct.sum()
                    self.p.sta_top5 = torch.topk(self.p.nn_outputs, 5, dim=1, largest=True).indices
                    # Reshape labels to (n, 1) so they broadcast against the
                    # five top-5 columns in the eq() below.
                    self.p.nn_inlabs = self.p.nn_inlabs.view(-1, 1)
                    self.p.sta_epo_cor[1] += torch.eq(self.p.sta_top5, self.p.nn_inlabs).sum()
                if self.p.nn_needloss:
                    # BUGFIX: detach before accumulating; otherwise
                    # sta_epo_loss keeps the entire epoch's autograd graph
                    # alive, growing GPU memory every batch.
                    self.p.sta_epo_loss += self.p.sta_bth_loss.detach()
                # Honour the client's stop request.
                if self.p.nn_stopnow:
                    self.p.nn_stopnow = False
                    self.p.nn_workon = False
                    return
            # End of epoch.
            if self.p.nn_input == 0:
                # Publish the single sample's one-hot label and softmax
                # output as plain Python lists for the client.
                self.p.nn_onelabs = torch.zeros(75)
                self.p.nn_onelabs[self.p.sam_tar] = 1.0
                self.p.nn_onelabs = self.p.nn_onelabs.numpy().tolist()
                self.p.nn_oneouts = self.p.nn_outputs.cpu().detach().squeeze(0)
                self.p.nn_oneouts = softmax(self.p.nn_oneouts, dim=0)
                self.p.nn_oneouts = self.p.nn_oneouts.numpy().tolist()
            if self.p.nn_ifstatis:
                self.p.sta_epo_rate = self.p.sta_epo_cor / self.p.sta_epo_num
                self.p.sta_epo_num = self.p.sta_epo_num.cpu().numpy().tolist()
                self.p.sta_epo_cor = self.p.sta_epo_cor.cpu().numpy().tolist()
                self.p.sta_epo_rate = self.p.sta_epo_rate.cpu().numpy().tolist()
                self.p.sta_nums.append(self.p.sta_epo_num)
                self.p.sta_cors.append(self.p.sta_epo_cor)
                self.p.sta_rates.append(self.p.sta_epo_rate)
            if self.p.nn_needloss:
                # nn_nowbatch holds the last batch index, so +1 = batch count.
                self.p.sta_epo_loss /= (self.p.nn_nowbatch + 1)
                self.p.sta_epo_loss = self.p.sta_epo_loss.cpu().detach().item()
                self.p.sta_losses.append(self.p.sta_epo_loss)
        # Work finished: raise the data-ready flags for the client.
        if self.p.nn_ifstatis:
            self.p.nn_acur_ok = True
        if self.p.nn_needloss:
            self.p.nn_loss_ok = True
        if self.p.nn_input == 0:
            self.p.nn_one_ok = True
        self.p.nn_workon = False
