
"""
implement a shuffleNet by pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from tqdm import tqdm
from config import opt
from visualize import Visualizer

# Runtime setup: default tensor constructor and compute device.
dtype = torch.FloatTensor
# NOTE(review): this is a plain string, not a torch.device, and the model
# below is never moved to it — so on a CUDA machine test() would feed GPU
# tensors to a CPU model. Verify intended device handling.
device = ("cuda:0" if torch.cuda.is_available() else "cpu")

# step2: data
# Preprocessing: force single-channel grayscale, fixed 224x224 input (the
# network's avg-pool assumes this size), then normalize.
# NOTE(review): mean/std look dataset-specific — confirm they were computed
# from the training set.
trans = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.2458], std=[0.0612])
])

# Datasets laid out one sub-directory per class (ImageFolder convention).
train_data = ImageFolder(opt.train_data_root, transform=trans)
# NOTE(review): val_data is constructed but no DataLoader is ever built for
# it and it is never used below — dead code or missing validation loop?
val_data = ImageFolder(opt.validate_data_root, transform=trans)
train_dataloader = DataLoader(train_data, opt.batch_size, shuffle=True)
test_data = ImageFolder(opt.test_data_root, transform=trans)
# NOTE(review): shuffling the test set is harmless for the aggregate metrics
# computed in test(), but unusual — confirm it is intentional.
test_dataloader = DataLoader(test_data, opt.batch_size, shuffle=True)


def shuffle_channels(x, groups):
    """Channel-shuffle a 4-D tensor of shape (N, C, H, W).

    Splits the C channels into `groups` equal groups, transposes the
    group and per-group-channel axes, and flattens back, interleaving
    channels across groups. C must be divisible by `groups`.
    """
    n, c, h, w = x.size()
    assert c % groups == 0
    per_group = c // groups
    # (N, C, H, W) -> (N, g, C/g, H, W) -> swap axes 1,2 -> flatten back
    grouped = x.view(n, groups, per_group, h, w)
    shuffled = grouped.transpose(1, 2).contiguous()
    return shuffled.view(n, c, h, w)

class ShuffleNetUnitA(nn.Module):
    """Stride-1 ShuffleNet unit with an additive (residual) shortcut.

    Pipeline: 1x1 group conv -> BN -> ReLU -> channel shuffle ->
    3x3 depthwise conv -> BN -> 1x1 group conv -> BN -> add input -> ReLU.
    Requires in_channels == out_channels (identity shortcut) and
    out_channels divisible by 4 (1/4 bottleneck ratio).
    """

    def __init__(self, in_channels, out_channels, groups=3):
        super(ShuffleNetUnitA, self).__init__()
        assert in_channels == out_channels
        assert out_channels % 4 == 0
        mid = out_channels // 4  # bottleneck width
        self.groups = groups
        self.group_conv1 = nn.Conv2d(in_channels, mid, 1,
                                     groups=groups, stride=1)
        self.bn2 = nn.BatchNorm2d(mid)
        self.depthwise_conv3 = nn.Conv2d(mid, mid, 3, padding=1,
                                         stride=1, groups=mid)
        self.bn4 = nn.BatchNorm2d(mid)
        self.group_conv5 = nn.Conv2d(mid, out_channels, 1,
                                     stride=1, groups=groups)
        self.bn6 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = F.relu(self.bn2(self.group_conv1(x)))
        # channel shuffle, inlined: group / transpose / flatten
        n, c, h, w = y.size()
        y = y.view(n, self.groups, c // self.groups, h, w)
        y = y.transpose(1, 2).contiguous().view(n, c, h, w)
        y = self.bn4(self.depthwise_conv3(y))
        y = self.bn6(self.group_conv5(y))
        return F.relu(x + y)

class ShuffleNetUnitB(nn.Module):
    """Stride-2 (downsampling) ShuffleNet unit with a concat shortcut.

    The main branch halves the spatial size via a stride-2 depthwise conv;
    the shortcut is a stride-2 3x3 average pool of the input. Outputs are
    concatenated along channels, so the branch only needs to produce
    out_channels - in_channels channels.
    """

    def __init__(self, in_channels, out_channels, groups=3):
        super(ShuffleNetUnitB, self).__init__()
        branch_channels = out_channels - in_channels  # shortcut supplies the rest
        assert branch_channels % 4 == 0
        mid = branch_channels // 4  # bottleneck width
        self.groups = groups
        self.group_conv1 = nn.Conv2d(in_channels, mid, 1,
                                     groups=groups, stride=1)
        self.bn2 = nn.BatchNorm2d(mid)
        self.depthwise_conv3 = nn.Conv2d(mid, mid, 3, padding=1,
                                         stride=2, groups=mid)
        self.bn4 = nn.BatchNorm2d(mid)
        self.group_conv5 = nn.Conv2d(mid, branch_channels, 1,
                                     stride=1, groups=groups)
        self.bn6 = nn.BatchNorm2d(branch_channels)

    def forward(self, x):
        y = F.relu(self.bn2(self.group_conv1(x)))
        # channel shuffle, inlined: group / transpose / flatten
        n, c, h, w = y.size()
        y = y.view(n, self.groups, c // self.groups, h, w)
        y = y.transpose(1, 2).contiguous().view(n, c, h, w)
        y = self.bn4(self.depthwise_conv3(y))
        y = self.bn6(self.group_conv5(y))
        shortcut = F.avg_pool2d(x, 3, stride=2, padding=1)
        return F.relu(torch.cat([shortcut, y], dim=1))

class ShuffleNet(nn.Module):
    """ShuffleNet (g=3 configuration) classifier for 224x224 inputs.

    Stages: 24 -> 240 -> 480 -> 960 channels with 4/8/4 units per stage;
    the first unit of each stage downsamples (UnitB), the rest are
    residual units (UnitA).

    forward() now returns RAW LOGITS. The previous version applied
    F.softmax here, which was wrong twice over: train() feeds the output
    to nn.CrossEntropyLoss, which applies log-softmax internally (a
    softmax-of-softmax flattens gradients and cripples training), and
    F.softmax was called without an explicit `dim`. Returning logits fixes
    training; argmax-based prediction in test() is unaffected because
    softmax is monotonic within each row.

    NOTE(review): the `groups` parameter is accepted but the stages
    hard-code groups=3 — confirm whether other group counts should be
    supported.
    """

    def __init__(self, groups=3, in_channels=1, num_classes=2):
        super(ShuffleNet, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, 24, 3, stride=2, padding=1)
        stage2_seq = [ShuffleNetUnitB(24, 240, groups=3)] + \
            [ShuffleNetUnitA(240, 240, groups=3) for i in range(3)]
        self.stage2 = nn.Sequential(*stage2_seq)
        stage3_seq = [ShuffleNetUnitB(240, 480, groups=3)] + \
            [ShuffleNetUnitA(480, 480, groups=3) for i in range(7)]
        self.stage3 = nn.Sequential(*stage3_seq)
        stage4_seq = [ShuffleNetUnitB(480, 960, groups=3)] + \
                     [ShuffleNetUnitA(960, 960, groups=3) for i in range(3)]
        self.stage4 = nn.Sequential(*stage4_seq)
        self.fc = nn.Linear(960, num_classes)

    def forward(self, x):
        net = self.conv1(x)                              # 224 -> 112
        net = F.max_pool2d(net, 3, stride=2, padding=1)  # 112 -> 56
        net = self.stage2(net)                           # 56 -> 28
        net = self.stage3(net)                           # 28 -> 14
        net = self.stage4(net)                           # 14 -> 7
        # Assumes a 224x224 input so the feature map is 7x7 here — TODO confirm.
        net = F.avg_pool2d(net, 7)
        net = net.view(net.size(0), -1)
        logits = self.fc(net)
        return logits

model = ShuffleNet()


def train():
    """Train the module-level `model` with SGD + cross-entropy.

    Prints a per-sample average loss each epoch and snapshots the full
    model every 5 epochs. nn.CrossEntropyLoss expects raw logits and
    integer class labels, which is what forward() / ImageFolder provide.
    """
    # step3: criterion and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr,
                                weight_decay=opt.weight_decay)

    # train
    for epoch in range(opt.max_epoch):
        loss_runtime, total = 0.0, 0
        for data, label in tqdm(train_dataloader):
            batch = label.size(0)
            total += batch
            optimizer.zero_grad()
            score = model(data)
            loss = criterion(score, label)
            loss.backward()
            optimizer.step()
            # criterion returns the batch MEAN; weight it by the batch size
            # so the epoch figure is a true per-sample average. (The old
            # code summed batch means and divided by the sample count,
            # underreporting the loss by roughly a factor of batch_size.)
            loss_runtime += loss.item() * batch
        loss_runtime /= max(total, 1)  # guard against an empty dataloader
        print("loss is :%5f" % loss_runtime)
        if epoch % 5 == 0:
            # NOTE(review): saves the whole pickled module (not a
            # state_dict) and assumes ./snapshot/ already exists — confirm.
            torch.save(model, "./snapshot/loss={}.pth".format(loss_runtime))


def test():
    """Evaluate the module-level `model` on test_dataloader.

    Prints accuracy, the false-positive rate (误判) and the
    false-negative rate (漏判). Assumes binary integer labels with
    class 1 as the "positive" class.
    """
    model.to(device)  # keep parameters on the same device as the inputs
    model.eval()      # use BN running stats, not per-batch stats
    correct, total = 0, 0
    FN, FP, TP, TN = 0, 0, 0, 0
    with torch.no_grad():
        for x, y in test_dataloader:
            x = x.to(device)
            y = y.to(device)
            y_pred = model(x)
            _, prediction = torch.max(y_pred, dim=1)

            # acc
            correct += (prediction == y).sum().item()
            total += y.size(0)

            # Confusion-matrix counts. Compare against y and literal class
            # ids directly: the old code built fixed-size torch.zeros(64) /
            # torch.ones(64) reference tensors, which broke for any batch
            # whose size was not exactly 64 (e.g. the final partial batch).
            FN += ((prediction == 0) & (y == 1)).sum().item()  # label 1, predicted 0
            FP += ((prediction == 1) & (y == 0)).sum().item()  # label 0, predicted 1
            TP += ((prediction == 1) & (y == 1)).sum().item()
            TN += ((prediction == 0) & (y == 0)).sum().item()
        acc = correct / total
        print("accuracy on test set is :%5f" % acc)
        lp = FN / total
        wp = FP / total
        print("误判：{}".format(wp))
        print("漏判：{}".format(lp))



if __name__ == "__main__":
   #train()
    model = torch.load("snapshot/loss=0.005085706725716591.pth")
    test()
