"""
@Time    :  2020/11/22 16:19
@Author  :  Sun_Z_Z  
@FileName:  train_model.py
@Institution:   Scut214
"""
import os

import cv2
import torch
from torch import nn, optim
import numpy as np
import torch.utils.data as data

# Path to the label file: one "<filename>:<label>" line per sample,
# parsed by RedDataset via np.loadtxt with ':' as the delimiter.
txt_file = 'full_label.txt'


class RedDataset(data.Dataset):
    """Dataset of (image, label) pairs listed in ``txt_file``.

    Each line of the label file is ``<filename>:<label>``.  Images are
    read from ``self.root`` with OpenCV (BGR, uint8) and returned in
    (C, H, W) channel order; the label is returned as a plain int.
    """

    def __init__(self):
        # Root directory holding the image files referenced by txt_file.
        self.root = r'H:\Dataset\huawei_reid\test.tar\test\test\gallery'
        # Loaded as an (N, 2) array of [filename, label] string pairs.
        self.imgs = np.loadtxt(txt_file, dtype=str, delimiter=':')

    def __getitem__(self, index, class_num=20):
        """Return the sample at ``index`` as ``(img, label)``.

        ``img`` is a uint8 array transposed HWC -> CHW for torch conv
        layers; ``label`` is the integer class from the label file.
        ``class_num`` is unused; kept for backward compatibility.
        """
        name, label = self.imgs[index]
        img_path = os.path.join(self.root, name)
        img = cv2.imread(img_path)
        # cv2.imread silently returns None for a missing or unreadable
        # file; fail loudly instead of crashing later on .transpose.
        if img is None:
            raise FileNotFoundError('could not read image: %s' % img_path)
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        return img, int(label)

    def __len__(self):
        """Number of samples listed in the label file."""
        return len(self.imgs)


class RedBuleNet(nn.Module):
    """Small CNN classifying (N, 3, 256, 128) images into 2 classes.

    ``forward`` returns raw logits.  The original version ended both
    branches with ``nn.Softmax(dim=1)`` while training uses
    ``nn.CrossEntropyLoss``, which applies log-softmax internally —
    a double softmax that flattens gradients and stalls training.
    Removing the Softmax layers is checkpoint-compatible because
    Softmax has no parameters, so state_dict keys are unchanged.
    """

    def __init__(self):
        super(RedBuleNet, self).__init__()
        # Branch used by forward(): four stride-2 conv stages
        # (3 -> 8 -> 16 -> 32 -> 64), a 16x8 max-pool that collapses
        # the final feature map to 1x1, then a 2-layer linear head.
        self.conv = nn.Sequential(
            nn.Conv2d(3, 8, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.Conv2d(8, 16, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 32, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 64, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d((16, 8)),
            nn.Flatten(),
            nn.Linear(64, 128),
            # nn.Dropout(0.5),
            nn.Linear(128, 2),  # logits; CrossEntropyLoss applies softmax
        )
        # NOTE(review): conv2 is defined but never used by forward();
        # kept so existing attribute access and checkpoints stay valid.
        self.conv2 = nn.Sequential(
            nn.Conv2d(3, 32, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 64, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 128, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 256, (3, 3), padding=1, stride=2),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d((16, 8)),
            nn.Flatten(),
            nn.Linear(256, 512),
            # nn.Dropout(0.5),
            nn.Linear(512, 2),  # logits; CrossEntropyLoss applies softmax
        )

    def forward(self, x):
        """Return (N, 2) class logits for x of shape (N, 3, 256, 128)."""
        return self.conv(x)


if __name__ == '__main__':
    EPOCHS = 200
    # Select a device instead of calling .cuda() unconditionally, which
    # crashes on machines without a GPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_dataset = RedDataset()
    # shuffle=True: the original shuffle=False visited samples in the
    # same fixed order every epoch, which biases minibatch training.
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=8,
        shuffle=True,
        pin_memory=torch.cuda.is_available(),
        num_workers=0,
    )
    net = RedBuleNet().to(device)
    loss_func = nn.CrossEntropyLoss()
    net.train()
    # optimizer_ft = optim.SGD(net.parameters(), lr=0.00001, momentum=0.9, nesterov=True)
    optimizer_ft = optim.Adam(net.parameters(), lr=0.000001)
    for e in range(EPOCHS):
        # Unpack directly: the original `for step, data in ...` shadowed
        # the module alias `data` (torch.utils.data).
        for step, (imgs, labels) in enumerate(train_loader):
            imgs = imgs.type(torch.float).to(device)
            labels = labels.long().to(device)
            optimizer_ft.zero_grad()
            logits = net(imgs)
            loss = loss_func(logits, labels)
            loss.backward()
            optimizer_ft.step()
            if step % 20 == 0:
                # .item() extracts the Python float from the 0-d tensor.
                print("Epoch:%d loss:%.5f" % (e, loss.item()))
    # NOTE(review): saving the whole module pickles the class by import
    # path; net.state_dict() would be more portable — kept for
    # compatibility with existing loading code.
    torch.save(net, 'RedBuleNet-Adam-0.01.pt')