from __future__ import print_function

import os
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
plt.ion()   # interactive mode: matplotlib renders without blocking the script

# Prefer the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def get_image_num(path, train_own_data):
    """Count all image files under ``path``'s ``images_background`` directory.

    Two directory layouts are supported:

    * ``train_own_data=True`` — flat layout, one sub-directory per class:
      ``images_background/<class>/<image files>``.
    * ``train_own_data=False`` — Omniglot-style two-level layout:
      ``images_background/<alphabet>/<character>/<image files>``.

    Args:
        path (str): Dataset root directory.
        train_own_data (bool): Selects the directory layout as described above.

    Returns:
        int: Total number of files found.
    """
    num = 0
    # Both layouts live under the same root; hoist the common join.
    train_path = os.path.join(path, 'images_background')
    if train_own_data:
        # Flat layout: every entry is a class directory full of images.
        for character in os.listdir(train_path):
            character_path = os.path.join(train_path, character)
            num += len(os.listdir(character_path))
    else:
        # Omniglot layout: alphabet directories contain character directories.
        for alphabet in os.listdir(train_path):
            alphabet_path = os.path.join(train_path, alphabet)
            for character in os.listdir(alphabet_path):
                character_path = os.path.join(alphabet_path, character)
                num += len(os.listdir(character_path))
    return num

# Model input size as (height, width, channels).
input_shape = [448, 448, 3]
dataset_path = "../datasets"

# 90/10 split of the total image count into train/validation.
train_ratio = 0.9
images_num = get_image_num(dataset_path, True)  # total number of images
num_train = int(images_num * train_ratio)  # number of training images
num_val = images_num - num_train  # number of validation images

from utils.dataloader import SiameseDataset, dataset_collate

# NOTE(review): only train_loader uses the custom collate_fn (train() later
# treats batches as numpy arrays), while test_loader uses the default collate
# and shuffling — confirm this asymmetry is intentional.
train_dataset   = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=True, train_own_data=True)
val_dataset     = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=False, train_own_data=True)
train_loader = DataLoader(train_dataset,batch_size=4,num_workers=0, pin_memory=True, drop_last=False, collate_fn=dataset_collate)
test_loader = DataLoader(val_dataset, batch_size=4, shuffle=True, num_workers=0)

class myNet(nn.Module):
    """CNN classifier preceded by a spatial transformer network (STN).

    The STN (``localization`` + ``fc_loc``) regresses a 2x3 affine matrix
    which is used to re-sample the input before classification. The
    classification backbone (conv1/conv2/fc1/fc2) is MNIST-style:
    1-channel input, 10 output classes, log-softmax output.

    NOTE(review): the backbone expects 1-channel input while
    ``localization`` expects 3-channel input, and ``forward`` never feeds
    the STN output into the backbone — see the notes below; this model
    looks unfinished and will not run as-is. TODO confirm intent.
    """
    def __init__(self):
        super(myNet, self).__init__()
        # Classification backbone. fc1's 320 input features correspond to a
        # 20 x 4 x 4 feature map (i.e. a 28x28, 1-channel input) — it will
        # not match the 3-channel 448x448 images configured above.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        # Spatial transformer localization network (3-channel input).
        self.localization = nn.Sequential(
            nn.Conv2d(3, 8, kernel_size=5,padding=1,stride=2),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 16, kernel_size=5,padding=1,stride=2),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(16, 32, kernel_size=3, stride=1),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(32, 32, kernel_size=2, stride=1),
            nn.MaxPool2d(2, stride=1),
            nn.ReLU(True)
        )
        # Regressor for the 3 * 2 affine transformation matrix.
        self.fc_loc = nn.Sequential(
            nn.Linear(32 * 3 * 3, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )
        # Initialize the regressor's last layer to the identity transform so
        # the STN initially leaves images unchanged.
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))# identity init: images pass through unchanged by default

    # Spatial transformer network forward function.
    def stn(self, x):
        """Apply the learned affine re-sampling to a batch of images.

        NOTE(review): the flatten below assumes the localization output is
        32 x 3 x 3, which only holds for one specific input spatial size —
        TODO confirm against the actual input resolution.
        """
        xs = self.localization(x)
        xs = xs.view(-1, 32 * 3 * 3)# -1 infers the batch dimension
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)
        grid = F.affine_grid(theta, x.size())# build the sampling grid from theta
        x = F.grid_sample(x, grid)# sample the input at grid locations (bilinear by default)
        return x

    def forward(self, x):
        # transform the input
        x0 = x[0]# left and right images of the pair
        x1 = x[1]

        x0 = self.stn(x0)
        x1 = self.stn(x1)


        # NOTE(review): x0/x1 computed above are never used — the pipeline
        # below consumes the raw input x (a pair, not a tensor), which will
        # fail at conv1. Presumably the transformed pair was meant to be
        # combined and fed forward; confirm and finish this.

        # Perform the usual forward pass.
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

# Instantiate the network on the selected device.
model = myNet().to(device)

# Plain SGD, fixed learning rate (no momentum, no scheduler).
optimizer = optim.SGD(model.parameters(), lr=0.01)

def train(epoch):
    """Run one training epoch over ``train_loader``.

    Args:
        epoch (int): 1-based epoch index, used only for progress logging.
    """
    model.train()
    for batch_idx, batch in enumerate(train_loader):
        # dataset_collate yields numpy arrays; convert without tracking grads.
        data, target = batch[0], batch[1]
        with torch.no_grad():
            data = torch.from_numpy(data).type(torch.FloatTensor)
            # F.nll_loss requires integer class indices; a FloatTensor
            # target raises at runtime.
            target = torch.from_numpy(target).type(torch.LongTensor)

        # For a 4-D (N, C, H, W) batch, `size` must give only the spatial
        # dims — passing (3, 224, 224) raises a ValueError.
        data = F.interpolate(data, size=(224, 224), mode='nearest')
        # Keep tensors on the same device as the model.
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 500 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# Train for a fixed number of epochs; evaluation is not wired up yet.
NUM_EPOCHS = 5
for epoch_idx in range(1, NUM_EPOCHS + 1):
    train(epoch_idx)