import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as pyplot
import time
import copy
import os.path

# Path to the pickled CNN checkpoint produced by the figure_identify trainer;
# loaded in main() via torch.load.
model_save_path = os.path.realpath('pytorch/jizhi/figure_identify/figure_identify_cnn_model')

# Root of the ImageFolder dataset used by TranNet (expects 'train'/'verify' subdirs).
data_path = os.path.realpath(r'pytorch/jizhi/figure_plus/data')
# Crop size for TranNet's transforms; also used to size CNN's first fc layer.
image_size = 224

def rightness(predictions, labels):
    """Count correct classifications in a batch.

    `predictions` holds per-class scores, one row per sample; a row is
    correct when its arg-max index equals the corresponding label.
    Returns (correct_count_tensor, batch_size).
    """
    predicted = predictions.data.argmax(dim=1)
    hits = (predicted == labels.data.view_as(predicted)).sum()
    return hits, len(labels)

class TranNet():
    """Transfer-learning trainer: fine-tunes a pretrained ResNet-18 on an
    ImageFolder dataset laid out as data_path/{train,verify}/<class>/*.

    The constructor builds augmentation pipelines and data loaders;
    exec()/model_prepare() runs the fine-tuning loop and plots the recorded
    validation accuracy.
    """
    def __init__(self):
        super(TranNet, self).__init__()

        # NOTE(review): RandomCrop(224) assumes every source image is at
        # least 224x224 -- confirm, or add a Resize before the crop.
        self.train_dataset = datasets.ImageFolder(os.path.join(data_path, 'train'), transforms.Compose([
            transforms.RandomCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # ImageNet channel statistics, required by the pretrained backbone.
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]))
        self.verify_dataset = datasets.ImageFolder(os.path.join(data_path, 'verify'), transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]))
        self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=4, shuffle=True, num_workers=4)
        self.verify_loader = torch.utils.data.DataLoader(self.verify_dataset, batch_size=4, shuffle=True, num_workers=4)
        self.num_classes = len(self.train_dataset.classes)

    def exec(self):
        self.model_prepare()

    def model_prepare(self):
        """Fine-tune all ResNet-18 parameters and track validation accuracy."""
        net = models.resnet18(pretrained=True)

        # Bug fix: replace the classifier head BEFORE moving the model to the
        # GPU -- previously net.cuda() ran first, leaving the new fc on the
        # CPU.  Also size the head from the dataset instead of hard-coding 2.
        num_features = net.fc.in_features
        net.fc = nn.Linear(num_features, self.num_classes)

        use_cuda = torch.cuda.is_available()
        if use_cuda:
            net = net.cuda()
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

        record = []
        num_epochs = 3
        for epoch in range(num_epochs):
            net.train(True)  # enable dropout / batch-norm updates
            train_rights = []
            train_losses = []
            for batch_index, (data, target) in enumerate(self.train_loader):
                # Bug fix: batches must live on the same device as the model.
                if use_cuda:
                    data, target = data.cuda(), target.cuda()
                output = net(data)
                loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_rights.append(rightness(output, target))
                # Bug fix: .item() works on CPU and GPU; .data.numpy() raised
                # on CUDA tensors.
                train_losses.append(loss.item())
                if batch_index % 400 == 0:
                    # Periodic validation pass in eval mode, without gradients.
                    net.eval()
                    verify_rights = []
                    with torch.no_grad():
                        for data_v, target_v in self.verify_loader:
                            if use_cuda:
                                data_v, target_v = data_v.cuda(), target_v.cuda()
                            output_v = net(data_v)
                            verify_rights.append(rightness(output_v, target_v))
                    verify_accu = float(sum(row[0] for row in verify_rights)) / sum(row[1] for row in verify_rights)
                    record.append(verify_accu)
                    print(f'verify data accu:{verify_accu}')
                    net.train(True)
        # Plot the recorded validation-accuracy curve.
        pyplot.figure(figsize=(8, 6))
        pyplot.plot(record)
        pyplot.xlabel('step')
        # Bug fix: the recorded values are accuracies, not losses.
        pyplot.ylabel('verify accuracy')
        pyplot.show()
# Number of output classes for the standalone CNN below (MNIST digits 0-9).
num_classes = 10

class CNN(nn.Module):
    """Two-layer conv classifier: conv-pool-conv-pool -> fc -> log-softmax.

    NOTE(review): the fully-connected sizing uses the module-level
    image_size (224), yet the conv weights are later copied into
    FigurePlusNet which processes 28x28 MNIST images -- confirm image_size
    matches the images this net is actually trained on.
    """
    def __init__(self):
        super(CNN, self).__init__()

        self.depth = [4, 8]  # number of conv kernels in each conv layer
        # Consistency fix: use self.depth[0] instead of a literal 4 so the
        # layer widths stay in sync with the depth configuration.
        self.conv1 = nn.Conv2d(1, self.depth[0], 5, padding=2)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(self.depth[0], self.depth[1], 5, padding=2)
        # Two 2x2 poolings shrink each spatial side by a factor of 4.
        self.fc1 = nn.Linear((image_size // 4)**2 * self.depth[1], 512)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1-channel images."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten conv features for the classifier head.
        x = x.view(-1, (image_size // 4)**2 * self.depth[1])
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)  # active only in train mode
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

    def retrieve_features(self, x):
        """Return post-ReLU activations of both conv layers (for inspection)."""
        feature_map1 = F.relu(self.conv1(x))
        x = self.pool(feature_map1)
        feature_map2 = F.relu(self.conv2(x))
        return feature_map1, feature_map2

class FigurePlusNet(nn.Module):
    """Siamese conv net that predicts the SUM of the digits in two MNIST images.

    Two weight-independent conv towers (net1_*, net2_*) encode one image
    each; the flattened features are concatenated and regressed to a single
    scalar through fc1..fc4.  The constructor also builds all MNIST loaders
    (downloads the dataset on first run).
    """
    def __init__(self):
        super(FigurePlusNet, self).__init__()
        self.image_size = 28
        self.num_classes = 10
        self.num_epochs = 6
        self.batch_size = 64

        # data prepare: MNIST train/test splits
        self.train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
        self.test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor())

        # Two independent shuffles of the same train set so loaders a and b
        # pair random digit images together.
        sampler_a = torch.utils.data.sampler.SubsetRandomSampler(np.random.permutation(range(len(self.train_dataset))))
        sampler_b = torch.utils.data.sampler.SubsetRandomSampler(np.random.permutation(range(len(self.train_dataset))))

        self.train_loader_a = torch.utils.data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=False, sampler=sampler_a)
        self.train_loader_b = torch.utils.data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=False, sampler=sampler_b)

        # First verify_size test images form the validation split; the rest
        # of the test set is used for the final test evaluation.
        self.verify_size = 5000
        verify_index_a = range(self.verify_size)
        verify_index_b = np.random.permutation(range(self.verify_size))
        test_index_a = range(self.verify_size, len(self.test_dataset))
        test_index_b = np.random.permutation(test_index_a)

        verify_sampler_a = torch.utils.data.sampler.SubsetRandomSampler(verify_index_a)
        verify_sampler_b = torch.utils.data.sampler.SubsetRandomSampler(verify_index_b)
        test_sampler_a = torch.utils.data.sampler.SubsetRandomSampler(test_index_a)
        test_sampler_b = torch.utils.data.sampler.SubsetRandomSampler(test_index_b)

        self.verify_loader_a = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, sampler=verify_sampler_a)
        self.verify_loader_b = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, sampler=verify_sampler_b)
        self.test_loader_a = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, sampler=test_sampler_a)
        self.test_loader_b = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, sampler=test_sampler_b)

        # net prepare
        depth = (4, 8)  # conv kernel counts per layer
        self.depth = depth
        self.net1_conv1 = nn.Conv2d(1, depth[0], 5, padding=2)
        self.net_pool = nn.MaxPool2d(2, 2)  # stateless, shared by both towers
        self.net1_conv2 = nn.Conv2d(depth[0], depth[1], 5, padding=2)

        self.net2_conv1 = nn.Conv2d(1, depth[0], 5, padding=2)
        self.net2_conv2 = nn.Conv2d(depth[0], depth[1], 5, padding=2)
        # 2x input width: the two towers' flattened features are concatenated.
        self.fc1 = nn.Linear(2 * (self.image_size // 4) ** 2 * depth[1], 1024)
        self.fc2 = nn.Linear(1024, 2 * self.num_classes)
        self.fc3 = nn.Linear(2 * self.num_classes, self.num_classes)
        self.fc4 = nn.Linear(self.num_classes, 1)

    def forward(self, x, y, training=True):
        """Regress the digit sum from two image batches.

        `training` is unused (kept for caller compatibility); dropout
        follows self.training as set by net.train()/net.eval().
        """
        x, y = F.relu(self.net1_conv1(x)), F.relu(self.net2_conv1(y))
        x, y = self.net_pool(x), self.net_pool(y)
        x, y = F.relu(self.net1_conv2(x)), F.relu(self.net2_conv2(y))
        x, y = self.net_pool(x), self.net_pool(y)
        x = x.view(-1, (self.image_size // 4) ** 2 * self.depth[1])
        y = y.view(-1, (self.image_size // 4) ** 2 * self.depth[1])
        z = torch.cat((x, y), 1)
        z = self.fc1(z)
        z = F.relu(z)
        z = F.dropout(z, training=self.training)
        z = F.relu(self.fc2(z))
        z = F.relu(self.fc3(z))
        return F.relu(self.fc4(z))

    def gpu_ok(self):
        """Return True when CUDA is available.

        Bug fix: this used to assign dead locals and implicitly return None,
        so every `if net.gpu_ok()` branch in main() was dead and the model
        never moved to the GPU.
        """
        return torch.cuda.is_available()

    def copy_origin_weight(self, net):
        """Copy conv weights/biases from a trained CNN into both towers."""
        for conv in (self.net1_conv1, self.net2_conv1):
            conv.weight.data = copy.deepcopy(net.conv1.weight.data)
            conv.bias.data = copy.deepcopy(net.conv1.bias.data)
        for conv in (self.net1_conv2, self.net2_conv2):
            conv.weight.data = copy.deepcopy(net.conv2.weight.data)
            conv.bias.data = copy.deepcopy(net.conv2.bias.data)

    def copy_origin_weight_nograd(self, net):
        """Copy conv weights and freeze them (exclude from gradient updates)."""
        self.copy_origin_weight(net)
        for conv in (self.net1_conv1, self.net1_conv2, self.net2_conv1, self.net2_conv2):
            conv.weight.requires_grad = False
            conv.bias.requires_grad = False

def main():
    """Train FigurePlusNet to predict the sum of two MNIST digits.

    Loads conv weights from the saved figure_identify CNN checkpoint, trains
    with MSE loss on label sums, periodically reports validation loss and
    accuracy, then evaluates on the held-out test split and plots the
    recorded accuracy.
    """
    # TranNet().exec()
    net = FigurePlusNet()
    print(model_save_path)
    # NOTE(review): torch.load of a whole pickled model requires the CNN
    # class to be importable and the checkpoint file to exist at this path.
    origin_net = torch.load(model_save_path)
    
    # Seed both conv towers with the pretrained conv weights.
    net.copy_origin_weight(origin_net)
    # net.copy_origin_weight_nograd(origin_net)
    criterion = nn.MSELoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
    
    # NOTE(review): every CUDA branch below depends on gpu_ok() returning a
    # truthy value -- verify it returns torch.cuda.is_available(), not None.
    dtype = torch.cuda.FloatTensor if net.gpu_ok() else torch.FloatTensor
    itype = torch.cuda.LongTensor if net.gpu_ok() else torch.LongTensor
    net = net.cuda() if net.gpu_ok() else net
    
    # Shadows the module-level rightness(): here a sample is correct when the
    # rounded regression output equals the integer target sum.
    def rightness(y, target):
        out = torch.round(y).type(itype)
        out = out.eq(target).sum()
        return out, len(y)
    
    # if fixed net parameters, need revalue the optimizer
    '''
    new_parameters = []
    for param in net.parameters():
        if param.requires_grad:
            new_parameters.append(param)
    optimizer = optim.SGD(new_parameters, lr=0.0001, momentum=0.9)
    '''
    # train: each step pairs one batch from loader a with one from loader b
    # and regresses on the sum of their labels
    records = []
    for epoch in range(net.num_epochs):
        losses = []
        for index, data in enumerate(zip(net.train_loader_a, net.train_loader_b)):
            (x1, y1), (x2, y2) = data
            if net.gpu_ok():
                x1, y1, x2, y2 = x1.cuda(), y1.cuda(), x2.cuda(), y2.cuda()
            optimizer.zero_grad()
            net.train()
            outputs = net(x1.clone().detach(), x2.clone().detach())
            outputs = outputs.squeeze()
            labels = y1 + y2  # regression target: the digit sum (0..18)
            loss = criterion(outputs, labels.type(torch.float))
            loss.backward()
            optimizer.step()
            loss = loss.cpu() if net.gpu_ok() else loss
            losses.append(loss.data.numpy())
            if index % 300 == 0:
                # periodic validation pass over the verify loaders
                verify_losses = []
                rights = []
                net.eval()
                for verify_data in zip(net.verify_loader_a, net.verify_loader_b):
                    (x1, y1), (x2, y2) = verify_data
                    if net.gpu_ok():
                        x1, y1, x2, y2 = x1.cuda(), y1.cuda(), x2.cuda(), y2.cuda()
                    outputs = net(x1.clone().detach(), x2.clone().detach())
                    outputs = outputs.squeeze()
                    labels = y1 + y2
                    loss = criterion(outputs, labels.type(torch.float))
                    loss = loss.cpu() if net.gpu_ok() else loss
                    verify_losses.append(loss.data.numpy())
                    right = rightness(outputs.data, labels)
                    rights.append(right)
                right_ratio = 1.0 * np.sum([i[0] for i in rights]) / np.sum([i[1] for i in rights])
                print(f'no.{epoch}, {index}/{len(net.train_loader_a)}, train loss:{np.mean(losses)}, verify loss:{np.mean(verify_losses)}, accu: {right_ratio}')
                # records.append([np.mean(losses), np.mean(verify_losses), right_ratio])
                records.append([right_ratio])
    # plot train data (only accuracy is recorded, despite the y-axis label)
    pyplot.figure(figsize=(8, 6))
    pyplot.plot(records)
    pyplot.xlabel('step')  
    pyplot.ylabel('loss & accuracy')
    
    # test: final accuracy on the held-out portion of the MNIST test set
    rights = []
    net.eval()
    for test_data in zip(net.test_loader_a, net.test_loader_b):
        (x1, y1), (x2, y2) = test_data
        if net.gpu_ok():
            x1, y1, x2, y2 = x1.cuda(), y1.cuda(), x2.cuda(), y2.cuda()
        outputs = net(x1.clone().detach(), x2.clone().detach())
        outputs = outputs.squeeze()
        labels = y1 + y2
        loss = criterion(outputs, labels.type(torch.float))
        right = rightness(outputs, labels)
        rights.append(right)
    right_ratio = 1.0 * np.sum([i[0] for i in rights]) / np.sum([i[1] for i in rights])
    print(f'test accuracy: {right_ratio}')
    pyplot.show()
    
    

# Script entry point.
if __name__ == '__main__':
    main()