import pandas as pd
import numpy as np
import torch
from torch import nn
from sklearn.model_selection import KFold

# Pre-processed SF-crime data produced by an earlier preprocessing step:
# feature matrices plus one-hot encoded labels, stored as .npy files.
# allow_pickle=True suggests the arrays may contain object dtype — TODO confirm.
train_features = np.load('./data/train_features.npy', allow_pickle=True)
train_labels = np.load('./data/train_labels_onehot.npy', allow_pickle=True)
test_features = np.load('./data/test_features.npy', allow_pickle=True)

num_inputs = 21   # feature dimension fed to the model's first layer
num_outputs = 39  # number of output classes (presumably the 39 SF-crime categories)


class Residual(nn.Module):
    """Fully connected residual block: Linear -> ReLU -> BatchNorm1d, added
    to the (optionally projected) input.

    When the input and output widths differ, a 1-layer Linear projection
    (`self.right`) maps the skip connection to the output width; otherwise
    the input is added through unchanged.
    """

    def __init__(self, num_inputs, num_outputs):
        super(Residual, self).__init__()
        self.middle_L = nn.Linear(num_inputs, num_outputs)
        # BUG FIX: nn.ReLU's only constructor argument is `inplace`; the
        # original passed num_outputs, which is truthy and silently enabled
        # inplace=True. Use the default out-of-place ReLU.
        self.middle_R = nn.ReLU()
        if num_inputs != num_outputs:
            # Projection for the skip path when widths don't match.
            self.right = nn.Linear(num_inputs, num_outputs)
        else:
            self.right = None
        self.middle_B = nn.BatchNorm1d(num_outputs)

    def forward(self, X):
        # Main path: Linear -> ReLU -> BatchNorm.
        Y = self.middle_B(self.middle_R(self.middle_L(X)))
        # Explicit identity test (clearer than truthiness on an nn.Module).
        if self.right is not None:
            X = self.right(X)
        return Y + X


class build_model(nn.Module):
    """Deep residual MLP classifier.

    Nine Residual blocks taper the width 1024 -> 512 -> 256 -> 128 -> 64,
    with two Dropout layers interleaved, followed by a Linear projection to
    `num_outputs` and a Softmax over the last dimension.

    NOTE(review): emitting softmax probabilities and taking log() in the
    loss is less numerically stable than LogSoftmax + NLL, but the existing
    contract (probability outputs) is preserved here.
    """

    def __init__(self, num_inputs, num_outputs, dp=0.5):
        super(build_model, self).__init__()
        # Construction order is kept identical to the original so that
        # RNG-based weight initialization is unchanged.
        stack = (
            ('Residual1', Residual(num_inputs, 1024)),
            ('Residual2', Residual(1024, 512)),
            ('Residual3', Residual(512, 512)),
            ('Residual4', Residual(512, 256)),
            ('Dropout1', nn.Dropout(dp)),
            ('Residual5', Residual(256, 256)),
            ('Residual6', Residual(256, 128)),
            ('Residual7', Residual(128, 128)),
            ('Residual8', Residual(128, 64)),
            ('Dropout2', nn.Dropout(dp)),
            ('Residual9', Residual(64, 64)),
            ('Linear-out', nn.Linear(64, num_outputs)),
            ('Softmax', nn.Softmax(dim=-1)),
        )
        self.net = nn.Sequential()
        for module_name, module in stack:
            self.net.add_module(module_name, module)

    def forward(self, x):
        """Run the whole stack; returns per-class probabilities."""
        return self.net(x)


class MultiClassLogLoss(torch.nn.Module):
    """Multi-class log loss against one-hot (or soft) targets.

    Returns the UNREDUCED elementwise tensor -y_true * log(y_pred + 1e-15)
    divided by the batch size; callers are expected to `.sum()` it to get
    the scalar mean log loss. A tiny epsilon guards against log(0).
    """

    def __init__(self):
        super(MultiClassLogLoss, self).__init__()

    def forward(self, y_pred, y_true):
        batch_size = y_true.shape[0]
        # Epsilon keeps log() finite when a predicted probability is 0.
        log_probs = torch.log(y_pred.float() + 1.00000000e-15)
        return -(y_true * log_probs) / batch_size


class sf_crime():
    """Training/evaluation driver for the SF-crime classifier.

    NOTE(review): this class reads module-level globals (`net`, `loss`,
    `optimizer`, `device`, `train_features`, `train_labels`,
    `test_features`) instead of taking them as parameters, so it must be
    instantiated only after those globals exist. Constructing an instance
    immediately starts training via self.run().
    """

    def __init__(self, num_epochs, k_fold_num, batch_size, k_fold):
        # num_epochs: passes over the training data.
        # k_fold_num: number of folds for sklearn KFold (used when k_fold=True).
        # batch_size: mini-batch size used by make_iter's DataLoader.
        # k_fold:     toggles K-fold cross-validation in run().
        self.num_epochs = num_epochs
        self.k_fold_num = k_fold_num
        self.batch_size = batch_size
        self.k_fold = k_fold
        self.run()  # side effect: training begins inside the constructor

    def make_iter(self, train_features, train_labels):
        """Wrap numpy feature/label arrays in a shuffling DataLoader on the global `device`."""
        train_features = torch.tensor(train_features.astype(np.float32), dtype=torch.float).to(device)
        train_labels = torch.tensor(train_labels).to(device)
        dataset = torch.utils.data.TensorDataset(train_features, train_labels)
        return torch.utils.data.DataLoader(dataset, self.batch_size, shuffle=True)

    def show_loss(self, features, labels, team):
        """Print the global net's loss on (features, labels).

        `team` is the label printed before the number ('训练集' = training
        set, '测试集' = test set).
        NOTE(review): loss_num / n averages the *per-batch summed* losses,
        so the printed value depends on batch_size; it is not a strict
        per-sample mean.
        """
        net.eval()  # disable dropout / use BatchNorm running statistics
        batch = self.make_iter(features, labels)
        loss_num = 0
        n = 0
        for x, y in batch:
            # loss() returns an unreduced tensor; .sum() collapses it to a scalar.
            loss_num += loss(net(x), y).sum().item()
            n += 1
        print(team, end=' ')
        print('loss:', loss_num / n)

    def train(self, features, labels):
        """Run one optimization epoch over (features, labels), then print the training loss."""
        net.train()  # enable dropout / BatchNorm batch statistics
        train_iter = self.make_iter(features, labels)
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
        self.show_loss(features, labels, '训练集')

    def run(self):
        """Full training loop: plain epochs, or K-fold cross-validation when self.k_fold is set."""
        if self.k_fold:
            # NOTE(review): sklearn's KFold requires n_splits >= 2, so
            # k_fold_num == 1 would raise here if k_fold were True.
            kf = KFold(n_splits=self.k_fold_num, shuffle=True)
            for epoch in range(self.num_epochs):
                fold_num = 0
                for train_index, test_index in kf.split(train_features):
                    X_train, X_test = train_features[train_index], train_features[
                        test_index]
                    y_train, y_test = train_labels[train_index], train_labels[
                        test_index]
                    # '第%d轮的第%d折' = 'epoch %d, fold %d'
                    print('第%d轮的第%d折：' % (epoch + 1, fold_num + 1))
                    fold_num += 1
                    self.train(X_train, y_train)
                    self.show_loss(X_test, y_test, '测试集')
        else:
            for epoch in range(self.num_epochs):
                # '第%d轮' = 'epoch %d'
                print('第%d轮：' % (epoch + 1))
                self.train(train_features, train_labels)

    def write(self, version):
        """Predict on the global test set, write a Kaggle submission CSV under
        working/, and save the whole trained net with torch.save."""
        net.eval()
        # A DataLoader over a bare tensor yields row batches of 1024.
        test_iter = torch.utils.data.DataLoader(
            torch.tensor(test_features.astype(np.float32),
                         dtype=torch.float).to(device), 1024, shuffle=False
        )

        testResult = [line for x in test_iter for line in net(x).cpu().detach().numpy()]
        # Column names (class labels) are taken from the sample submission;
        # its first column is the Id and is skipped.
        sampleSubmission = pd.read_csv('input/sf-crime/sampleSubmission.csv.zip')
        Result_pd = pd.DataFrame(testResult,
                                 index=sampleSubmission.index,
                                 columns=sampleSubmission.columns[1:])
        Result_pd.to_csv('working/sampleSubmission(' + str(version) + ').csv', index_label='Id')
        torch.save(net, 'working/net(' + str(version) + ').pkl')
        print('Finish!')


# Device selection: prefer CUDA, then Apple-Silicon MPS, otherwise CPU.
# BUG FIX: the original fell back to "mps" unconditionally whenever CUDA was
# unavailable, which crashes on hosts that have neither accelerator.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")

# Module-level globals consumed by the sf_crime class.
loss = MultiClassLogLoss().to(device)
net = build_model(num_inputs, num_outputs).to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

# Constructing sf_crime immediately trains the network (see sf_crime.run);
# write() then dumps the Kaggle submission CSV and the pickled model.
sf_crime_7 = sf_crime(num_epochs=1, k_fold_num=1, batch_size=128, k_fold=False)
sf_crime_7.write('v4')

# The submitted scores were produced on a Kaggle GPU with these same
# hyper-parameters; they are not reproduced here because CPU training is too
# slow and the code/hardware have since changed.
# notebook: https://www.kaggle.com/doublepoi/a-nn-with-residual-v4
# One trained net is kept under the /working directory.

if __name__ == '__main__':
    print()
