# -*- coding:utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt

import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data as Data

from preprocess_data import get_data
# Valid widths (divisors of 440): (1,2,4,5,8,10,11,20,22,40,44,55,88,110,220,440)
width = 44        # width of the reshaped 2-D feature map; 440 // width is its height
eps = 1e-8        # numerical guard against division by zero when standardising
num_class = 200   # number of target classes
# Pre-computed feature-selection result for this width.
parameters = np.load('saved_models/select/width-%s.npz'%width)
index = parameters['best_points']  # NOTE(review): loaded but not currently applied to the features — confirm intended

# Step 1. Prepare the data.
train_features_all,train_labels_all = get_data('训练集')  # training set
test_features,test_labels = get_data('测试集')            # test set

# Optional feature selection (currently disabled):
# train_features_all = train_features_all[:,index]
# test_features = test_features[:,index]

# Reshape the flat 440-dim feature vectors into (440//width, width) maps.
train_features_all = train_features_all.reshape(-1, 440 // width, width)
test_features = test_features.reshape(-1, 440 // width, width)


def _one_hot(labels, n_classes):
    """Return a (len(labels), n_classes) float one-hot matrix for 1-based labels."""
    onehot = np.zeros((labels.shape[0], n_classes))
    onehot[np.arange(labels.shape[0]), labels - 1] = 1
    return onehot


# Hold out the last 10% of the training data as a validation split.
split = train_features_all.shape[0] * 9 // 10
train_features = train_features_all[:split]
valid_features = train_features_all[split:]
train_labels = train_labels_all[:split]
valid_labels = train_labels_all[split:]

# Standardise using statistics computed on the training split only,
# to avoid leaking validation/test information.
std = np.std(train_features, axis=0, keepdims=True)
mean = np.mean(train_features, axis=0, keepdims=True)

train_features = (train_features - mean) / (std + eps)
valid_features = (valid_features - mean) / (std + eps)
test_features = (test_features - mean) / (std + eps)

# One-hot encode the (1-based) labels for BCE training.
train_labels_onehot = _one_hot(train_labels, num_class)
valid_labels_onehot = _one_hot(valid_labels, num_class)
test_labels_onehot = _one_hot(test_labels, num_class)

# Step 2. PyTorch model and training setup.
# Model definition.
class Net(nn.Module):
    """Two-convolution CNN classifier over reshaped feature maps.

    Input:  (batch, 440//width, width) tensors.
    Output: (batch, 200) class probabilities (softmax over dim 1).
    """

    def __init__(self, width):
        super(Net, self).__init__()
        self.width = width
        self.height = 440 // width
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5, padding=2)
        self.conv2_drop = nn.Dropout2d(p=0.5)  # channel-wise dropout after conv2
        self.dropout = nn.Dropout(0.5)
        # Each max_pool2d(kernel=3, stride=3, padding=1) maps a spatial size d
        # to (d + 2) // 3; it is applied twice, so precompute the flat size.
        pooled_w = ((self.width + 2) // 3 + 2) // 3
        pooled_h = ((self.height + 2) // 3 + 2) // 3
        self._flat_dim = 20 * pooled_w * pooled_h
        self.fc = nn.Linear(self._flat_dim, 200)  # output layer

    def forward(self, x):
        # Insert the singleton channel axis: (batch, 1, height, width).
        x = x.unsqueeze(1)
        x = F.relu(F.max_pool2d(self.conv1(x), 3, padding=1))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 3, padding=1))
        x = x.view(-1, self._flat_dim)
        return F.softmax(self.fc(x), dim=1)

def evaluate(model, data_iter):
    """Compute classification accuracy of `model` over `data_iter`.

    Args:
        model: module mapping a feature batch to per-class scores/probabilities.
        data_iter: iterable of (features, one-hot labels) batches.

    Returns:
        float: fraction of samples whose argmax prediction matches the
        argmax of the one-hot label.

    The model is switched to eval mode for the duration of the pass and its
    previous train/eval mode is restored afterwards.  (The original version
    left the model in eval mode permanently, which silently disabled dropout
    for the rest of training after the first validation run.)
    """
    device = next(model.parameters()).device
    was_training = model.training  # remember the mode so it can be restored
    model.eval()
    all_labels = []
    all_pred_labels = []

    with torch.no_grad():
        for batch_x, batch_y in data_iter:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            outputs = model(batch_x)

            # Decode one-hot targets and predicted class ids on the CPU.
            labels = np.argmax(batch_y.cpu().numpy(), axis=1)
            pred_labels = np.argmax(outputs.cpu().numpy(), axis=1)

            all_labels.append(labels)
            all_pred_labels.append(pred_labels)

    if was_training:
        model.train()

    all_labels = np.concatenate(all_labels, axis=0)
    all_pred_labels = np.concatenate(all_pred_labels, axis=0)

    # Raises on an empty data_iter (np.concatenate of an empty list),
    # matching the original behaviour.
    return np.sum(all_pred_labels == all_labels) / all_pred_labels.shape[0]

# NOTE(review): GPU index 3 is hard-coded — confirm it exists on the target machine.
device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
net = Net(width).to(device)
print(net)
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # Adam optimiser
# BCE loss over softmax outputs against one-hot targets.
# NOTE(review): nn.CrossEntropyLoss on raw logits is the usual choice for
# single-label multi-class problems — switching would also require removing
# the softmax in Net.forward; confirm before changing.
loss_func = torch.nn.BCELoss()
batch_size = 8
early_stop = 100   # max consecutive non-improving validation checks
improve = 0        # consecutive validation checks without improvement

train_features = torch.Tensor(train_features)
valid_features = torch.Tensor(valid_features)
test_features = torch.Tensor(test_features)

train_labels_onehot = torch.Tensor(train_labels_onehot)
valid_labels_onehot = torch.Tensor(valid_labels_onehot)
test_labels_onehot = torch.Tensor(test_labels_onehot)

# Training loader: reshuffled every epoch.
torch_dataset = Data.TensorDataset(train_features, train_labels_onehot)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=batch_size,
    shuffle=True
)

# Evaluation loaders: batch order does not affect accuracy, so shuffling
# was pointless work — iterate deterministically instead.
torch_dataset_valid = Data.TensorDataset(valid_features, valid_labels_onehot)
loader_valid = Data.DataLoader(
    dataset=torch_dataset_valid,
    batch_size=batch_size,
    shuffle=False
)

torch_dataset_test = Data.TensorDataset(test_features, test_labels_onehot)
loader_test = Data.DataLoader(
    dataset=torch_dataset_test,
    batch_size=batch_size,
    shuffle=False
)


# Training loop: validate every 50 steps, checkpoint on improvement,
# and stop early after `early_stop` consecutive non-improving checks.
t = 1
best_acc = 0
iters, accs = [], []
stop_training = False  # set when early stopping triggers
for epoch in range(20):
    # (Re-)enable dropout: evaluate() switches the net to eval mode.
    net.train()
    for step, (batch_x, batch_y) in enumerate(loader):
        optimizer.zero_grad()
        logits = net(batch_x.to(device))
        loss = loss_func(logits, batch_y.to(device))
        loss.backward()
        optimizer.step()
        if t % 50 == 0:
            acc = evaluate(net, loader_valid)
            net.train()  # resume training mode after validation
            iters.append(t)
            accs.append(acc)
            if acc > best_acc:
                improve = 0
                torch.save(net.state_dict(), 'saved_models/CNN.pth')
                best_acc = acc
                print('iter:{},valid acc={},best'.format(t, acc))
            else:
                improve += 1
                print('iter:{},valid acc={}'.format(t, acc))
        if improve > early_stop:
            # The original `break` only left the inner loop, so training
            # silently continued for the remaining epochs; propagate the
            # stop through the epoch loop as well.
            stop_training = True
            break
        t += 1
    if stop_training:
        break

print('testing...')
# NOTE(review): this evaluates the net's FINAL weights, not the best
# checkpoint saved to saved_models/CNN.pth — confirm that is intended.
test_acc = evaluate(net,loader_test)
print('test_acc:%.2f'%test_acc)

# Plot validation accuracy against training iteration and save the figure.
fig, ax = plt.subplots()
ax.plot(iters, accs)
ax.set(xlabel='iter', ylabel='accuracy',
       title='The accuracy varies with the number of iterations')
ax.grid()
fig.savefig("acc/acc-CNN-%d.png"%(width))
plt.show()