# -*- coding: utf-8 -*-
# Author    : Jie Shen
# Created   : 2021/12/17 0:08

import os
import numpy as np
import pandas as pd
import  torch
import  torch.nn as nn
import  torch.nn.functional as F
import  torch.optim as optim
from  torchvision import datasets, transforms
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler

from sklearn.utils import shuffle
# Load the preprocessed dataset and shuffle rows so the 60/20/20 split
# below is not biased by the file's original row ordering.
pd_ = pd.read_csv('out/end.csv')
pd_ = shuffle(pd_)

# 60% train / 20% validation / remainder test.
train_len = int(0.6 * len(pd_))
val_len = int(0.2 * len(pd_))
test_len = len(pd_) - train_len - val_len

# .copy() makes each split an independent DataFrame: the in-place column
# drops later in the script would otherwise operate on views of pd_ and
# raise pandas' SettingWithCopyWarning.
train_dataset = pd_[:train_len].copy()
train_label_ = train_dataset['季节']  # '季节' (season) is the 4-class target

val_dataset = pd_[train_len:train_len + val_len].copy()
val_label_ = val_dataset['季节']

test_dataset = pd_[train_len + val_len:].copy()
test_label_ = test_dataset['季节']

def one_hot(labels):
    """One-hot encode a 1-D sequence of class labels.

    Categories are the sorted unique values of ``labels`` (the same
    ordering sklearn's ``OneHotEncoder`` uses), so the seasons map to
    columns in sorted order.  Implemented with numpy/torch only — no
    sklearn dependency and no sparse-matrix round-trip.

    Args:
        labels: 1-D sequence (list / Series / ndarray) of labels.

    Returns:
        torch.FloatTensor of shape (len(labels), num_categories).
    """
    flat = np.asarray(labels).reshape(-1)
    # np.unique sorts the categories and yields each label's category
    # index in a single pass.
    categories, inverse = np.unique(flat, return_inverse=True)
    # Row i of the identity matrix is the one-hot vector for class i.
    return torch.eye(len(categories))[torch.as_tensor(inverse)]

# One-hot encode the season labels of each split.
train_label = one_hot(train_label_)
val_label = one_hot(val_label_)
test_label = one_hot(test_label_)

# Remove the target column from the feature frames.  Reassigning the
# result (instead of inplace=True on a slice of pd_) avoids pandas'
# SettingWithCopyWarning.
train_dataset = train_dataset.drop('季节', axis=1)
val_dataset = val_dataset.drop('季节', axis=1)
test_dataset = test_dataset.drop('季节', axis=1)

# Standardize the features.  The scaler is fitted on the TRAINING split
# only and then applied to all three splits: fitting separate scalers on
# val/test (as the original did) leaks their statistics and makes the
# three splits incomparable.
Scaler1 = StandardScaler()
train_dataset = pd.DataFrame(Scaler1.fit_transform(train_dataset))
val_dataset = pd.DataFrame(Scaler1.transform(val_dataset))
test_dataset = pd.DataFrame(Scaler1.transform(test_dataset))

class Weather(nn.Module):
    """MLP classifier: 220 weather features -> 4 season logits."""

    def __init__(self):
        super(Weather, self).__init__()

        # Two hidden layers with LeakyReLU.  The output layer emits RAW
        # logits: the original applied LeakyReLU to the output as well,
        # which distorts the scores fed to CrossEntropyLoss (softmax is
        # applied inside the loss and expects unbounded logits).
        self.model = nn.Sequential(
            nn.Linear(220, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 4),
        )

    def forward(self, x):
        """Return (batch, 4) unnormalized class scores for input x of shape (batch, 220)."""
        return self.model(x)

# Train on the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Weather().to(device)
optimizer = optim.SGD(net.parameters(), lr=0.01)
criteon = nn.CrossEntropyLoss().to(device)

# Mini-batch bookkeeping: number of full batches per split.
epochs = 200
batch_size = 64
batch_num_train = len(train_dataset) // batch_size
batch_num_val = len(val_dataset) // batch_size
batch_num_test = len(test_dataset) // batch_size

for epoch in range(epochs):
    # ---- training pass ----
    train_loss = 0
    for i in range(batch_num_train + 1):
        start = i * batch_size
        end = start + batch_size
        # Batches must be moved to the same device as the model; the
        # original kept them on CPU, which crashes when CUDA is used.
        data = torch.FloatTensor(train_dataset[start:end].values).to(device)
        target = train_label[start:end].to(device)
        if len(data) == 0:
            # The "+ 1" loop bound yields an empty final slice when the
            # split length is an exact multiple of batch_size.
            continue
        logits = net(data)
        # train_label is already a FloatTensor (one-hot class
        # probabilities, accepted by CrossEntropyLoss); no re-wrap needed.
        loss = criteon(logits, target)
        train_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # ---- validation after each epoch ----
    val_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for i in range(batch_num_val + 1):
            start = i * batch_size
            end = start + batch_size
            data = torch.FloatTensor(val_dataset[start:end].values).to(device)
            target = val_label[start:end].to(device)
            if len(data) == 0:
                continue
            logits = net(data)
            val_loss += criteon(logits, target).item()
            pred = logits.argmax(dim=1)
            # Compare predicted class index against the one-hot target's
            # index; .item() keeps `correct` a plain int, not a tensor.
            correct += pred.eq(target.argmax(dim=1)).sum().item()

    if epoch % 5 == 0:
        print("val_loss:", val_loss)

        print('\nVAL set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            val_loss, correct, len(val_label),
            100. * correct / len(val_label)))

# ---- final evaluation on the held-out test split ----
test_loss = 0
correct = 0
with torch.no_grad():  # evaluation only; no gradients
    for i in range(batch_num_test + 1):
        start = i * batch_size
        end = start + batch_size
        data = torch.FloatTensor(test_dataset[start:end].values).to(device)
        # BUG FIX: the original compared predictions against val_label here.
        target = test_label[start:end].to(device)
        if len(data) == 0:
            # Empty final slice when len(test_dataset) % batch_size == 0.
            continue
        logits = net(data)
        test_loss += criteon(logits, target).item()
        pred = logits.argmax(dim=1)
        correct += pred.eq(target.argmax(dim=1)).sum().item()

# BUG FIX: report the TEST metrics — the original printed val_loss and
# divided the accuracy by len(val_label).
print("test_loss:", test_loss)

print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_label),
    100. * correct / len(test_label)))