'''
Model training script.
'''
from pickle import load
import time

import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader
# from torch.nn.functional import 
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split

from dl_modules import DLPriceModel, PriceDataSet
from constant import CHECK_POINT,DatasetLabelPath,DatasetTrainPath,LabelScalerPath,OUTPUT_PATH

# Select the compute device: prefer the GPU when one is available.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# DEVICE = 'cpu'

# Model architecture hyper-parameters.
at_heads = 6        # number of attention heads
en_deepth = 1       # encoder depth (stacked encoder layers)
kernel_size = 5     # convolution kernel size
rnn_deep = 1        # number of recurrent layers

# Training hyper-parameters.
epoches = None      # fixed epoch budget; None -> early stopping via n_iter_no_change
batchsize = 100
num_workers = 0
lr = 1e-3
test_size_rate = 0.1        # hold-out fraction for the validation split
n_iter_no_change = 10       # early-stopping patience (epochs without improvement)


print(torch.__version__)
print(f'DEVICE={DEVICE}')

# Load the pre-built training features and labels from disk.
data = np.load(DatasetTrainPath).astype(np.float32)
label = np.load(DatasetLabelPath).astype(np.float32)
if data.shape[0] != label.shape[0]:
    # FIX: raising a plain string is a TypeError in Python 3 — exceptions must
    # derive from BaseException. Raise a real exception with the same message.
    raise ValueError('The batch size of data is different from that of label')

# Hold out a validation split used for early stopping.
X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=test_size_rate)

X_train = torch.tensor(X_train, device=DEVICE)
y_train = torch.tensor(y_train, device=DEVICE)
X_test = torch.tensor(X_test, device=DEVICE)
# NOTE: y_test deliberately stays a numpy array — it is only consumed by
# sklearn's mean_absolute_error on the CPU.

model = DLPriceModel(
    data.shape[1], data.shape[1] + 5,
    at_heads, en_deepth, kernel_size, rnn_deep)
model.to(DEVICE).train()
loss_fun = nn.L1Loss().to(DEVICE)   # MAE loss, matching the validation metric
optim = Adam(model.parameters(), lr)

loader = DataLoader(
    PriceDataSet(X_train, y_train, DEVICE), batchsize, shuffle=True,
    num_workers=num_workers, persistent_workers=False)
bar = tqdm(loader)

# Training loop with early stopping: runs until either the fixed epoch budget
# is exhausted (when `epoches` is set) or the validation MAE has not improved
# for `n_iter_no_change` consecutive epochs (when `epoches` is None).

epoch = 0
best_parameters = False     # True once at least one checkpoint has been saved
best_score = np.inf         # best (lowest) validation MAE seen so far
iter_no_change = 0          # consecutive epochs without improvement

train_at = time.time()

while True:
    epoch += 1
    losses = []
    # FIX: rebuild the tqdm wrapper every epoch — a tqdm instance closes
    # (disables) itself after its first full iteration, so reusing the one
    # created before the loop stops updating the bar/postfix from epoch 2 on.
    bar = tqdm(loader)
    for batch_data, batch_label in bar:
        output = model(batch_data)
        loss = loss_fun(output, batch_label)
        optim.zero_grad()
        loss.backward()
        optim.step()
        losses.append(loss.item())
        bar.set_postfix_str(
            f'epoch[{epoch}:{epoches}]|loss_mean[{np.mean(losses)}]'
        )
    # Validation pass: MAE on the held-out split (y_test is already numpy).
    model.eval()
    with torch.no_grad():
        tmp_score = mean_absolute_error(
            y_test, model(X_test).cpu().numpy()
        )
    model.train()
    if tmp_score <= best_score:
        # Improvement: reset patience and checkpoint the weights.
        iter_no_change = 0
        best_score = tmp_score
        best_parameters = True
        print('record parameters')
        torch.save(model.state_dict(), 'pth/parameters.pth')
    else:
        iter_no_change += 1
    print('tmp_score', tmp_score, 'best_score', best_score, 'iter_no_change', iter_no_change)
    # Stop on exhausted patience (early-stopping mode) or on reaching the
    # fixed epoch budget (fixed-epochs mode).
    if (epoches is None and iter_no_change > n_iter_no_change) \
            or (epoches is not None and epoch >= epoches):
        break
    torch.cuda.empty_cache()

print('train consume', time.time() - train_at)

# Reload the best checkpoint (if one was ever saved) and re-score it on the
# validation split as a sanity check.
if best_parameters:
    state = torch.load('pth/parameters.pth', map_location=DEVICE)
    model.load_state_dict(state)
    model.eval()
    with torch.no_grad():
        tmp_score = mean_absolute_error(y_test, model(X_test).cpu().numpy())
    print('loaded score', tmp_score)

# Score the full training set.
# Load the fitted label scaler so the MAE can also be reported in the
# original (un-scaled) label units.
# NOTE(review): pickle.load executes arbitrary code — only load trusted files.
with open(LabelScalerPath, 'br') as f:
    label_scaler = load(f)

out = []
model.eval()
# FIX: shuffle must be False here — with shuffle=True the concatenated
# predictions come back in random order and are no longer aligned
# row-for-row with `label`, making the MAE below meaningless.
bar = tqdm(DataLoader(PriceDataSet(torch.tensor(data, device=DEVICE)), 1500, shuffle=False))
for batch_data in bar:
    with torch.no_grad():
        output = model(batch_data)
    out.append(output.cpu().numpy())    # detach() is redundant under no_grad
out = np.concatenate(out, 0)
print('out.shape', out.shape)

# Report MAE both in scaled space and in the original label units.
print('trainset score', mean_absolute_error(label, out), mean_absolute_error(
    label_scaler.inverse_transform(label),
    label_scaler.inverse_transform(out)))

# Export the trained model to ONNX, using a single sample as the example input.
onnx_path = f'{CHECK_POINT}{model.get_name()}.onnx'
model.save_to_onnx(data[:1], onnx_path)

# if __name__ == '__main__':
#     # 开始训练
#     main()