# Training function; in the test phase evaluation metrics are also computed,
# so models are not judged by loss alone.
import numpy as np
import torch
from tqdm import tqdm

from evaluation_metrix import calculate_batch_metrics

def _print_loss_summary(all_train_loss, all_test_loss):
    # Summarize best / average / last-epoch losses over the completed epochs.
    # (Previously duplicated in two places, and "best test loss" mistakenly
    # printed min(all_train_loss); fixed to use the test-loss history.)
    print(f"best train loss={min(all_train_loss)},epoch={all_train_loss.index(min(all_train_loss))}")
    print(f"best test loss={min(all_test_loss)},epoch={all_test_loss.index(min(all_test_loss))}")
    print(f"average train loss={sum(all_train_loss)/len(all_train_loss)}")
    print(f"average test loss={sum(all_test_loss)/len(all_test_loss)}")
    print(f"last epoch train loss={all_train_loss[-1]}")
    print(f"last epoch test loss={all_test_loss[-1]}")
    print("------------end traning-----------")

def Train_and_Test_each_epoch(device, model, train_dataset, test_dataset, train_loader, test_loader, criterion, optimizer, num_epochs, patience=15):
    """Train ``model`` and evaluate it on the test set after every epoch.

    Besides the test loss, per-output RMSE / MAE / MAPE are computed on
    unnormalized predictions via ``calculate_batch_metrics``.  Training stops
    early once the test loss has not improved for ``patience`` consecutive
    epochs.

    Args:
        device: torch device that model inputs/targets are moved to.
        model: network being optimized (updated in place).
        train_dataset: unused here; kept for interface compatibility.
        test_dataset: must expose ``unnormalize_y`` to invert target scaling.
        train_loader: DataLoader yielding (X, y) training batches.
        test_loader: DataLoader yielding (X, y) test batches.
        criterion: loss function applied to (outputs, targets).
        optimizer: optimizer whose param groups are stepped and inspected.
        num_epochs: maximum number of epochs to run.
        patience: epochs without test-loss improvement before early stop.
    """
    print("------------start traning-----------")
    best_test_loss = np.inf
    epochs_no_improve = 0
    # The LR scheduler is interchangeable; if you swap it (e.g. for the
    # ReduceLROnPlateau line below) also adapt the arguments passed to
    # scheduler.step() near the bottom of the epoch loop.
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.1, patience=10)
    # Fully qualified: CyclicLR was previously referenced without an import,
    # which raised NameError at runtime.
    # NOTE(review): CyclicLR is conventionally stepped once per *batch*; here
    # it is stepped once per epoch, so step_size_up=2000 means 2000 epochs to
    # reach max_lr — confirm this is intended.
    scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.00001, max_lr=0.1, step_size_up=2000, mode='triangular2')
    all_train_loss = []
    all_test_loss = []

    for epoch in range(num_epochs):
        # ---------------Train---------------
        model.train()
        train_running_loss = 0
        train_total_samples = 0
        print('Epoch {}/{}'.format(epoch, num_epochs))
        tk0 = tqdm(train_loader, total=int(len(train_loader)))
        for batch_id, (X_batch, y_batch) in enumerate(tk0):
            # Move straight to the target device (the old .to('cpu') round
            # trip before .to(device) was redundant).
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            optimizer.zero_grad()
            outputs = model(X_batch)
            loss = criterion(outputs, y_batch)
            loss.backward()
            optimizer.step()

            # loss.item() is the batch mean; weight by batch size so the
            # epoch average is exact even with a ragged final batch.
            train_running_loss += loss.item() * X_batch.size(0)
            train_total_samples += X_batch.size(0)
            # Update the progress bar every batch (previously set once after
            # the loop, so the bar never showed a live loss).
            tk0.set_postfix(loss=(train_running_loss / train_total_samples))

        train_average_loss = train_running_loss / train_total_samples
        print(f"Epoch:{epoch}/{num_epochs} train_loss={train_average_loss:.20f}")
        all_train_loss.append(train_average_loss)

        # ---------------Test---------------
        with torch.no_grad():
            model.eval()
            test_running_loss = 0
            test_total_samples = 0
            batch_total_num = 0  # number of test batches seen
            # One accumulator slot per output target (3 targets assumed by
            # calculate_batch_metrics — TODO confirm against its definition).
            rmse_total = [0, 0, 0]
            mae_total = [0, 0, 0]
            mape_total = [0, 0, 0]
            tk1 = tqdm(test_loader, total=int(len(test_loader)))
            for val_id, (X_batch, y_batch) in enumerate(tk1):
                X_batch, y_batch = X_batch.to(device), y_batch.to(device)
                outputs = model(X_batch)
                loss = criterion(outputs, y_batch)
                test_running_loss += loss.item() * X_batch.size(0)
                test_total_samples += X_batch.size(0)

                # Metrics are computed in the original (unnormalized) scale.
                y_batch_unnormalize = test_dataset.unnormalize_y(y_batch)
                outputs_unnormalize = test_dataset.unnormalize_y(outputs)
                rmse_batch_list, mae_batch_list, mape_batch_list = calculate_batch_metrics(y_batch_unnormalize, outputs_unnormalize)
                rmse_total = [a + b for a, b in zip(rmse_total, rmse_batch_list)]
                mae_total = [a + b for a, b in zip(mae_total, mae_batch_list)]
                mape_total = [a + b for a, b in zip(mape_total, mape_batch_list)]

                batch_total_num += 1
                tk1.set_postfix(loss=(test_running_loss / test_total_samples))

            test_average_loss = test_running_loss / test_total_samples
            print(f"Epoch:{epoch}/{num_epochs} test loss={test_average_loss:.20f}")
            all_test_loss.append(test_average_loss)

            # Mean of per-batch metrics; .item() turns 0-d tensors into floats.
            average_rmse = [(x / batch_total_num).item() for x in rmse_total]
            average_mae = [(x / batch_total_num).item() for x in mae_total]
            average_mape = [(x / batch_total_num).item() for x in mape_total]
            print(f"average rmse={average_rmse}\naverage mae={average_mae}\naverage mape={average_mape}")

        # Early-stopping bookkeeping driven by the test loss.
        if test_average_loss < best_test_loss:
            best_test_loss = test_average_loss
            epochs_no_improve = 0
        else:
            epochs_no_improve += 1
        print("epochs_no_improve=", epochs_no_improve)
        if epochs_no_improve == patience:
            print("early stop")
            _print_loss_summary(all_train_loss, all_test_loss)
            break

        #scheduler.step(test_average_loss)
        scheduler.step()
        print("Current learning rate:", [group['lr'] for group in optimizer.param_groups])

        if epoch == num_epochs - 1:
            # Reached the epoch limit without early stopping (this branch
            # previously printed "early stop", which was misleading).
            print("reached max epochs")
            _print_loss_summary(all_train_loss, all_test_loss)