# -*- coding: utf-8 -*-
"""
@Time:Created on 2020/7/05
@author: Qichang Zhao
"""

import random
import os
import time
from models import AlexNet
from dataset import CustomDataSet, collater
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
from tqdm import tqdm
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, f1_score

def shuffle_dataset(dataset, seed):
    """Shuffle *dataset* in place with a fixed seed and return it.

    Uses a local ``numpy.random.RandomState`` instead of re-seeding NumPy's
    global RNG, so calling this does not clobber global random state used
    elsewhere in the program.  The resulting permutation is identical to
    ``np.random.seed(seed)`` followed by ``np.random.shuffle(dataset)``,
    because the legacy global RNG is itself a ``RandomState``.

    Args:
        dataset: mutable sequence (e.g. a list of filenames) to shuffle.
        seed: integer seed for a reproducible permutation.

    Returns:
        The same ``dataset`` object, shuffled in place.
    """
    rng = np.random.RandomState(seed)
    rng.shuffle(dataset)
    return dataset

class hyperparameter():
    """Bundle of training hyperparameters read by the training script."""

    def __init__(self):
        # Optimisation schedule.
        self.Epoch = 10
        self.Batch_size = 256
        # Deliberately tiny starting LR: the StepLR(gamma=10) scheduler in
        # the training script multiplies it each epoch (LR range test).
        self.Learning_rate = 1e-10
        # Regularisation, data split and early-stopping settings.
        self.weight_decay = 5e-4
        self.validation_split = 0.2
        self.patience = 20

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
if __name__ == "__main__":
    """select seed"""
    SEED = 1234
    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # torch.backends.cudnn.deterministic = True

    save_path = "./25/"
    """Output files."""
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    """init hyperparameters"""
    hp = hyperparameter()

    """Load preprocessed data."""
    load_train_path = "./Dataset/Train/"
    train_files = os.listdir(load_train_path)
    with open("./Dataset/gred_img_train.txt", "r") as f:
        gred_list_train = f.read().strip().split('\n')
    train_set = [x for x in train_files if x not in gred_list_train]

    print("load data")
    print("data shuffle")
    train_set = shuffle_dataset(train_set, SEED)

    dataset = CustomDataSet(train_set)
    dataset_len = len(dataset)
    valid_size = int(0.2 * dataset_len)
    train_size = dataset_len - valid_size
    train_dataset, valid_dataset = torch.utils.data.random_split(dataset, [train_size, valid_size])
    train_collate_fn = collater("./Dataset/Train/")
    train_dataset_load = DataLoader(train_dataset, batch_size=hp.Batch_size, shuffle=True, num_workers=2,
                                    collate_fn=train_collate_fn)
    valid_dataset_load = DataLoader(valid_dataset, batch_size=hp.Batch_size, shuffle=False, num_workers=2,
                                    collate_fn=train_collate_fn)

    """ create model"""
    model = alexnet(num_classes=25).cuda()
    """weight initialize"""
    weight_p, bias_p = [], []
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    for name, p in model.named_parameters():
        if 'bias' in name:
            bias_p += [p]
        else:
            weight_p += [p]

    optimizer = optim.AdamW(
        [{'params': weight_p, 'weight_decay': hp.weight_decay}, {'params': bias_p, 'weight_decay': 0}],
        lr=hp.Learning_rate)
    scheduler_lr = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=10)
    Loss = nn.CrossEntropyLoss()
    print(model)
    """ 使用tensorboardX来跟踪实验"""
    tb_path = "./25/learning rate find/"
    if not os.path.exists(tb_path):
        os.makedirs(tb_path)
    note = 'learning_rate'
    writer = SummaryWriter(log_dir=tb_path, comment=note)

    """Start training."""
    print('Training...')

    for epoch in range(1, hp.Epoch + 1):
        trian_pbar = tqdm(
            enumerate(
                BackgroundGenerator(train_dataset_load)),
            total=len(train_dataset_load))
        """train"""
        train_losses_in_epoch = []
        model.train()
        for trian_i, train_data in trian_pbar:
            '''data preparation '''
            trian_img, trian_labels = train_data
            trian_img = trian_img.cuda()
            trian_labels = trian_labels.cuda()
            '''前向传播与反向传播'''
            '''梯度置0'''
            optimizer.zero_grad()
            # 正向传播，反向传播，优化
            predicted_interaction = model(trian_img)
            train_loss = Loss(predicted_interaction, trian_labels)
            train_losses_in_epoch.append(train_loss.item())
            train_loss.backward()
            optimizer.step()
        train_loss_a_epoch = np.average(train_losses_in_epoch)  # 一次epoch的平均训练loss
        current_lr = optimizer.param_groups[0]['lr']
        print("Epoch:{};Loss:{};LR:{}".format(epoch,train_loss_a_epoch,current_lr))
        writer.add_scalar('Train Loss', train_loss_a_epoch, epoch)
        writer.add_scalar('LR', current_lr, epoch)
        writer.add_scalar('LR with train_loss_a_epoch', train_loss_a_epoch, current_lr)
        scheduler_lr.step()