# -*- coding: utf-8 -*-
from naie.datasets import get_data_reference
from naie.context import Context
import moxing as mox

import zipfile
import os
import time

import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision import models

from tensorboardX import SummaryWriter
import pandas as pd
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True


class MyDataset(Dataset):
    """Image dataset backed by a dataframe of file IDs (and optional labels).

    Args:
        root: directory prefix for images; each file is ``<FileID>.jpg``.
        input_data: dataframe with a 'FileID' column and, for labelled
            splits, a 'SpeciesID' column (absent for the test split).
        aug: torchvision transform applied to each image, or None.
    """

    def __init__(self, root, input_data, aug):
        self.root = root
        self.file_data = input_data['FileID'].values
        # Test-time dataframes have no 'SpeciesID' column -> unlabelled mode.
        self.label_data = input_data[
            'SpeciesID'].values if 'SpeciesID' in input_data.columns else None
        self.aug = aug

    def __len__(self):
        return len(self.file_data)

    def __getitem__(self, index):
        # Load lazily: the original eagerly opened every image in __init__,
        # which held the whole dataset in memory and kept one open file
        # handle per sample. Opening here also plays well with DataLoader
        # worker processes.
        img = Image.open(self.root + self.file_data[index] + '.jpg')

        if self.aug is not None:
            img = self.aug(img)

        if self.label_data is not None:
            return img, self.file_data[index], self.label_data[index]
        else:
            return img, self.file_data[index]


def train():
    """Run one epoch of training over ``train_dataloader``.

    Relies on module-level globals: ``model``, ``train_dataloader``,
    ``device``, ``optimizer``, ``criterion``, ``writer``, ``epoch``,
    ``MAX_EPOCH`` and ``TensorBoard_DIR``. Prints per-iteration stats,
    writes them to TensorBoard and appends them to ``train.txt``.
    """
    model.train()
    # Open the log file once per epoch (the original re-opened it every
    # iteration); the context manager closes it even if a batch raises.
    with open(os.path.join(TensorBoard_DIR, "train.txt"), "a+") as txtwriter:
        for iteration, (img, fileid, label) in enumerate(train_dataloader):
            # Move the batch to the target device.
            inputs, labels = img.to(device).float(), label.to(device)

            # Forward / backward / parameter update.
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Batch-level loss and accuracy (detach: no grad through stats).
            _, preds = torch.max(outputs.detach(), 1)
            acc = preds.eq(labels).float().mean()

            train_info = \
            "epoch:[{}/{}] train_iter:[{}/{}], loss:{:.6f}, acc:{:.6f}, LR:{}".format(
                epoch,
                MAX_EPOCH,
                iteration + 1,
                len(train_dataloader),
                loss.item(),
                acc,
                optimizer.param_groups[0]['lr']
            )
            print(train_info)

            # TensorBoard uses a global step that keeps counting across epochs.
            n_iter = (epoch - 1) * len(train_dataloader) + iteration + 1
            writer.add_scalar('train/loss', loss.item(), n_iter)
            writer.add_scalar('train/accuracy', acc, n_iter)

            # Mirror the same line into the plain-text log.
            txtwriter.write(train_info)
            txtwriter.write('\n')


def valid():
    """Evaluate on ``valid_dataloader`` and return validation accuracy.

    The caller is expected to wrap this call in ``torch.no_grad()``.
    Relies on module-level globals: ``model``, ``valid_dataloader``,
    ``valid_data``, ``device``, ``criterion``, ``writer``, ``epoch``
    and ``TensorBoard_DIR``.

    Returns:
        Validation accuracy (``correct / len(valid_data)``) as a
        0-dim tensor.
    """
    model.eval()
    val_loss = 0.0
    correct = 0.0
    for idx, (img, fileid, label) in enumerate(valid_dataloader):
        inputs, labels = img.to(device).float(), label.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        val_loss += loss.item()
        _, preds = outputs.max(1)
        correct += preds.eq(labels).sum().float()

    valid_info = \
    'epoch:{}, val_loss:{:.6f}, val_acc:{:.6f}'.format(
        epoch,
        val_loss / len(valid_dataloader),
        correct / len(valid_data)
    )
    print(valid_info)

    # One scalar per epoch (unlike train(), which logs per iteration).
    writer.add_scalar('valid/loss', val_loss / len(valid_dataloader), epoch)
    writer.add_scalar('valid/accuracy', correct / len(valid_data), epoch)

    # Context manager guarantees the log file is closed.
    with open(os.path.join(TensorBoard_DIR, "valid.txt"), "a+") as txtwriter:
        txtwriter.write(valid_info)
        txtwriter.write('\n')

    return correct / len(valid_data)


def test():
    """Predict on the test set and upload the predictions as ``ans.csv``.

    Relies on module-level globals: ``model``, ``test_dataloader``,
    ``device``, plus the platform helpers ``mox`` and ``Context``.
    """
    model.eval()
    file_ids = []
    predictions = []
    for img, fileid in test_dataloader:
        batch = img.to(device).float()
        logits = model(batch)
        file_ids.extend(fileid)
        # argmax over classes -> predicted SpeciesID per sample
        predictions.extend(logits.max(1)[1].detach().cpu().numpy())
    rows = list(zip(file_ids, predictions))
    ans = pd.DataFrame(rows, columns=['FileID', 'SpeciesID'])
    ans.to_csv('ans.csv', index=None)
    mox.file.copy('ans.csv', os.path.join(Context.get_output_path(), 'ans.csv'))
    print('ans saved.')


if __name__ == "__main__":

    # Hyperparameters.
    BATCH_SIZE = 64
    NUM_WORKERS = 8
    LR = 0.0002
    MAX_EPOCH = 800
    SAVE_EPOCH = 100  # save a "regular" checkpoint every SAVE_EPOCH epochs
    LOAD_CHECKPOINT_PATH = None  # set to a .pth path to resume training
    CHECKPOINT_DIR = r'/cache/output/checkpoints/'
    TensorBoard_DIR = r'/cache/output/tensorboards/'

    # Hardware selection.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('device:', device)

    # Pull the dataset archive from the platform into /cache and unpack it.
    data_reference = get_data_reference(dataset="DatasetService",
                                        dataset_entity='OceanDataset')
    for file_paths in data_reference.get_files_paths():
        mox.file.copy(file_paths, '/cache/' + file_paths.split('/')[-1])
    # Extract everything; the context manager closes the archive handle.
    with zipfile.ZipFile('/cache/data.zip') as zip_file:
        zip_file.extractall('/cache/')

    # Image folder path and the csv annotation files.
    img_path = '/cache/data/'
    train_csv = pd.read_csv('/cache/training.csv')
    test_csv = pd.read_csv('/cache/test.csv')
    mox.file.copy('annotation.csv', '/cache/annotation.csv')
    valid_csv = pd.read_csv('/cache/annotation.csv')

    # Preprocessing: random flips for training, deterministic for eval/test.
    # Normalization constants are the standard ImageNet mean/std.
    train_aug = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    test_aug = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # Datasets and loaders.
    train_data = MyDataset(root=img_path, input_data=train_csv, aug=train_aug)
    train_dataloader = DataLoader(dataset=train_data,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=NUM_WORKERS)

    valid_data = MyDataset(root=img_path, input_data=valid_csv, aug=test_aug)
    valid_dataloader = DataLoader(dataset=valid_data,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False,
                                  num_workers=NUM_WORKERS)

    test_data = MyDataset(root=img_path, input_data=test_csv, aug=test_aug)
    test_dataloader = DataLoader(dataset=test_data,
                                 batch_size=BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=NUM_WORKERS)

    print('train:{}, valid:{}, test:{}'.format(len(train_data), len(valid_data), len(test_data)))
    print('batch_size:', BATCH_SIZE)

    # Model: DenseNet-169 trained from scratch, 20-way classifier head.
    model = models.densenet169(pretrained=False, num_classes=20)
    model = model.to(device)
    # Derive a short model name (e.g. 'DenseNet') for checkpoint filenames.
    modelname = str(model.__class__).strip('<>').split()[-1].strip("'").split('.')[-1]
    print(modelname)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LR)

    # TensorBoard writer.
    if not os.path.exists(TensorBoard_DIR):
        os.makedirs(TensorBoard_DIR)
    writer = SummaryWriter(log_dir=TensorBoard_DIR)

    # Checkpoint path template: <model>_<type>_epoch_<n>.pth
    if not os.path.exists(CHECKPOINT_DIR):
        os.makedirs(CHECKPOINT_DIR)
    save_checkpoint_path = os.path.join(CHECKPOINT_DIR, "{modelname}_{type}_epoch_{epoch}.pth")

    # Resume from a checkpoint when one is configured.
    if LOAD_CHECKPOINT_PATH is not None:
        loaded_checkpoint = torch.load(LOAD_CHECKPOINT_PATH)
        model.load_state_dict(loaded_checkpoint['model_state_dict'])
        # BUGFIX: the epoch parsed out of the filename is a string; it must
        # be an int or `range(load_epoch + 1, ...)` below raises TypeError.
        load_epoch = int(LOAD_CHECKPOINT_PATH.split('_')[-1].split('.')[0])
        print('load checkpoint: {}'.format(LOAD_CHECKPOINT_PATH))
    else:
        load_epoch = 0

    # Training loop: validate every epoch, keep the best model, run
    # inference on the test set whenever the best accuracy improves.
    print('Start training...')
    best_acc = loaded_checkpoint['current_accuracy'] if LOAD_CHECKPOINT_PATH is not None else 0.0
    for epoch in range(load_epoch + 1, MAX_EPOCH + 1):
        train()

        with torch.no_grad():
            acc = valid()
            if best_acc < acc:
                best_acc = acc
                checkpoint = {'model_state_dict': model.state_dict(),
                              'current_accuracy': acc}
                torch.save(checkpoint, save_checkpoint_path.format(
                    modelname=modelname,
                    type='best',
                    epoch=epoch
                    ))
                # New best model -> refresh the submission file.
                test()

            elif (epoch % SAVE_EPOCH == 0) or (epoch == MAX_EPOCH):
                # Periodic checkpoint so long runs can be resumed.
                checkpoint = {'model_state_dict': model.state_dict(),
                              'current_accuracy': acc}
                torch.save(checkpoint, save_checkpoint_path.format(
                    modelname=modelname,
                    type='regular',
                    epoch=epoch,
                    ))

    print('Training Task Finished.')
