# -*- coding: utf-8 -*-
from global_settings import *
import load_online_data

# Hoisted from the bottom of the file: `mox` and `Context` are referenced
# inside the __main__ block, which executes before the late imports at the
# end of this file ever run.
import moxing as mox
from naie.context import Context


class MyDataset(Dataset):
    """Dataset over jpg images listed in a DataFrame.

    Expects ``input_data`` to hold a 'FileID' column naming jpg files under
    ``root`` (without extension). An optional 'SpeciesID' column supplies the
    integer class labels; it is absent for the test split, in which case
    ``__getitem__`` returns (image, file_id) pairs only.
    """

    def __init__(self, root, input_data, aug):
        # File ids without the '.jpg' extension.
        self.file_data = input_data['FileID'].values
        # Integer labels, or None for the unlabeled (test) split.
        # `np.int` was deprecated and removed in NumPy 1.24 -- use np.int64.
        self.label_data = input_data[
            'SpeciesID'].values.astype(np.int64) if 'SpeciesID' in input_data.columns else None
        self.aug = aug
        self.root = root
        # Images are opened lazily in __getitem__: preloading them all here
        # kept one open file handle per image and the whole set in memory.

    def __len__(self):
        return len(self.file_data)

    def __getitem__(self, index):
        # Open on demand; PIL loads pixel data lazily when first accessed.
        img = Image.open(self.root + self.file_data[index] + '.jpg')

        if self.aug is not None:
            img = self.aug(img)

        if self.label_data is not None:
            return img, self.file_data[index], self.label_data[index]
        else:
            return img, self.file_data[index]


def train():
    """Run one training epoch over ``train_dataloader``, logging per-batch stats.

    Relies on module-level globals: model, train_dataloader, optimizer,
    criterion, device, epoch, MAX_EPOCH, SAVE_TXT_DIR. Appends one line per
    iteration to SAVE_TXT_DIR/train.txt.
    """
    model.train()
    # Open the log once per epoch (the original re-opened it every iteration)
    # and let the context manager guarantee the handle is closed.
    with open(os.path.join(SAVE_TXT_DIR, "train.txt"), "a+") as txtwriter:
        for iteration, (img, fileid, label) in enumerate(train_dataloader):
            # Move the batch to the target device / dtype.
            inputs, labels = img.to(device).float(), label.to(device)

            # Forward, backward, parameter update.
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Batch statistics: top-1 predictions and accuracy.
            _, preds = torch.max(outputs.detach(), 1)
            acc = preds.eq(labels).float().mean()

            train_info = \
            "epoch:[{}/{}] train_iter:[{}/{}], loss:{:.6f}, acc:{:.6f}, LR:{}".format(
                epoch,
                MAX_EPOCH,
                iteration + 1,
                len(train_dataloader),
                loss.item(),
                acc,
                optimizer.param_groups[0]['lr']
            )

            # Echo to stdout.
            print(train_info)

            # Append to the log file.
            txtwriter.write(train_info)
            txtwriter.write('\n')


def valid():
    """Evaluate the model on ``valid_dataloader``; return overall accuracy.

    The caller is expected to wrap this in ``torch.no_grad()`` (see the
    training loop). Relies on module-level globals: model, valid_dataloader,
    criterion, device, epoch, valid_data, SAVE_TXT_DIR. Appends one summary
    line per call to SAVE_TXT_DIR/valid.txt.
    """
    model.eval()
    val_loss = 0.0
    correct = 0.0
    for idx, (img, fileid, label) in enumerate(valid_dataloader):
        inputs, labels = img.to(device).float(), label.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        val_loss += loss.item()
        _, preds = outputs.max(1)
        correct += preds.eq(labels).sum().float()

    valid_info = \
    'epoch:{}, val_loss:{:.6f}, val_acc:{:.6f}'.format(
        epoch,
        val_loss / len(valid_dataloader),  # mean loss per batch
        correct / len(valid_data)          # accuracy over the full split
    )

    # Echo to stdout.
    print(valid_info)

    # Append to the log; the context manager closes the handle even on error
    # (the original opened/closed manually with no protection).
    with open(os.path.join(SAVE_TXT_DIR, "valid.txt"), "a+") as txtwriter:
        txtwriter.write(valid_info)
        txtwriter.write('\n')

    return correct / len(valid_data)


def test():
    """Run inference over ``test_dataloader`` and persist predictions.

    Writes ans.csv (columns FileID, SpeciesID) locally, then copies it to the
    job's output path. Relies on globals: model, test_dataloader, device, mox,
    Context.
    """
    model.eval()
    file_ids = []
    predictions = []
    for img, fileid in test_dataloader:
        batch = img.to(device).float()
        logits = model(batch)
        file_ids.extend(fileid)
        # argmax over classes -> predicted SpeciesID per sample
        predictions.extend(logits.max(1)[1].detach().cpu().numpy())
    rows = [[f, p] for f, p in zip(file_ids, predictions)]
    ans = pd.DataFrame(rows, columns=['FileID', 'SpeciesID'])
    ans.to_csv('ans.csv', index=None)
    mox.file.copy('ans.csv', os.path.join(Context.get_output_path(), 'ans.csv'))
    print('ans saved.')


# ================ Entry point ===============
if __name__ == '__main__':
    # Hardware selection: prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('device:', device)

    # Image folder path and the per-split csv files.
    # NOTE(review): `mox` is used below but its import sits at the bottom of
    # this file, which runs only AFTER this block — confirm `mox` is actually
    # in scope here (e.g. imported near the top), otherwise this raises NameError.
    img_path = RAW_DIR + '/af2020cv-v5-formal/' + '/data/'
    # train_csv = pd.read_csv(RAW_DIR + '/af2020cv-v5-formal/' + 'training.csv')
    test_csv = pd.read_csv(RAW_DIR + '/af2020cv-v5-formal/' + 'test.csv')
    # Copy the curated train/validation csvs next to the raw data, then load them.
    mox.file.copy('real_train.csv', RAW_DIR + '/af2020cv-v5-formal/' + 'real_train.csv')
    mox.file.copy('annotation.csv', RAW_DIR + '/af2020cv-v5-formal/' + 'annotation.csv')
    train_csv = pd.read_csv(RAW_DIR + '/af2020cv-v5-formal/' + 'real_train.csv')
    valid_csv = pd.read_csv(RAW_DIR + '/af2020cv-v5-formal/' + 'annotation.csv')

    # Image preprocessing: random flips augment the training split only;
    # both pipelines resize to 224x224 and normalize with ImageNet statistics.
    train_aug = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # Deterministic pipeline shared by the validation and test splits.
    test_aug = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # Build datasets and dataloaders; only the training loader shuffles.
    train_data = MyDataset(root=img_path, input_data=train_csv, aug=train_aug)
    train_dataloader = DataLoader(dataset=train_data,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=NUM_WORKERS)

    valid_data = MyDataset(root=img_path, input_data=valid_csv, aug=test_aug)
    valid_dataloader = DataLoader(dataset=valid_data,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False,
                                  num_workers=NUM_WORKERS)

    # test_csv has no 'SpeciesID' column, so this dataset yields no labels.
    test_data = MyDataset(root=img_path, input_data=test_csv, aug=test_aug)
    test_dataloader = DataLoader(dataset=test_data,
                                 batch_size=BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=NUM_WORKERS)

    print('train:{}, valid:{}, test:{}'.format(len(train_data), len(valid_data), len(test_data)))
    print('batch_size:', BATCH_SIZE)

    
    # Model: ResNet-101 backbone; weights come from a file, not torchvision.
    model = models.resnet101(pretrained=False).to(device)
    # Transfer learning / fine-tuning: optionally load pretrained weights
    # (done BEFORE the fc layer is replaced, so the 1000-class head matches).
    if LOAD_PRETRAINED_PATH is not None:
        if torch.cuda.is_available():
            loaded_checkpoint = torch.load(LOAD_PRETRAINED_PATH)
        else:
            loaded_checkpoint = torch.load(LOAD_PRETRAINED_PATH, map_location='cpu')
        model.load_state_dict(loaded_checkpoint)
    # Replace the final fully-connected layer to match the competition classes.
    fc_features = model.fc.in_features
    model.fc = nn.Linear(fc_features, NUM_CLASSES)
    model = model.to(device)
    # modelname = str(model.__class__).strip('<>').split()[-1].strip("'").split('.')[-1]
    modelname = 'resnet101'
    print(modelname)
    print(model)
    

    # Optimizer: only the new fully-connected head's weights are trained.
    optimizer = optim.Adam(model.fc.parameters(), lr=LR)

    # Loss function.
    criterion = nn.CrossEntropyLoss()


    # Checkpoint path template, filled in with .format() at save time.
    save_checkpoint_path = os.path.join(SAVE_CHECKPOINT_DIR, "{modelname}_{type}_epoch_{epoch}.pth")

    # Resume from a checkpoint if one is configured.
    if LOAD_CHECKPOINT_PATH is not None:
        loaded_checkpoint = torch.load(LOAD_CHECKPOINT_PATH)
        model.load_state_dict(loaded_checkpoint['model_state_dict'])
        # Recover the epoch number from a filename like "..._epoch_12.pth".
        # Cast to int: the split() result is a string, and the training loop
        # computes `load_epoch + 1`, which raised TypeError when resuming.
        load_epoch = int(LOAD_CHECKPOINT_PATH.split('_')[-1].split('.')[0])
        print('load checkpoint: {}'.format(LOAD_CHECKPOINT_PATH))
    else:
        load_epoch = 0

    # Training loop.
    print('Start training...')
    # When resuming, continue tracking the best validation accuracy so far.
    best_acc = loaded_checkpoint['current_accuracy'] if LOAD_CHECKPOINT_PATH is not None else 0.0
    # NOTE(review): load_epoch must be an int here; it is parsed out of a
    # checkpoint filename above — verify it is cast, or this range() raises.
    for epoch in range(load_epoch + 1, MAX_EPOCH + 1):
        train()

        with torch.no_grad():
            acc = valid()
            if best_acc < acc:
                # New best validation accuracy: save a 'best' checkpoint and
                # immediately regenerate test-set predictions with it.
                best_acc = acc
                checkpoint = {'model_state_dict': model.state_dict(),
                              'current_accuracy': acc}
                torch.save(checkpoint, save_checkpoint_path.format(
                    modelname=modelname, 
                    type='best', 
                    epoch=epoch
                    ))
                
                test()

            elif (epoch % SAVE_EPOCH == 0) or (epoch == MAX_EPOCH):
                # Periodic / final snapshot even without an accuracy improvement.
                checkpoint = {'model_state_dict': model.state_dict(),
                              'current_accuracy': acc}
                torch.save(checkpoint, save_checkpoint_path.format(
                    modelname=modelname, 
                    type='regular',
                    epoch=epoch, 
                    ))

    print('Training Task Finished.')


# Copy saved checkpoints into the archive/output directory for packaging.
# NOTE(review): these imports execute only after the __main__ block above has
# finished, yet `mox`/`Context` are also referenced earlier in the file —
# verify the import order actually makes them available where used.
import moxing as mox
from naie.context import Context
mox.file.copy_parallel(SAVE_CHECKPOINT_DIR, os.path.join(Context.get_output_path(level='algo'), 'saved_checkpoints'))
print('saved checkpoints copied done.')
