#!/usr/bin/env python
# coding: utf-8
# Training script: k-fold training of a segmentation model (presumably UNet++,
# judging by the log/checkpoint names — model definition lives in models.get_model).
import os
import sys
import numpy as np
import time
from tqdm import tqdm
import warnings
import logging
# Silence all library warnings to keep the tqdm progress bars and log readable.
warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold
import torch
from torch.utils.data import DataLoader, Dataset, Subset
# Restrict training to GPU 0.
# NOTE(review): this is set *after* `import torch`; CUDA initializes lazily so it
# usually still takes effect, but conventionally it should be set before the import.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Make the sibling modules (utils, dataset, models) importable regardless of
# the working directory the script is launched from.
root_dir = os.path.dirname(os.path.abspath(__file__))
print(root_dir)
sys.path.append(root_dir)
from utils import loss_fn, set_seeds, validation
from dataset import TianChiDataset, train_trfm
from models import get_model
# Fix RNG seeds for reproducibility (exact seeding behavior defined in utils).
set_seeds()

# ---- hyper-parameters -------------------------------------------------------
EPOCHES = 120
BATCH_SIZE = 4
IMAGE_SIZE = 512
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Per-epoch progress is appended to this log file.
logging.basicConfig(filename='log_unetplusplus_sh_fold_3_continue2.log',
                    format='%(asctime)s - %(name)s - %(levelname)s -%(module)s:  %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S ',
                    level=logging.INFO)

# Column header matching the per-epoch rows produced by `raw_line` below.
header = r'''
        Train | Valid
Epoch |  Loss |  Loss | Time, m
'''
# One row per epoch: epoch index, train loss, valid loss, elapsed minutes.
# \u2502 is the box-drawing vertical bar used as the column separator.
raw_line = '{:6d}' + '\u2502{:7.4f}'*2 + '\u2502{:6.2f}'
logging.info(header)

# Dataset root directory.
# (A dead assignment to a local-machine path '/home/slz/data/earth_online' that
# was immediately overwritten has been removed; only this value was ever used.)
data_dir = '/dataset/earth_online'


# Full training dataset; transforms (train_trfm) are applied on the fly.
dataset = TianChiDataset(data_dir=data_dir,
                         transform=train_trfm,
                         test_mode=False)

# 5-fold cross-validation: the data is split into 5 subsets and each fold
# serves once as the validation split. No shuffle/seed is configured, so the
# splits are deterministic across runs.
# (Removed: an `idx` array built from range(len(dataset)) that was never used.)
skf = KFold(n_splits=5)
# 5折，每一折分别作为验证集进行训练测试
# Train/validate on each of the 5 folds in turn.
for fold_idx, (train_idx, valid_idx) in enumerate(skf.split(dataset)):
    # This run handles fold 3 only (other folds are trained by sibling runs,
    # as suggested by the fold-specific log/checkpoint file names).
    if fold_idx != 3:
        continue

    # Index-based train/validation subsets for this fold.
    train_ds = Subset(dataset, train_idx)
    valid_ds = Subset(dataset, valid_idx)

    # Training and validation data loaders.
    loader = DataLoader(
        train_ds, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
    vloader = DataLoader(
        valid_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    model = get_model()
    model.to(DEVICE)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-3)
    # Halve the learning rate when the validation loss plateaus for 5 epochs.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=5)

    # Best validation loss so far. Start at +inf so the first epoch always
    # produces a checkpoint (the previous hard-coded 10 would never save a
    # model if the validation loss stayed above 10).
    best_loss = float('inf')
    for epoch in range(EPOCHES):
        losses = []
        start_time = time.time()

        model.train()
        for image, target in tqdm(loader):
            image, target = image.to(DEVICE), target.float().to(DEVICE)
            optimizer.zero_grad()
            output = model(image)
            loss = loss_fn(output, target)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())

        # Validate, step the LR scheduler on the validation loss, then log one
        # row: epoch | mean train loss | valid loss | elapsed minutes.
        vloss = validation(model, vloader, loss_fn)
        scheduler.step(vloss)
        logging.info(raw_line.format(epoch, np.mean(losses), vloss,
                                     (time.time() - start_time) / 60))

        # Checkpoint whenever the validation loss improves.
        if vloss < best_loss:
            best_loss = vloss
            torch.save(model.state_dict(), 'fold{}_uppmodel_new3.pth'.format(fold_idx))
            print("best loss is {}".format(best_loss))


