#!/usr/bin/env python
# coding: utf-8


import os
import cv2
import torch
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold
import albumentations as A  # 数据增强

import matplotlib.pyplot as plt
from torchvision import transforms as T
from torch.utils.data import DataLoader, Dataset, Subset



# Global training configuration.
EPOCHES = 120  # number of training epochs (note: usual spelling is "EPOCHS")
BATCH_SIZE = 8
IMAGE_SIZE = 512  # images and masks are resized to IMAGE_SIZE x IMAGE_SIZE
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


# Training-time augmentation pipeline (albumentations applies the same
# geometric transforms to image and mask jointly).
train_trfm = A.Compose([
    # A.RandomCrop(NEW_SIZE*3, NEW_SIZE*3),
    A.Resize(IMAGE_SIZE, IMAGE_SIZE),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(),
    A.OneOf([
        A.RandomGamma(),  # random gamma (brightness) correction
        A.RandomBrightnessContrast(),  # random brightness / contrast
        A.ColorJitter(brightness=0.07, contrast=0.07,
                   saturation=0.1, hue=0.1, always_apply=False, p=0.3),  # random jitter of brightness, saturation, hue, contrast
        ], p=0.3),
    ])


# Validation pipeline.
# NOTE(review): random flips/rotations in a *validation* transform are
# unusual — this looks like test-time augmentation; confirm it is intentional.
val_trfm = A.Compose([
    # A.CenterCrop(NEW_SIZE, NEW_SIZE),
    A.Resize(IMAGE_SIZE, IMAGE_SIZE),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(),
])

# mask转游程编码
# mask -> run-length encoding
def rle_encode(im: np.ndarray):
    """Convert a binary mask to its run-length-encoded string.

    im: numpy array, 1 - mask, 0 - background
    Returns the RLE string "start length start length ..." with
    1-based starts over the column-major (Fortran-order) flattening.
    """
    # Flatten column-major and pad with zeros so every run of ones has
    # a transition on both sides.
    padded = np.concatenate([[0], im.flatten(order='F'), [0]])
    # 1-based indices where consecutive pixel values differ.
    transitions = np.flatnonzero(padded[1:] != padded[:-1]) + 1
    # Even slots are run starts; convert the following slot from the
    # end position into the run length.
    transitions[1::2] -= transitions[::2]
    return ' '.join(map(str, transitions))

# 游程编码转mask
# run-length encoding -> mask
def rle_decode(mask_rle: str, shape=(512, 512)):
    """Rebuild a binary mask from a run-length-encoded string.

    mask_rle: RLE string "start length start length ..." (1-based starts,
        column-major order)
    shape: (height, width) of array to return
    Returns numpy array, 1 - mask, 0 - background.
    """
    # Non-string input (e.g. NaN from pandas) decodes to an all-zero mask.
    tokens = mask_rle.split() if isinstance(mask_rle, str) else []
    starts = np.asarray(tokens[0::2], dtype=int) - 1  # to 0-based offsets
    lengths = np.asarray(tokens[1::2], dtype=int)
    canvas = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, span in zip(starts, lengths):
        canvas[begin:begin + span] = 1
    # Reshape column-major to match the encoder's flatten(order='F').
    return canvas.reshape(shape, order='F')



class TianChiDataset(Dataset):
    """Segmentation dataset for the TianChi remote-sensing challenge.

    Reads ``<data_dir>/train_mask.csv`` (tab separated: image filename, RLE
    mask) and serves ``(image_tensor, mask)`` pairs.  In ``test_mode`` the
    mask is skipped and an empty string is returned as the target.
    """

    def __init__(self, data_dir, transform, test_mode=False):
        self.data_dir = data_dir
        # Populates self.paths and self.rles from the CSV.
        self.read_data()
        self.transform = transform
        self.test_mode = test_mode
        # BGR uint8 image (as loaded by cv2) -> normalized float tensor.
        # NOTE(review): the mean/std values look dataset-specific — confirm
        # they match the training statistics.
        self.as_tensor = T.Compose([
            T.ToPILImage(),
            T.Resize(IMAGE_SIZE),
            T.ToTensor(),
            T.Normalize([0.625, 0.448, 0.688],
                        [0.131, 0.177, 0.101]),
        ])

    def read_data(self):
        """Load image paths and RLE mask strings, caching them on self.

        Returns:
            (paths, rles): arrays of absolute image paths and RLE strings
            (missing masks become '' so rle_decode yields an empty mask).
        """
        data_path = os.path.join(self.data_dir, 'train_mask.csv')
        train_mask = pd.read_csv(data_path, sep='\t', names=['name', 'mask'])
        train_mask['name'] = train_mask['name'].apply(
            lambda x: os.path.join(self.data_dir, 'train', x))
        self.paths = train_mask['name'].values
        # Rows without a mask are NaN in pandas; normalize to ''.
        self.rles = train_mask['mask'].fillna('').values
        return self.paths, self.rles

    def __getitem__(self, index):
        """Return (tensor_image, mask[1, H, W]) or (tensor_image, '') in test mode."""
        img = cv2.imread(self.paths[index])
        if self.test_mode:
            return self.as_tensor(img), ''
        mask = rle_decode(self.rles[index])
        augments = self.transform(image=img, mask=mask)
        # [None] prepends the channel axis expected by segmentation losses.
        return self.as_tensor(augments['image']), augments['mask'][None]

    def __len__(self):
        """Total number of samples in the dataset."""
        return len(self.paths)


def test():
    """Round-trip sanity check: decode the first training mask, re-encode
    it, and print whether the result matches the original RLE string."""
    root = '/home/slz/data/earth_online'
    csv_path = os.path.join(root, 'train_mask.csv')
    frame = pd.read_csv(csv_path, sep='\t', names=['name', 'mask'])
    frame['name'] = frame['name'].apply(lambda fn: os.path.join(root, 'train', fn))

    # Encode(decode(x)) should reproduce x exactly for a valid RLE string.
    img = cv2.imread(frame['name'].iloc[0])
    mask = rle_decode(frame['mask'].iloc[0])
    print(rle_encode(mask) == frame['mask'].iloc[0])



if __name__ == "__main__":
    # Smoke-test the dataset: build it, split into folds, pull one batch.
    data_dir = '/home/slz/data/earth_online'

    dataset = TianChiDataset(data_dir=data_dir,
                            transform=train_trfm, 
                            test_mode=False
        )

    skf = KFold(n_splits=5)  # 5-fold cross-validation: split into 5 subsets
    idx = np.array(range(len(dataset)))  # NOTE(review): unused — KFold.split only needs len(dataset)
    # Each of the 5 folds takes a turn as the validation set.
    for fold_idx, (train_idx, valid_idx) in enumerate(skf.split(dataset, y=None)):
        # Only exercise a single fold here.
        if fold_idx != 3:
            continue
        # Select the train/validation subsets by index.
        train_ds = Subset(dataset, train_idx)
        valid_ds = Subset(dataset, valid_idx)

        # define training and validation data loaders
        loader = DataLoader(
            train_ds, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

        vloader = DataLoader(
            valid_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
        
        # Inspect one batch then bail out.
        for image, target in loader:
            print(image)
            print(target)
            exit()
