# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang

# #################### datalist.py notes ##########################
# This module prepares the data pipeline (Dataset classes) used by train.py.
import os
import random

import cv2
import numpy as np
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm

# ToTensor()
# Converts an (H, W, C) numpy.ndarray or PIL image into a (C, H, W) tensor,
# normalizing each value into [0, 1] by simply dividing by 255.


# Normalize()
# Adding transforms.Normalize() after transforms.ToTensor() in transforms.Compose, e.g.:
# transforms.Compose([transforms.ToTensor(), transforms.Normalize(std=(0.5,0.5,0.5), mean=(0.5,0.5,0.5))])
# first normalizes the input into (0, 1), then applies (x - mean) / std so each
# element is mapped into (-1, 1).

# Image-level transform applied in __getitem__: replicate the single grayscale
# channel into 3 identical channels. Tensor conversion / normalization is done
# by the datasets themselves, so ToTensor/Normalize are intentionally omitted.
data_transform = transforms.Compose([
    transforms.Grayscale(num_output_channels=3),
])
'''
Uses image files plus CSV ground-truth files.

NOTE: with this dataloader design (variable image sizes per sample),
the batch size can only be set to 1.
'''


class MCNNDataset(Dataset):
    """Crowd-counting dataset pairing images with density-map CSV files.

    Images are listed from ``data_path``; the ground-truth density map for
    an image lives in ``gt_path`` under the same basename with a ``.csv``
    extension.

    Args:
        data_type: label for this split (e.g. train/val), used for display only.
        data_path: directory containing the image files.
        gt_path: directory containing the density-map CSV files.
        shuffle: when True, seed the RNG so shuffled runs are reproducible.
        preload: accepted for interface compatibility; unused by this class.
    """

    def __init__(self, data_type, data_path, gt_path, shuffle=True, preload=False):
        super(MCNNDataset, self).__init__()
        self.data_type = data_type
        self.data_path = data_path
        self.gt_path = gt_path
        # Only plain files count as samples; sub-directories are skipped.
        self.data_files = [filename for filename in os.listdir(self.data_path) if
                           os.path.isfile(os.path.join(self.data_path, filename))]
        self.data_files.sort()  # deterministic sample order by filename
        self.down_sampling = False
        # Fix the seed so experiments with shuffling can be reproduced.
        if shuffle:
            random.seed(2468)

    def __len__(self):
        return len(self.data_files)

    def __getitem__(self, index):
        """Return (image_tensor, density_tensor) for the sample at *index*.

        The image is loaded as grayscale and resized so both sides are
        multiples of 4; the density map is resized to 1/4 resolution and
        rescaled so its integral (the person count) is preserved.
        """
        fname = self.data_files[index]
        img = Image.open(os.path.join(self.data_path, fname)).convert('L')
        w = img.width
        h = img.height
        # Truncate dimensions to multiples of 4 so network downsampling divides evenly.
        width = img.width // 4 * 4
        height = img.height // 4 * 4
        img = img.resize((width, height))
        den = pd.read_csv(os.path.join(self.gt_path, os.path.splitext(fname)[0] + '.csv'), sep=',',
                          header=None).values
        den = den.astype(np.float32, copy=False)

        # Density map at 1/4 resolution; scale values so the sum (count) is unchanged.
        wd_1 = width // 4
        ht_1 = height // 4
        den = cv2.resize(den, (wd_1, ht_1))
        den = den * ((w * h) / (wd_1 * ht_1))

        # BUG FIX: torch.tensor() cannot infer a dtype from a PIL Image, so the
        # original torch.tensor(data_transform(img)) raised at runtime. Convert
        # to a float32 ndarray first (same approach as MCNNDataset2.__getitem__).
        img_arr = np.asarray(data_transform(img), dtype=np.float32)
        return torch.from_numpy(img_arr), torch.tensor(den)


class MCNNDataset2(Dataset):
    """Crowd-counting dataset over a list of full image paths.

    Each entry of ``data`` is a full path to a ``.jpg`` image; the matching
    density-map CSV is found by replacing ``.jpg`` with ``.csv`` and the
    ``img`` path component with ``den`` (see :meth:`_den_path`).

    Args:
        data_type: label for this split (e.g. train/val), used for display.
        data: list of full image paths.
        shuffle: when True, seed the RNG so shuffled runs are reproducible.
        preload: when True, eagerly load every sample into ``blob_list``.
    """

    def __init__(self, data_type, data, shuffle=True, preload=False):
        super(MCNNDataset2, self).__init__()
        self.data_type = data_type

        self.data_files = data
        self.data_files.sort()  # deterministic sample order by path
        self.down_sampling = False
        # Fix the seed so experiments with shuffling can be reproduced.
        if shuffle:
            random.seed(2468)

        # Cache of preloaded samples; NOTE(review): __getitem__ does not read
        # this cache, so preloading currently only warms the page cache.
        self.blob_list = []
        if preload:
            self.preload_data()

    @staticmethod
    def _den_path(fname):
        # Map an image path to its density-map CSV path (img/...jpg -> den/...csv).
        return fname.replace('.jpg', '.csv').replace('img', 'den')

    def preload_data(self):
        """Eagerly load every (image, density) pair into ``self.blob_list``."""
        print(self.data_type, " data preload will cost a long time, please be patient...")

        for fname in tqdm(self.data_files, desc=str(self.data_type)):
            # BUG FIX: data_files holds full paths and this class never sets
            # data_path/gt_path, so the original os.path.join(self.data_path, ...)
            # raised AttributeError. Open the path directly, as __getitem__ does.
            img = Image.open(fname).convert('L')
            w = img.width
            h = img.height
            # Truncate dimensions to multiples of 4 so downsampling divides evenly.
            width = img.width // 4 * 4
            height = img.height // 4 * 4
            # BUG FIX: PIL resize returns a new image; the original discarded it.
            img = img.resize((width, height))
            den = pd.read_csv(self._den_path(fname), sep=',',
                              header=None).values
            den = den.astype(np.float32, copy=False)
            if self.down_sampling:  # downsample so a batch computes faster, as is typical
                wd_1 = width // 4
                ht_1 = height // 4
                den = cv2.resize(den, (wd_1, ht_1))
                den = den * ((w * h) / (wd_1 * ht_1))
            else:
                den = cv2.resize(den, (width, height))
                den = den * ((w * h) / (width * height))

            # Shape the density map as (1, H, W, 1) for downstream consumers.
            den = den.reshape((1, den.shape[0], den.shape[1], 1))
            blob = {'data': img, 'gt_density': den, 'fname': fname}
            self.blob_list.append(blob)

        print(self.data_type, " Complete load data")

    def __len__(self):
        return len(self.data_files)

    def __getitem__(self, index):
        """Return (image_tensor, density_tensor) for the sample at *index*.

        The image tensor is float32 with a leading channel dim of 1; the
        density map is resized to 1/4 resolution and rescaled so its integral
        (the person count) is preserved.
        """
        fname = self.data_files[index]
        img = Image.open(fname).convert('L')
        w = img.width
        h = img.height
        # Truncate dimensions to multiples of 4 so downsampling divides evenly.
        width = img.width // 4 * 4
        height = img.height // 4 * 4
        img = img.resize((width, height))
        den = pd.read_csv(self._den_path(fname), sep=',',
                          header=None).values
        den = den.astype(np.float32, copy=False)

        wd_1 = width // 4
        ht_1 = height // 4

        den = cv2.resize(den, (wd_1, ht_1))
        den = den * ((w * h) / (wd_1 * ht_1))

        return torch.from_numpy(np.array(img, dtype=np.float32)).unsqueeze(0), torch.tensor(den).unsqueeze(0)
