from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import torch
from torchvision import transforms
import numpy as np
from sklearn.preprocessing import scale
from itertools import islice
import os
import cv2
import csv
import re
from datetime import datetime
from args import read_args

# Project-local argument reader; supplies the run configuration used below.
args = read_args()

# Transform applied to each image sample when it is fetched from the dataset.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),  # convert to Tensor and normalize pixel values to [0, 1]
])

# Dataset variant selector; also names the CSV subdirectory used by EUAData
# and (when equal to 'constrained') enables feature scaling.
if_constrained = args.if_constrained

class EUAData(Dataset):
	"""Loads hourly numeric features, daily labels, and hourly sky images.

	The three data sources are combined downstream (see ``TimeSeriesDataset``):
	- ``data_f``: sliding windows of ``step_size`` consecutive hourly feature
	  rows, taken with a stride of 9 rows.
	- ``data_l``: per-day label rows (first 4 numeric columns of each line).
	- ``data_img_f``: images resized to 128x128, in sorted filename order.

	NOTE(review): this class intentionally does not implement ``__getitem__``;
	callers fetch everything at once via ``download_data()``.
	"""

	def __init__(self, data_f_dir='/home/hlf/code/code_LSTM+CNN/data_set/' + if_constrained + '/hours.csv',
				 data_l_dir='/home/hlf/code/code_LSTM+CNN/data_set/' + if_constrained + '/days.csv',
				 data_img_f_dir = '/home/hlf/code/code_LSTM+CNN/data_set/hours_2021.6.8_2022.5.31/img'):

		def nnormalization(data_raw):
			"""Parse raw CSV lines into a float matrix.

			Skips the header line, drops 13:00 samples that fall strictly
			inside a fixed date window (they have no matching counterpart in
			the other sources — TODO confirm against the data set), and, in
			the 'constrained' configuration only, standardizes each column
			to zero mean / unit variance via sklearn's ``scale``.
			"""
			window_start = datetime(2021, 11, 1, 7, 0, 0)
			window_end = datetime(2022, 5, 31, 21, 0, 0)
			rows = []
			for line in data_raw[1:]:  # data_raw[0] is the CSV header
				fields = line.strip().split(',')
				stamp = datetime.strptime(fields[0], "%Y/%m/%d %H:%M")
				if window_start < stamp < window_end and stamp.hour == 13:
					continue
				rows.append([float(v) for v in fields[1:]])
			rows = np.array(rows)
			if if_constrained == 'constrained':
				return scale(rows)
			return rows

		self.step_size = 27
		# Read both CSVs with context managers so the handles are always
		# closed (the previous version leaked both file objects).
		with open(data_f_dir, 'r') as f_data_f:
			_data_f = f_data_f.readlines()
		with open(data_l_dir, 'r') as f_data_l:
			_data_l = f_data_l.readlines()

		data_f_test = nnormalization(_data_f)
		self.data_f = []
		self.data_l = []
		self.data_img_f = []

		# Numeric features: sliding windows of step_size rows, stride 9.
		for i in range(0, len(data_f_test) - self.step_size, 9):
			self.data_f.append([data_f_test[i + j] for j in range(self.step_size)])

		# Labels: skip the first 4 lines (header/preamble — TODO confirm),
		# keep columns 1..4 of each remaining row.
		for line in _data_l[4:]:
			self.data_l.append(line.strip().split(',')[1:5])

		# Images: read every file in sorted name order, resize to 128x128.
		for file_name in sorted(os.listdir(data_img_f_dir)):
			img_path = os.path.join(data_img_f_dir, file_name)
			img = cv2.imread(img_path)
			if img is None:
				# cv2.imread returns None (no exception) on unreadable or
				# non-image files; fail loudly instead of crashing later
				# inside cv2.resize with an opaque error.
				raise IOError('cannot read image file: ' + img_path)
			self.data_img_f.append(cv2.resize(img, (128, 128)))

	def download_data(self):
		"""Return the three parallel collections: (features, images, labels)."""
		return self.data_f, self.data_img_f, self.data_l

	def __len__(self):
		return len(self.data_f)


class TimeSeriesDataset(Dataset):
	"""Triple-source dataset: numeric windows, raw images, and targets.

	``x`` and ``y`` are cast to float32 up front; each image is run through
	the module-level ``train_transform`` lazily, at fetch time.
	"""

	def __init__(self, x, img_x, y):
		self.x = x.astype(np.float32)
		self.img_x = img_x
		self.y = y.astype(np.float32)
		self.transform = train_transform

	def __len__(self):
		return len(self.x)

	def __getitem__(self, idx):
		features = self.x[idx]
		image = self.transform(self.img_x[idx])
		target = self.y[idx]
		return features, image, target


if __name__ == "__main__":
	# Smoke test: load the dataset and show one feature window.
	test = EUAData()
	# EUAData does not implement __getitem__, so calling test[1] would hit
	# torch's base Dataset and raise NotImplementedError; index the loaded
	# feature windows directly instead.
	print(test.data_f[1])
	print('ok')