#coding=utf-8
import pickle
import torch
import sys
import os
import numpy as np
from PIL import Image
import platform

# Use the first CUDA GPU when available, otherwise fall back to CPU so that
# later .to(device) calls don't crash on machines without CUDA.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DataLoader(object):
	"""Loads pickled image sequences from ``datasets/`` and serves them in
	batches as flattened, 0-1 normalized torch tensors.

	Attributes:
		split: platform path separator (kept for callers that read it;
			internal path building uses os.path.join instead).
		rootdir: root directory containing test/train/valid subdirectories.
		dataPointer: per-dataset cursor into the sample tensors.
		dataTensor: maps dataset name -> tensor [sample, seq, height, width].
		num_feats: flattened frame size (64*64 pixels).
	"""

	def __init__(self, loaddatasets=True):
		print('On ', platform.system())
		# Kept for backward compatibility with code that reads self.split.
		if platform.system() == 'Windows':
			self.split = '\\'
		else:
			self.split = '/'
		self.rootdir = "datasets"
		self.dataPointer = {'test': 0, 'train': 0, 'valid': 0}
		self.dataTensor = {}  # self.dataTensor[dataset][sample:seq:height:width]
		self.num_feats = 64 * 64
		if loaddatasets:
			self.dataTensor['test'] = self.LoadPickleFile('test')
			self.dataTensor['train'] = self.LoadPickleFile('train')
			self.dataTensor['valid'] = self.LoadPickleFile('valid')
			print("Train size = ", self.dataTensor['train'].size())
			print("Valid size = ", self.dataTensor['valid'].size())

	def LoadPickleFile(self, datasetclass):
		"""Collect every sequence stored in
		``datasets/<datasetclass>/<name>/<place>/target_dic.pickle`` files.

		Returns:
			Float tensor on ``device`` holding all sequences; presumably
			shaped [num_samples, 100, 64, 64] — TODO confirm against the
			pickle producer.
		"""
		data = []
		datadir = os.path.join(self.rootdir, datasetclass)
		for name in os.listdir(datadir):              # iterate over names
			typedir = os.path.join(datadir, name)
			for place in os.listdir(typedir):         # iterate over places
				filedir = os.path.join(typedir, place)
				for filename in os.listdir(filedir):  # iterate leaf files
					if filename != 'target_dic.pickle':
						continue
					filepath = os.path.join(filedir, filename)
					# NOTE: pickle is unsafe on untrusted input; these are
					# assumed to be locally generated dataset files.
					# `with` guarantees the handle is closed even if
					# pickle.load raises.
					with open(filepath, 'rb') as f:
						dic = pickle.load(f)
					for value in dic.values():
						# append one 100*64*64 image sequence to the list
						data.append(value[0])
		# Stack with numpy first: torch.tensor() on a Python list of arrays
		# is dramatically slower than converting one contiguous ndarray.
		return torch.tensor(np.array(data)).type(torch.float).to(device)

	def rewind(self, datasetclass):
		"""Reset the pointer of *datasetclass* to 0.

		Call before each epoch so real samples are drawn from the start.
		"""
		self.dataPointer[datasetclass] = 0

	def get_batch(self, BATCH_SIZE, MAX_SEQ_LEN, datasetclass):
		"""Return the next batch of real samples from *datasetclass*.

		If fewer than BATCH_SIZE samples remain in the epoch, only the
		remainder is returned. Advances the dataset pointer accordingly.

		Returns:
			Tensor of shape [real_batch, MAX_SEQ_LEN, 64*64], pixel values
			divided by 255.
		"""
		pointer = self.dataPointer[datasetclass]
		remaining = self.dataTensor[datasetclass].size(0) - pointer
		real_batch = min(BATCH_SIZE, remaining)
		# dim 0 = sample, dim 1 = sequence step, dims 2/3 = frame height/width
		data = self.dataTensor[datasetclass][pointer:pointer + real_batch, 0:MAX_SEQ_LEN, :, :]
		self.dataPointer[datasetclass] = pointer + real_batch
		# Flatten each 64x64 frame and normalize pixel values to [0, 1].
		data = data.reshape([real_batch, MAX_SEQ_LEN, self.num_feats])
		return data / 255

	def save_data(self, filename, picdata):
		"""Tile a 100-frame 64x64 sequence horizontally into one 64x6400
		image and save it as ``Training_Out/<filename>.jpeg``.

		*picdata* is expected to hold normalized [0, 1] values (it is
		multiplied back by 255 before saving).
		"""
		picdata = picdata.reshape([100, 64, 64])
		# hstack + reshape concatenates the 100 frames side by side into
		# a single 64x6400 strip.
		v = np.hstack(picdata).reshape(64, 6400)
		v = v * 255
		im = Image.fromarray(v)
		if im.mode == "F":
			im = im.convert('L')  # JPEG cannot store 32-bit float mode
		im.save(os.path.join("Training_Out", filename + ".jpeg"))