import pickle
import torch
import sys
import os
import numpy as np
import platform
from PIL import Image

from matplotlib import pyplot as plt
import matplotlib

# Global compute device; assumes a CUDA GPU is present — TODO confirm.
# NOTE(review): `device` is not referenced anywhere in this file's visible code.
device = torch.device("cuda:0")
class DataLoader(object):
	"""Load pickled 64x64 image sequences and per-frame brightness deltas.

	Expected on-disk layout (relative to the working directory):
		datasets/<split>/<name>/<place>/target_dic.pickle
	where <split> is one of 'test', 'train', 'valid'.  Each pickle holds a
	dict whose values are sequences; value[0] is assumed to be a
	(seq_len, 64, 64) image stack — TODO confirm against the pickling code.
	"""

	def __init__(self, loaddatasets=True, MAX_SEQ_LEN=100):
		"""Set up state and optionally load all three dataset splits.

		loaddatasets -- when True, eagerly load all pickles and precompute
		                normalized brightness deltas for 'train'/'valid'.
		MAX_SEQ_LEN  -- number of frames per sequence used for deltas.
		"""
		# Per-split normalization statistics, filled by NormalizeDelta().
		self.Normalize_meanVal = {}
		self.Normalize_stdVal = {}
		self.Normalize_minVal = {}
		self.Normalize_maxVal = {}
		self.MAX_SEQ_LEN = MAX_SEQ_LEN
		print('On ', platform.system())
		# Platform-specific path separator (os.path.join would avoid this;
		# kept so the generated path strings stay identical to before).
		if platform.system() == 'Windows':
			self.split = '\\'
		else:
			self.split = '/'
		self.rootdir = 'datasets'
		# Sequential batch cursors, one per split.
		self.dataPointer = {'test': 0, 'train': 0, 'valid': 0}
		self.deltaPointer = {'test': 0, 'train': 0, 'valid': 0}
		self.dataTensor = {}
		self.deltaTensor = {}
		self.num_feats = 1
		if loaddatasets:
			self.dataTensor['test'] = self.LoadPickleFile('test')
			self.dataTensor['train'] = self.LoadPickleFile('train')
			self.dataTensor['valid'] = self.LoadPickleFile('valid')
			# NOTE(review): deltas are never computed for 'test', so
			# get_delta_batch(..., 'test') raises KeyError — confirm intended.
			self.deltaTensor['train'] = self.CalDelta('train')
			self.deltaTensor['valid'] = self.CalDelta('valid')
			self.NormalizeDelta('train')
			self.NormalizeDelta('valid')

	def LoadPickleFile(self, datasetclass):
		"""Gather every sequence under datasets/<datasetclass> into one tensor.

		Walks <root>/<datasetclass>/<name>/<place>/, unpickles each
		'target_dic.pickle', and appends value[0] of every dict entry.
		Returns a float tensor of shape (num_sequences, seq_len, 64, 64);
		seq_len is presumably 100 — verify against the pickled data.
		"""
		data = []
		datadir = self.rootdir + self.split + datasetclass
		for name in os.listdir(datadir):			# iterate over name directories
			typedir = os.path.join(datadir, name)
			for place in os.listdir(typedir):		# iterate over place directories
				filedir = os.path.join(typedir, place)
				for filename in os.listdir(filedir):	# iterate over leaf files
					if filename == 'target_dic.pickle':
						filepath = filedir + self.split + filename
						# SECURITY: pickle.load can execute arbitrary code;
						# only load files from a trusted source.
						# Fix: context manager closes the file even on error.
						with open(filepath, 'rb') as file:
							dic = pickle.load(file)
						for value in dic.values():
							# One seq_len*64*64 image sequence per dict entry.
							data.append(value[0])
		data = torch.tensor(data).type(torch.float)
		print("LoadPickle "+datasetclass+" Done! ", data.size())
		return data

	def CalDelta(self, datasetclass):
		"""Compute per-frame total-brightness deltas for a split.

		Sums all pixels of each frame, then differences consecutive frames
		up to MAX_SEQ_LEN.  Returns shape (num_sequences, MAX_SEQ_LEN-1, 1).
		"""
		pics = self.dataTensor[datasetclass]
		# Flatten each frame and sum its pixels -> per-frame brightness.
		# (Generalized from the original hard-coded reshape([-1, 100, 4096]);
		# identical for the stored 100x64x64 sequences.)
		brightness = torch.sum(pics.reshape([pics.size(0), pics.size(1), -1]), dim=2)
		delta = brightness[:, 1:self.MAX_SEQ_LEN] - brightness[:, 0:self.MAX_SEQ_LEN - 1]
		delta = torch.unsqueeze(delta, dim=2)
		print('CalDelta '+datasetclass+' Done! ', delta.size())
		return delta

	def NormalizeDelta(self, datasetclass):
		"""Normalize deltaTensor[datasetclass] in place and record the stats.

		mode selects the scheme: 'max-min' (the active choice) rescales to
		[0, 1]; 'Z-score' standardizes to zero mean / unit variance.
		"""
		mode = 'max-min'	# renamed from 'type' to stop shadowing the builtin
		print('数据集来自: ', datasetclass)
		if mode == 'Z-score':
			# x = (x - mean(x)) / std(x)
			self.Normalize_meanVal[datasetclass] = torch.mean(self.deltaTensor[datasetclass])
			self.Normalize_stdVal[datasetclass] = torch.std(self.deltaTensor[datasetclass])
			self.deltaTensor[datasetclass] = (self.deltaTensor[datasetclass] - self.Normalize_meanVal[datasetclass]) / self.Normalize_stdVal[datasetclass]
			print('mean,std:', self.Normalize_meanVal[datasetclass], self.Normalize_stdVal[datasetclass])
		elif mode == 'max-min':
			# x = (x - min) / (max - min)
			# NOTE(review): divides by zero when all deltas are equal —
			# assumed not to happen on real data.
			self.Normalize_minVal[datasetclass] = torch.min(self.deltaTensor[datasetclass])
			self.Normalize_maxVal[datasetclass] = torch.max(self.deltaTensor[datasetclass])
			self.deltaTensor[datasetclass] = (self.deltaTensor[datasetclass] - self.Normalize_minVal[datasetclass]) / (self.Normalize_maxVal[datasetclass] - self.Normalize_minVal[datasetclass])
			print('min,max:', self.Normalize_minVal[datasetclass], self.Normalize_maxVal[datasetclass])

	def Anti_NormalizeDelta(self, data):
		"""Invert NormalizeDelta using the statistics recorded for 'train'.

		The mode here must match the one used in NormalizeDelta ('max-min').
		Raises KeyError if the 'train' statistics were never recorded.
		"""
		mode = 'max-min'	# renamed from 'type' to stop shadowing the builtin
		if mode == 'Z-score':
			data = data * self.Normalize_stdVal['train'] + self.Normalize_meanVal['train']
		elif mode == 'max-min':
			data = data * (self.Normalize_maxVal['train'] - self.Normalize_minVal['train']) + self.Normalize_minVal['train']
		return data

	def rewind(self, datasetclass):
		"""Reset the raw-data batch cursor for a split to the start."""
		self.dataPointer[datasetclass] = 0

	def rewind_delta(self, datasetclass):
		"""Reset the delta batch cursor for a split to the start."""
		self.deltaPointer[datasetclass] = 0

	def get_batch(self, BATCH_SIZE, MAX_SEQ_LEN, datasetclass):
		"""Return the next batch of raw sequences, flattened per frame.

		Shape: (batch, seq, 64*64).  The final batch of an epoch may be
		smaller (possibly empty); call rewind() between epochs.
		"""
		MAX_BATCH = self.dataTensor[datasetclass].size(0)
		# Clip the final, partial batch.
		if self.dataPointer[datasetclass] + BATCH_SIZE > MAX_BATCH:
			BATCH_SIZE = MAX_BATCH - self.dataPointer[datasetclass]
		pointer = self.dataPointer[datasetclass]
		data = self.dataTensor[datasetclass][pointer:pointer + BATCH_SIZE, 0:MAX_SEQ_LEN, :, :]
		self.dataPointer[datasetclass] = pointer + BATCH_SIZE
		# Fix: use the actual sliced sequence length instead of the original
		# hard-coded 100, which crashed for any MAX_SEQ_LEN != 100.
		data = data.reshape([BATCH_SIZE, data.size(1), 64 * 64])
		return data

	def get_delta_batch(self, BATCH_SIZE, MAX_SEQ_LEN, datasetclass):
		"""Return the next batch of (normalized) brightness deltas.

		MAX_SEQ_LEN is accepted for symmetry with get_batch but unused:
		deltas were already truncated in CalDelta.  Raises KeyError for a
		split whose deltas were never computed (currently 'test').
		"""
		MAX_BATCH = self.dataTensor[datasetclass].size(0)
		# Clip the final, partial batch.
		if self.deltaPointer[datasetclass] + BATCH_SIZE > MAX_BATCH:
			BATCH_SIZE = MAX_BATCH - self.deltaPointer[datasetclass]
		pointer = self.deltaPointer[datasetclass]
		data = self.deltaTensor[datasetclass][pointer:pointer + BATCH_SIZE, :, :]
		self.deltaPointer[datasetclass] = pointer + BATCH_SIZE
		return data

	def print_in_plot(self, deltadata, epoch, epoch_classify):
		"""Plot one delta sequence and save it under Training_Out/SEQ_LEN<k>/."""
		plt.clf()
		deltadata = deltadata.squeeze()
		num_frames = deltadata.size(0)	# renamed from 'range' (shadowed builtin)
		x = np.arange(1, num_frames + 1)
		y = deltadata.reshape([num_frames]).detach().numpy()
		plt.title("SEQ_LEN = " + str(self.MAX_SEQ_LEN))
		plt.xlabel("Frame")
		plt.ylabel("Bright delta")
		plt.plot(x, y)
		DIR = "Training_Out" + self.split + "SEQ_LEN" + str(self.MAX_SEQ_LEN)
		if not os.path.exists(DIR):
			os.makedirs(DIR)
		plt.savefig(DIR + self.split + "plt_" + epoch_classify + str(epoch) + ".png")

	def save_data(self, picname, picdata):
		"""Save a 100-frame sequence as one wide 64x6400 grayscale JPEG.

		Assumes pixel values in [0, 1] and exactly 100 frames of 64x64,
		and that the 'Out' directory exists — TODO confirm with callers.
		"""
		picdata = picdata.reshape([100, 64, 64])
		# Lay the 100 frames side by side into a single 64x6400 strip.
		v = np.hstack(picdata).reshape(64, 6400)
		v = v * 255
		im = Image.fromarray(v)
		if im.mode == "F":
			# Float images cannot be written as JPEG; convert to 8-bit gray.
			im = im.convert('L')
		im.save("Out" + self.split + picname + ".jpeg")


if __name__ == "__main__":
	# Smoke test: load all splits from disk, then fetch and print one
	# batch of normalized brightness deltas from the training split.
	SEQ_LEN = 20
	loader = DataLoader(MAX_SEQ_LEN=SEQ_LEN)
	batch = loader.get_delta_batch(1, MAX_SEQ_LEN=SEQ_LEN, datasetclass='train')
	print(batch)
