import scipy.io as scio
import scipy
import numpy as np
import mat4py
import h5py
import numpy
import cv2
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import sys

# Evaluation-time preprocessing: only convert the PIL image / ndarray into a
# float tensor scaled to [0, 1]. Normalization is intentionally disabled.
test_transform = transforms.Compose(
    [
        transforms.ToTensor(),
        # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
)

# Quick visual sanity check: load one face image out of a MATLAB .mat file
# and display it with PIL.
# path = '/home/zsy/Desktop/sex/data/dataset/saved_dataset_batch4.mat'
image_path = "/home/zsy/Desktop/sex/data/face_im/face_im23.mat"
# BUG FIX: removed the trailing space from the label path — opening
# ".../age_label1.txt " (with a space) would fail on most filesystems.
age_label_path = "/home/zsy/Desktop/sex/data/age_label/age_label1.txt"
im_flip_path = "/home/zsy/Desktop/sex/data/im_flip/im_flip1.txt"

# Load the MATLAB file; 'face_im' appears to be a cell array of images.
image_data = scio.loadmat(image_path)
faces = image_data['face_im']
# Inspect the element type of the first cell (expected: numpy.ndarray).
print(type(faces[0, 0]))

# NOTE(review): ToPILImage expects a (C, H, W) tensor or an (H, W, C)
# ndarray. torch.Tensor(faces[0, 0]) keeps the ndarray's original layout,
# so this assumes the stored image is already channel-first — TODO confirm.
x = torch.Tensor(faces[0, 0])
unloader = transforms.ToPILImage()
image = x.cpu().clone()  # clone so the displayed copy is detached from x
# image = image.squeeze(0)  # remove the fake batch dimension
image = unloader(image)
image.show()

# data=h5py.File(path)
# x=list(data.keys())
# print(x)
# # print(type(data['age_label'].value))
# print(data['face_im'].value)
# # print(type(data['gender_label'].value))
# t=data['face_im']
# print(t.size)
# print(sys.getsizeof(t))
# b=data['face_im'].value[1,1]
# print(1)
#
# myfile=h5py.File(path)
# data = [myfile[element[0]][:] for element in myfile['face_im']]
# print (data[1].shape)
# print(type(feature))  # read the .mat file
# print(feature.keys() )
# print(feature.values())


# student1 = mat4py.loadmat(path)
# student1 = student1['student']
# print
# type(student1)  # dict
# print
# ','.join(['%s' % key for key, val in student1.iteritems()])