import caffe
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PyQt4.QtCore import *
from PyQt4.QtGui import *


def tile_filters(data):
    """Normalize an array of filters/feature maps and tile them into one mosaic.

    Parameters
    ----------
    data : ndarray of shape (n_filters, height, width[, channels])

    Returns
    -------
    ndarray
        A square mosaic with each filter scaled to [0, 1] and separated by
        one-pixel white (1.0) borders.
    """
    rng = data.max() - data.min()
    # BUGFIX: guard against division by zero — the original produced NaNs
    # when every element of `data` was equal (constant feature map).
    if rng == 0:
        data = np.zeros_like(data, dtype=float)
    else:
        data = (data - data.min()) / rng

    # Force the number of tiles to be a perfect square (n x n grid).
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
               (0, 1), (0, 1))                 # one-pixel white gap between tiles
               + ((0, 0),) * (data.ndim - 3))  # never pad a trailing channel axis
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with white

    # Interleave the (n, n) grid axes with each tile's row/col axes, then
    # collapse to a single (n*h, n*w[, channels]) image.
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    return data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])


def vis_square(data):
    """Show `data` as a square grid of filters/feature maps via pyplot.

    `data` has shape (n_filters, height, width[, channels]); see tile_filters.
    """
    mosaic = tile_filters(data)
    plt.imshow(mosaic)
    plt.axis('off')
    plt.show()

#image=cv2.imread("timg.jpg")
#cv2.imshow("rest",image)
#cv2.waitKey(0)


caffe.set_mode_gpu()

# Trained LeNet model definition/weights and the image to classify.
#model_def="model/lenet.prototxt"
#model_weight="model/mnist_iter_10000.caffemodel"
model_def = "model/lenet.prototxt"
model_weight = "model/mnist_iter_10000.caffemodel"
image_path = "test/f.png"

net = caffe.Net(model_def, model_weight, caffe.TEST)

# Preprocessing shared by both channel layouts: HWC float image -> CHW,
# rescaled from [0, 1] to the [0, 255] range the net was trained on.
transform = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transform.set_transpose('data', (2, 0, 1))
transform.set_raw_scale('data', 255.0)

channels = net.blobs['data'].data.shape[1]
if channels == 1:
    # Grayscale network: load the image as a single-channel float array.
    image = caffe.io.load_image(image_path, False)
elif channels == 3:
    # Color network: load RGB, then swap to BGR (Caffe's channel order).
    image = caffe.io.load_image(image_path, True)
    transform.set_channel_swap('data', (2, 1, 0))
else:
    # BUGFIX: previously `image` was left undefined for any other channel
    # count, causing a confusing NameError further down.
    raise ValueError("unsupported input channel count: %d" % channels)

net.blobs['data'].data[...] = transform.preprocess("data", image)

output = net.forward()

#print output['prob'][0].argmax()
# Class probabilities for the single test image.
feat = net.blobs['prob'].data[0]
print(feat)  # BUGFIX: parenthesized call works on both Python 2 and 3

#feat=net.blobs['conv1'].data[0,0]
#vis_square(feat)
#plt.imshow(feat,cmap ='gray')
#plt.show()



'''
def cv2_to_qimage(cv_img):
    
    cha= len(cv_img.shape)
    
    height=cv_img.shape[0]
    width= cv_img.shape[1]
    
    bgra = np.zeros([height, width,4], dtype=np.uint8)
    
    if(cha==2):
        bgra[...,0] = cv_img
        bgra[...,1] = cv_img
        bgra[...,2] = cv_img
    else:
        bgra[...,0] = cv_img[...,0]
        bgra[...,1] = cv_img[...,1]
        bgra[...,2] = cv_img[...,2] 
    return QImage(bgra.data, width, height, QImage.Format_RGB32 )

image=cv2.imread("demo.png")
cv2_to_qimage(image).save("./qunima.png")
'''


