# -*- coding:utf-8 -*-

'''
Image quality metrics.

Based on the photo-quality-assessment features described in Yan Ke's paper.
'''

from opencv import cv,adaptors
from PIL import Image
from . import SimpleFilter
import colorsys
import math,numpy#,scipy,scipy.fftpack
import psyco

psyco.full()

def __cropTH(l,th=0.98):
    #1-normalize
    s=float(sum(l))
    if s==0: return l
    _l=l if s==1.0 else [x/s for x in l]
    _th=1-th
    i1=0
    i2=len(l)-1
    s=0
    while True:
        if i1>=i2:
            break
        elif _l[i1]<_l[i2]:
            s+=_l[i1]
            if s>_th:
                break
            else:
                l[i1]=0
                i1+=1
        else:
            s+=_l[i2]
            if s>_th:
                break
            else:
                l[i2]=0
                i2-=1
    return l

def SpatialBoundingOfEdges(PILImage,th=0.98):
    '''
    Spatial distribution of 98% of the edges.

    Good images tend to have spatially concentrated edges. Per channel, an
    adaptive Canny threshold is derived from the Laplacian's maximum
    response; the averaged edge map is shrunk to a 100x100 grid and the
    bounding extent of the top `th` fraction of edge mass is measured.

    Parameters:
        PILImage -- input PIL image (processed channel by channel)
        th       -- fraction of edge mass retained by __cropTH (default 0.98)

    Returns a score in [0, 1]; higher means edges are more concentrated.
    '''
    width,height=PILImage.size
    imgs=PILImage.split()
    edges=[]
    for channel in imgs:
        _channel=adaptors.PIL2Ipl(channel)

        # Estimate the maximum gradient magnitude via a Laplacian.
        _channel2=cv.cvCreateImage(cv.cvSize(width,height),cv.IPL_DEPTH_16S,1)
        cv.cvLaplace(_channel,_channel2,3)
        maxval=cv.cvMinMaxLoc(_channel2)[1]
        #cv.cvReleaseImage(_channel2)
        del _channel2

        maxval*=0.8
        # BUG FIX: the adaptive Canny threshold was previously assigned to
        # `th`, clobbering the crop-fraction parameter that __cropTH needs
        # below (1-th went negative, so nothing was ever cropped).
        cannyTh=600*math.pow(maxval/600,2) if maxval<600.0 else 600.0

        # Edge extraction (low threshold = 40% of the high one).
        _edge=cv.cvCreateImage(cv.cvSize(width,height),cv.IPL_DEPTH_8U,1)
        cv.cvCanny(_channel,_edge,cannyTh*0.4,cannyTh)
        edges.append(adaptors.Ipl2NumPy(_edge))
        #cv.cvReleaseImage(_edge)
        del _edge

        #cv.cvReleaseImage(_channel)
        del _channel

    # Average the per-channel edge maps and shrink to a fixed 100x100 grid.
    edge=adaptors.NumPy2PIL(sum(edges)/3)
    edgePix=edge.resize((100,100),Image.BILINEAR)
    del edges,imgs,edge

    edgePix=edgePix.load()
    rangeX=__cropTH([sum([edgePix[x,y] for y in xrange(100)]) for x in xrange(100)],th)
    rangeY=__cropTH([sum([edgePix[x,y] for x in xrange(100)]) for y in xrange(100)],th)
    del edgePix

    return 1-len([p for p in rangeX if p>0])*len([p for p in rangeY if p>0])/10000.0

def SpatialBoundingOfEdgesManual(PILImage,th=0.98,cannyParams=(150.0,600.0)):
    '''
    Spatial distribution of 98% of the edges, with caller-supplied
    (low, high) Canny thresholds instead of adaptive ones.

    Good images tend to have spatially concentrated edges. Returns a
    score in [0, 1]; higher means edges are more concentrated.
    '''
    width,height=PILImage.size
    channelEdges=[]
    for band in PILImage.split():
        iplBand=adaptors.PIL2Ipl(band)

        # Edge extraction with the fixed thresholds.
        iplEdge=cv.cvCreateImage(cv.cvSize(width,height),cv.IPL_DEPTH_8U,1)
        cv.cvCanny(iplBand,iplEdge,cannyParams[0],cannyParams[1])
        channelEdges.append(adaptors.Ipl2NumPy(iplEdge))
        #cv.cvReleaseImage(iplEdge)
        del iplEdge

        #cv.cvReleaseImage(iplBand)
        del iplBand

    # Average the per-channel edge maps and shrink to a fixed 100x100 grid.
    meanEdge=adaptors.NumPy2PIL(sum(channelEdges)/3)
    small=meanEdge.resize((100,100),Image.BILINEAR)
    del channelEdges,meanEdge

    grid=small.load()
    colSums=[sum([grid[x,y] for y in xrange(100)]) for x in xrange(100)]
    rowSums=[sum([grid[x,y] for x in xrange(100)]) for y in xrange(100)]
    rangeX=__cropTH(colSums,th)
    rangeY=__cropTH(rowSums,th)
    del grid

    keptX=len([p for p in rangeX if p>0])
    keptY=len([p for p in rangeY if p>0])
    return 1-keptX*keptY/10000.0

def ContrastBounding(PILImage,th=0.98,thumbnailSize=None):
    '''
    Contrast distribution. Good images tend to have high contrast.

    Converts the image to greyscale, crops the luminance histogram to its
    central `th` mass with __cropTH, and returns the number of surviving
    grey levels normalized by 255 (so a wide, high-contrast histogram
    scores close to 1).

    Parameters:
        PILImage      -- input PIL image
        th            -- fraction of histogram mass to keep (default 0.98)
        thumbnailSize -- optional size for downscaling before analysis

    BUG FIX: the histogram was previously taken from the full-size
    PILImage, so `thumbnailSize` was silently ignored.
    '''
    img=PILImage if thumbnailSize is None else SimpleFilter.Zoom(PILImage,thumbnailSize,False)
    hist=img.convert("L").histogram()
    rangeH=__cropTH(hist,th)

    return len([h for h in rangeH if h>0])/255.0

def HueCount(PILImage,hueSensitivity=0.05,thumbnailSize=640):
    '''
    Hue count. Good images tend to have few distinct hues.

    Pixels that are nearly grey (s < 0.2) or too dark/bright
    (v < 0.15 or v > 0.95) are ignored; the remaining hues fill a
    20-bin histogram and only bins holding more than
    hueSensitivity * max(bin) pixels count as a "present" hue.

    Parameters:
        PILImage       -- input PIL image (RGB pixel tuples expected)
        hueSensitivity -- fraction of the dominant bin a hue must reach
        thumbnailSize  -- optional downscale size; None skips downscaling
                          but can be extremely slow

    Returns 1 - (present hues / 20); higher means fewer distinct hues.
    '''
    img=PILImage if thumbnailSize is None else SimpleFilter.Zoom(PILImage,thumbnailSize,False)
    pix=img.getdata()

    hist=[0]*20
    _h=[0.05*i for i in xrange(20)]  # bin lower bounds 0.00 .. 0.95
    for rgb in pix:
        h,s,v=colorsys.rgb_to_hsv(rgb[0]/255.0,rgb[1]/255.0,rgb[2]/255.0)
        if s<0.2: continue             # nearly grey: hue is meaningless
        if v<0.15 or v>0.95: continue  # too dark or too bright

        # Bins are scanned in descending hue order; ordering is irrelevant
        # to the final count, only the per-bin tallies matter.
        for i in xrange(20):
            # BUG FIX: a hue of exactly 0.0 (pure red) previously failed
            # every strict comparison and was never counted; the last bin
            # now catches it.
            if h>_h[19-i] or i==19:
                hist[i]+=1
                break
    m=max(hist)
    return 1-len([h for h in hist if h>m*hueSensitivity])/20.0
    
def Sharpness(PILImage,freq_th=5,thumbnailSize=320):
    '''
    Sharpness score: blurred images are almost always worse than sharp ones.

    The (optionally downscaled) greyscale image is run through a 2-D FFT
    and the score is the fraction of frequency components whose magnitude
    exceeds `freq_th`.

    - Passing thumbnailSize=None skips downscaling but can be extremely slow.
    '''
    img=PILImage if thumbnailSize is None else SimpleFilter.Zoom(PILImage,thumbnailSize,False)
    width,height=img.size
    grey=img.convert("L").load()
    # Normalized luminance matrix, row-major (y outer, x inner).
    rows=[]
    for y in xrange(height):
        rows.append([grey[x,y]/255.0 for x in xrange(width)])
    spectrum=numpy.abs(numpy.fft.fft2(numpy.array(rows)))
    strong=len([f for f in spectrum.flat if f>freq_th])
    return strong/float(width*height)