import cv
import cv2
import numpy as np
import collections

def extract( image, region ):
    """Crops a padded bounding box around *region* from a copy of *image*
    and thresholds it at 75% of the region's weighted-average colour.

    image  -- cv image (IplImage-style, indexable as img[y, x])
    region -- iterable of (x, y) pixel coordinates
    returns the cropped, thresholded cv image
    """
    # build a colour histogram of the region on a working copy
    histogram = dict()
    imgcp = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    cv.Copy( image, imgcp )
    for pixel in region:
        color = imgcp[pixel[1], pixel[0]]
        histogram[color] = histogram.get( color, 0 ) + 1

    start = region_start( region )
    height = region_height( region )
    width = region_width( region )

    # dirty: hard-coded margins (5 px horizontal, 2 px vertical)
    imgcp = cropImage( imgcp, start[0] - 5, start[1] - 2, start[0] + width + 5, start[1] + height + 2 )

    # average colour weighted by pixel counts; list() keeps this working
    # on Python 3 dict views as well as Python 2 lists
    avg_color = np.average( list( histogram.keys() ), 0, list( histogram.values() ) )
    cv.Threshold( imgcp, imgcp, int( avg_color * 0.75 ), 255, 0 )

    return imgcp
    
    
    
def manually_convolve( img1, img2 ):
    """Pixel-wise multiplies img1 by img2 scaled to 0..1, in place.

    img2 is treated as a mask with values in 0..255; img1 is modified
    and also returned for convenience.
    """
    for col in range( img1.width ):
        for row in range( img1.height ):
            mask_weight = img2[row, col] / 255.0
            img1[row, col] *= mask_weight
    return img1

# so far just red - hard-coded
# so far just red - hard-coded
def split_channels( img ):
    """Returns a one-channel image holding img's red channel (BGR index 2).
    A single-channel input is returned unchanged."""
    if img.nChannels == 1:
        return img

    red_only = cv.CreateImage( cv.GetSize( img ), img.depth, 1 )
    for row in range( img.height ):
        for col in range( img.width ):
            red_only[row, col] = img[row, col][2]
    return red_only

def thicken_contour( image, edge_col=255, median_radius=1 ):
    """Thickens contours by convolving with an all-ones square kernel of
    side 2 * median_radius + 1.  (edge_col is currently unused.)"""
    side = 2 * median_radius + 1
    kernel = cv.CreateMat( side, side, cv.CV_8S )
    cv.Set( kernel[:, :], 1 )
    result = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    cv.Filter2D( image, result, kernel )
    return result

# sums up values of given piece of image - checks for borders
def safe_subsum( mat, startx, starty, stopx, stopy ):
    list = np.asarray( mat[max( starty, 0 ):min( stopy, mat.height - 1 ), max( startx, 0 ):min( stopx, mat.width - 1 )] ).reshape( -1 ).tolist()
    return sum( list )
    
# hard-coded: margin

def get_neighbors( img, point ):
    """Returns the set of (x, y) coordinates in the 3x3 window centred on
    *point* (the point itself included) that lie inside the image."""
    px, py = point[0], point[1]
    return set( ( px + dx, py + dy )
                for dx in ( -1, 0, 1 )
                for dy in ( -1, 0, 1 )
                if 0 <= px + dx < img.width and 0 <= py + dy < img.height )

# region consists of one element only 
# region consists of one element only 
def create_region( img, point, cutoff=10 ):
    """Grows a region from *point* by repeatedly absorbing neighbouring
    pixels whose value is within *cutoff* of the seed pixel's value.
    Returns the region as a set of (x, y) tuples (always contains the seed)."""
    region = set( [point] )
    frontier = get_neighbors( img, point )
    while frontier:
        next_frontier = set()
        for candidate in frontier:
            # note: similarity is always measured against the seed pixel
            if candidate in region or not merge_criterion( img, point, candidate, cutoff ):
                continue
            region.add( candidate )
            for fresh in get_neighbors( img, candidate ):
                if fresh not in region:
                    next_frontier.add( fresh )
        frontier = next_frontier
    return region

def merge_criterion( img, pt_one, pt_two, cutoff=10 ):
    """True when the two pixels' values differ by less than *cutoff*.
    Points are (x, y); the image is indexed as img[y, x]."""
    value_one = img[pt_one[1], pt_one[0]]
    value_two = img[pt_two[1], pt_two[0]]
    return abs( value_one - value_two ) < cutoff

def region_start( region ):
    """Returns the top-left corner (min x, min y) of the region's bounding
    box; the sentinel (100000, 100000) is returned for an empty region."""
    xs = [point[0] for point in region]
    ys = [point[1] for point in region]
    return ( min( xs + [100000] ), min( ys + [100000] ) )

def region_leanness( region ):
    """Height-to-width ratio of the region's bounding box, as a float."""
    return float( region_height( region ) ) / region_width( region )

def region_height( region ):
    """Vertical extent (in pixels) of the region's bounding box."""
    return region_span( region, 1 )

def region_width( region ):
    """Horizontal extent (in pixels) of the region's bounding box."""
    return region_span( region, 0 )

def region_center( region ):
    """Centroid of the region's points as a float (x, y) pair."""
    count = len( region )
    total_x = sum( point[0] for point in region )
    total_y = sum( point[1] for point in region )
    return ( 1.0 * total_x / count, 1.0 * total_y / count )
    
def region_span( region, axis_index ):
    """Extent (max - min + 1) of the region along the given axis
    (0 = x, 1 = y)."""
    lowest = 100000
    highest = -100000
    for point in region:
        value = point[axis_index]
        if value < lowest:
            lowest = value
        if value > highest:
            highest = value
    return highest - lowest + 1


# returns a cropped image object
def cropImage( image, xstart, ystart, xend, yend ):
    width = min( xend, image.width - 1 ) - xstart
    height = min( yend, image.height - 1 ) - ystart
    cv.CreateImage
    cropped = cv.CreateImage( ( width, height ), image.depth, image.nChannels )
    src_region = cv.GetSubRect( image, ( max( xstart, 0 ), max( ystart, 0 ), width, height ) )
    cv.Copy( src_region, cropped )
    return cropped

# under construction
def grow_regions( image, seed_color=0, eaten_color=255 ):
    print "growing regions"
    selected_regions = list()
    for y in range( 0, image.height ):
        for x in range( 0, image.width ):
            if image[y, x] == seed_color:
                region = create_region( image, ( x, y ), 25 )
                if len( region ) > 1:
                    selected_regions.append( region )
                
                for point in region:
                    image[point[1], point[0]] = eaten_color;
    paint_regions( selected_regions, image )
    return selected_regions


def kill_the_losers( org_img, regions ):
    """Removes regions unlikely to contain text, mutating *regions* in
    place and returning it.

    A region is dropped when it is too small (fewer than 4x the optimal
    radius in pixels), too short relative to the picture, or not wide
    enough compared to its height.  The original implementation restarted
    a full scan after every single removal (quadratic); the criteria are
    independent per region, so one filtering pass yields the same set.
    """
    min_size = 4 * optimal_radius( org_img )
    min_height = org_img.height * 0.02
    survivors = []
    for region in regions:
        length_criterion = region_width( region ) < 1.5 * region_height( region )
        height_criterion = region_height( region ) < min_height
        size_criterion = len( region ) < min_size
        if not ( size_criterion or length_criterion or height_criterion ):
            survivors.append( region )
    # slice-assign so callers holding a reference to the list see the update
    regions[:] = survivors
    paint_regions( regions, org_img )
    return regions
                


def cluster_regions ( org_img, regions ):
    """Greedily merges pairs of regions that satisfy all four distance /
    size criteria below, repeating until no pair can be merged.  Mutates
    and returns *regions*; paints the final result before returning."""
    
    starts = list()
    heights = list()
    widths = list()
    centers = list()

    # init data for merging    
    for region in regions:
        start = region_start( region )
        starts.append( start )
        heights.append( region_height( region ) )
        widths.append( region_width( region ) )
        centers.append( region_center( region ) )
    #
    
    truth = True
    iterations_counter = 0
    while truth and len( regions ) > 1:
        iterations_counter += 1
        if iterations_counter % 25 == 0:
            print "clustering regions..."
        for i in range( 0, len( regions ) ):
            truth = False
            for j in range( i + 1, len( regions ) ):
                # all thresholds scale with the combined pixel count
                joint_mass = len( regions[i] ) + len( regions[j] )
                gravity_criterion = ( ( centers[i][0] - centers[j][0] ) ** 2 + ( centers[i][1] - centers[j][1] ) ** 2 ) < 50 * ( joint_mass ) 
                horizontal_criterion = ( centers[i][0] - centers[j][0] ) ** 2 < 40 * joint_mass
                vertical_criterion = ( centers[i][1] - centers[j][1] ) ** 2 < joint_mass
                # NOTE(review): right-hand side mixes 2*len(regions[i]) with
                # len(regions[j]) -- possibly meant 2*(len(i)+len(j)); confirm
                # before changing.
                width_criterion = widths[i] + widths[j] + abs( centers[i][0] - centers[j][0] ) < 2 * len( regions[i] ) + len( regions[j] )
                if ( gravity_criterion and width_criterion and vertical_criterion and horizontal_criterion ):
                    truth = True
                    # absorb region j into i and refresh i's cached stats
                    regions[i] = regions[i].union( regions[j] )
                    heights[i] = region_height( regions[i] )
                    widths[i] = region_width( regions[i] )
                    starts[i] = region_start( regions[i] )
                    centers[i] = region_center( regions[i] )
                    
                    # drop j from every parallel list to keep indices aligned
                    regions.pop( j )
                    heights.pop( j )
                    widths.pop( j )
                    centers.pop( j )
                    starts.pop( j )
                    break;
            if truth:
                break
    paint_regions( regions, org_img )
    print "all suitable regions merged"
    return regions
    
def paint_regions( regions, image, caption="cpoo" ):
    """Draws every region in its own colour on a black 3-channel canvas
    of the same size as *image* and displays it (blocking)."""
    canvas = cv.CreateImage( cv.GetSize( image ), image.depth, 3 )
    # black background; assumes a 3-channeled original
    for col in range( canvas.width ):
        for row in range( canvas.height ):
            canvas[row, col] = ( 0, 0, 0 )
    palette = [( 0, 0, 255 ), ( 0, 255, 0 ), ( 255, 0, 0 ), ( 255, 0, 255 ), ( 255, 255, 0 ), ( 0, 0, 255 ), ( 255, 255, 255 )]
    palette_index = 0
    for region in regions:
        # advance before painting, so the first region gets palette[1]
        palette_index = ( palette_index + 1 ) % len( palette )
        for point in region:
            canvas[point[1], point[0]] = palette[palette_index]
    show_wait( canvas, caption )
    
def compare_pixels( p1, p2, tolerance ):
    """Returns True when p1 and p2 differ by less than *tolerance*.

    Scalar pixels are compared directly; multi-channel pixels (any
    iterable) are compared channel-wise, with the tolerance scaled by
    the number of channels.
    """
    # collections.Iterable moved to collections.abc and was removed from
    # the collections top level in Python 3.10; support both layouts
    # while staying Python 2 compatible.
    iterable_cls = getattr( collections, "abc", collections ).Iterable
    diff = 0
    if isinstance( p1, iterable_cls ):
        total_tolerance = 0
        for dim in range( 0, len( p1 ) ):
            diff += abs( p1[dim] - p2[dim] )
            total_tolerance += tolerance
        tolerance = total_tolerance
    else:
        diff = abs( p1 - p2 )
    return diff < tolerance
    

# returns a window median_radius size for processing
def optimal_radius( picture ):
    return picture.height / 30
                
# returns a global map of text-related energy for the picture
def text_energy_map( image ):
    image = split_channels( image )
    radius = optimal_radius( image )
    
    laplac = laplacian( image )
    laplac = gaussian_blur_icl( laplac, ( radius, radius ), radius )
    
    # init compass results
    result_0 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_45 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_90 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_135 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )

    # compass one: operator & result
    ker_0 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_0, -1 )
    cv.Set( ker_0[1, :], 2 )
    cv.Filter2D( image, result_0, ker_0 )
    
    # compass two
    ker_45 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_45, -1 )
    ker_45[2, 2] = ker_45[1, 1] = ker_45[0, 0] = 2;
    cv.Filter2D( image, result_45, ker_45 )
    
    # compass three
    ker_90 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_90, -1 )
    cv.Set( ker_90[:, 1], 2 )
    cv.Filter2D( image, result_90, ker_90 )

    # compass four
    ker_135 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_135, -1 )
    ker_135[2, 0] = ker_135[1, 1] = ker_135[0, 2] = 2;
    cv.Filter2D( image, result_135, ker_135 )

    # prepare result image and temporary helper
    density = cv.CreateImage( cv.GetSize( image ), image.depth, 1 )
    temp = cv.CreateImage( cv.GetSize( image ), image.depth, 1 )
    cv.AddWeighted( result_0, 0.5, result_90, 0.5, 0, density )
    cv.AddWeighted( result_45, 0.5, result_135, 0.5, 0, temp )
    cv.AddWeighted( temp, 0.5, density, 0.5, 0, density )
    # display obtained (weighted sum) of constituent images
    # blur the density to highlight areas
    density = gaussian_blur_icl( density, ( 3, 3 ), radius )
    # displays again
    # show_wait( density, "cpoo" )

    # create a map of pixel weights - proportional to a total of orientations within window    
    orients = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    # threshold each picture - simulates summing orientations upon subsequent addition of images
    cv.Threshold( result_0, result_0, 128, 255, 0 )
    cv.Threshold( result_45, result_45, 128, 255, 0 )
    cv.Threshold( result_90, result_90, 128, 255, 0 )
    cv.Threshold( result_135, result_135, 128, 255, 0 )
    # just summing - don't like that part really
    cv.AddWeighted( result_0, 0.5, result_45, 0.5, 0, result_0 )
    cv.AddWeighted( result_90, 0.5, result_135, 0.5, 0, result_90 )
    cv.AddWeighted( result_90, 0.5, result_0, 0.5, 0, orients )
    # show( orients, "orients raw" )
    # equalizing and slightly blurring. not nice really again
    cv.EqualizeHist( orients, orients )
    # orients = gaussian_blur_icl( orients, ( 3, 3 ), radius )
    # show_wait( orients, "cpoo" )

    # manually convolve density and orients
    for x in range( 0, density.width ):
        for y in range( 0, density.height ):
            density[y, x] *= orients[y, x] * laplac[y, x] / ( 255.0 * 255.0 )

    cv.EqualizeHist( density, density )
    return density


def gaussian_blur_icl( image, ksize, sigmaX ):
    """GaussianBlur wrapper for old-style cv images; even kernel sizes
    are bumped to the next odd value, as OpenCV requires odd kernels."""
    width = ksize[0] if ksize[0] % 2 else ksize[0] + 1
    height = ksize[1] if ksize[1] % 2 else ksize[1] + 1
    blurred = cv2.GaussianBlur( cv2array( image ), ( width, height ), sigmaX )
    return array2cv( blurred )

def average_pixels( image, dims ):
    """Replaces each pixel with the mean of its dims[0] x dims[1]
    neighbourhood by filtering the image in place; returns the image."""
    cell_count = dims[0] * dims[1]
    box = cv.CreateMat( dims[0], dims[1], cv.CV_32F )
    cv.Set( box, 1.0 / cell_count )
    cv.Filter2D( image, image, box )
    return image
    
def laplacian( image ):
    """Applies a 3x3 Laplacian-like kernel (corners 1, edge-midpoints 2,
    centre -12; coefficients sum to zero) and returns the filtered image."""
    kernel = cv.CreateMat( 3, 3, cv.CV_32F )
    cv.Set( kernel, 1 )
    for row, col in ( ( 0, 1 ), ( 1, 0 ), ( 2, 1 ), ( 1, 2 ) ):
        kernel[row, col] = 2
    kernel[1, 1] = -12
    result = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    cv.Filter2D( image, result, kernel )
    return result

# from openCV python wiki
def cv2array( im ):
  depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

  arrdtype = im.depth
  a = np.fromstring( 
         im.tostring(),
         dtype=depth2dtype[im.depth],
         count=im.width * im.height * im.nChannels )
  a.shape = ( im.height, im.width, im.nChannels )
  return a


# from openCV python wiki
def array2cv( a ):
  dtype2depth = {
        'uint8':   cv.IPL_DEPTH_8U,
        'int8':    cv.IPL_DEPTH_8S,
        'uint16':  cv.IPL_DEPTH_16U,
        'int16':   cv.IPL_DEPTH_16S,
        'int32':   cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }
  try:
    nChannels = a.shape[2]
  except:
    nChannels = 1
  cv_im = cv.CreateImageHeader( ( a.shape[1], a.shape[0] ),
          dtype2depth[str( a.dtype )],
          nChannels )
  cv.SetData( cv_im, a.tostring(),
             a.dtype.itemsize * nChannels * a.shape[1] )
  return cv_im
  
def sum_orients( img, x, y, median_radius, threshold=255 ):
    """Counts pixels >= *threshold* in the window around (x, y), clamped
    to the image borders.

    NOTE(review): the upper range bound is exclusive, so the window
    covers x-median_radius .. x+median_radius-1 (asymmetric) -- confirm
    this is intended before widening it.
    """
    # renamed the accumulator from `sum` to avoid shadowing the builtin
    count = 0
    for curx in range( max( x - median_radius, 0 ), min( x + median_radius, img.width ) ):
        for cury in range( max( y - median_radius, 0 ), min( y + median_radius, img.height ) ):
            if( img[cury, curx] >= threshold ):
                count += 1
    return count

# shortcut for openCV display functions
def show( picture, desc="no_desc" ):
    return
    cv.ShowImage( desc, picture )
    cv.WaitKey()
    
def show_wait( picture, desc="no_desc" ):
    # shows the picture and blocks until any key is pressed
    cv.ShowImage( desc, picture )
    cv.WaitKey()

def sum_array( array ):
    """Sums every element of an array-like, whatever its shape."""
    flattened = np.asarray( array ).ravel().tolist()
    return sum( flattened )

def find_density_maximum( image ):
    """Hill-climbs a window [x0, y0, x1, y1], starting from the whole
    image, towards a local maximum of (pixel sum)^2 / area^2 density.
    Returns the best window found as a list [x0, y0, x1, y1]."""
    i = [0, 0, image.width, image.height]
    
    max_copy = i
    max_density = 0
    while True:
        # print "current density: " + str( max_density )
        # try shrinking the current window by one pixel on each side
        for ind in range( 0, 4 ):
            copy = list( i )
            if ind > 1:
                copy[ind] -= 1   # pull the right/bottom edge inwards
            else:
                copy[ind] += 1   # push the left/top edge inwards
            # squared-sum over squared-area density of the candidate window
            density = sum_array( image[copy[1] : copy[3], copy[0] : copy[2]] ) ** 2 * 1.0 / ( ( copy[3] - copy[1] ) * ( copy[2] - copy[0] ) ) ** 2
            # print "candidate_density: " + str( density )
            if density > max_density:
                max_density = density
                max_copy = copy
        # stop when no shrink improved the density
        if i == max_copy:
            break
        else:
            i = max_copy
    return max_copy
    
    
