## Reasoning system
## Assumes the type of each image to be: Image = "Word" : [blobs]

import math
import sys
sys.path.append("../imageFeatures_classes")
from AiBlob import compareBlobs as blob_equality

def word_equality(w1, w2):
    """Similarity score for two words: 1.0 on exact equality, 0.0 otherwise."""
    return 1.0 if w1 == w2 else 0.0

def build_relations(words, blobs):
    """Build pairwise similarity tables for the given items.

    Returns a (words_rel, blobs_rel) tuple. blobs_rel maps each blob to a
    dict of {other_blob: blob_equality score} over every blob (including
    itself). Word/word relations are intentionally left empty: word
    grouping is not used downstream.
    """
    words_rel = {}
    blobs_rel = {}
    for first in blobs:
        blobs_rel[first] = dict((second, blob_equality(first, second))
                                for second in blobs)
    return (words_rel, blobs_rel)


def group_2(items, relations, theta):
    """Greedily partition *items* into groups of similar elements.

    Repeatedly pops a seed item off the end of *items* and pulls every
    remaining item whose relation score to the seed exceeds *theta* into
    the seed's group.

    relations -- dict mapping item -> {other_item: similarity score}
    Returns a list of groups (lists of items).

    Note: consumes *items* in place; the caller's list ends up empty.

    Bug fix: the original removed elements from *items* while iterating
    over it, which silently skipped the element following every removal,
    so similar items could be missed and end up as spurious singleton
    groups. We now partition the remainder in a single pass instead.
    """
    groups = []
    while items:
        seed = items.pop()
        cluster = [seed]
        rel = relations[seed]
        remaining = []
        for other in items:
            if rel[other] > theta:
                cluster.append(other)
            else:
                remaining.append(other)
        # slice-assign to preserve the original in-place consumption
        # semantics for callers that passed their own list
        items[:] = remaining
        groups.append(cluster)
    return groups


def build_mixtures(words, blobs, word_rels, blob_rels, theta):
    """Cluster blobs by pairwise similarity above *theta*.

    Returns a (word_mix, blob_mix) tuple. word_mix is always an empty
    list (word grouping is disabled); blob_mix is the list of blob
    clusters produced by group_2. Note that group_2 consumes the
    *blobs* list in place.
    """
    # word grouping intentionally skipped
    return ([], group_2(blobs, blob_rels, theta))
                
            
def extrapolate(word_mix, blob_mix):
    """Pair word groups with blob groups by rank of group size.

    Both lists are sorted in place, largest group first, then matched
    positionally; surplus groups on the longer side are dropped.

    Returns a list of (word_group, blob_group) tuples.

    Uses key-based sorting instead of the Python-2-only cmp= comparator:
    ordering is identical (both sorts are stable and order by descending
    length) and key= is the documented, portable form.
    """
    word_mix.sort(key=len, reverse=True)
    blob_mix.sort(key=len, reverse=True)
    # zip truncates to the shorter input, matching the original
    # range(min(len, len)) loop
    return list(zip(word_mix, blob_mix))


def mostCommonWord(words):
    """Return (word, count) pairs sorted by descending occurrence count.

    Ties keep first-appearance order (the sort is stable), matching the
    original implementation.

    Performance fix: the original re-scanned the accumulator with
    ws.index((w2, n)) for every duplicate, making counting O(n^2); a
    single pass over *words* with a dict is O(n).
    """
    counts = {}
    first_seen = []  # words in order of first appearance, for stable ties
    for w in words:
        if w in counts:
            counts[w] += 1
        else:
            counts[w] = 1
            first_seen.append(w)
    ranked = [(w, counts[w]) for w in first_seen]
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked
        


def extrapolate_2(word_mix, blob_mix):
#    word_mix.sort(cmp=lambda x,y: cmp(len(y), len(x)))
    blob_mix.sort(cmp=lambda x,y: cmp(len(y), len(x)))

    used_words = []

    for bs in blob_mix:
        ws = []
        pnrs = []
        for b in bs:
            if not b.page.nr in pnrs:
                pnrs.append(b.page.nr)
                ws.extend(list(b.page.objectWords))

        cw = mostCommonWord(ws)
        cwf = [w for (w,n) in cw if not w in used_words]
        if len(cwf) > 0:
            w = cwf[0]
            used_words.append(w)
            for b in bs:
                if b.tag is None:
                    b.tag = w
                    if __debug__:
                        print 'REASON: added tag "', w, '" to blob: ', b.id
                

# Remove large groups of similar blobs within one
# page. They probably constitue background.
def filterBlobs(pages):
    theta = 0.725
    maxlen = 2
    blobs = []
    for p in pages:
        (wrel, blobrel) = build_relations([], p.blobs)
        groups = group_2(list(p.blobs), blobrel, theta)

        if __debug__:
            print "REASON: groups: ", p.nr, ": ", [len(g) for g in groups]

        filtered = filter((lambda x: len(x) <= maxlen), groups)
        for g in filtered:
            blobs.extend(g)

    return blobs

# Remove words that are mentioned only once
# Not very effective...
def filterWords(words):
    """Drop words that occur exactly once in *words*.

    Order and multiplicity of the surviving words are preserved.

    Performance fix: the original called words.count(item) per unique
    word (O(n^2)); one counting pass is O(n). The list comprehension
    also keeps the list return value on both Python 2 and 3 (where
    filter() is lazy).
    """
    counts = {}
    for w in words:
        counts[w] = counts.get(w, 0) + 1
    return [w for w in words if counts[w] > 1]

def reason(pages):
    """Tag blobs across *pages* by correlating them with object words.

    Pipeline: filter out likely-background blobs, build pairwise blob
    similarity tables, cluster the blobs, then let extrapolate_2 assign
    the most common page word to each cluster. Tags are written onto
    the blob objects as a side effect; nothing is returned.
    """
    #TODO: what with info on same pages???
    # preserve page structure with append instead of extent
    theta = 0.8  # similarity threshold for blob clustering

    # flatten the object words of every page into one list
    #blobs = []
    w = []
    #b = []
    for p in pages:
        w.extend(p.objectWords)
        #b.extend(p.blobs)
    
    # Filtering should not be used with the Color-book
    blobs = filterBlobs(pages)
    words = w #filterWords(w)

    if __debug__:
        print "REASON: Blobs ok: ", ', '.join([str(b.id) for b in blobs])
        print
        print "REASON: Words ok: ", ', '.join(words)
        print

    (rel_w, rel_b) = build_relations(words, blobs)
    if __debug__:
        print "REASON: relations Done"
    (a,b)          = build_mixtures(words, blobs, rel_w, rel_b, theta)
    if __debug__:
        print "REASON: mixtures Done"
    # NOTE(review): extrapolate_2 has no return statement, so l is
    # always None; the disabled tagging loop below (which indexes l)
    # would fail if re-enabled as-is -- confirm extrapolate() was not
    # the intended call there.
    l              = extrapolate_2(a,b)

    if __debug__:
        print "REASON: OK"
#    nrOfB = 0
#    for (ws,bs) in (l[0:4]): #Only the four most common words
#        for blob in bs:
#            
#            blob.tag = ws[0]
#            nrOfB += 1
#    if __debug__:
#        print "REASON: Blobs tagged: ", nrOfB



def reason_selective_with_tags(pages):
    """Tag up to five blobs per page with words from the clusters they
    belong to.

    Unlike reason(), no blob filtering is applied: every blob of every
    page competes. Tags are written onto the blob objects as a side
    effect; nothing is returned.
    """
    # get the tags.
    theta = 0.8  # similarity threshold for blob clustering

    # flatten all pages' words and blobs
    w = []
    b = []
    for p in pages:
        w.extend(p.objectWords)
        b.extend(p.blobs)

    words = w #filterWords(w)
    blobs = b #filterBlobs(pages)

    (rel_w, rel_b) = build_relations(words, blobs)
    # NOTE(review): b is rebound here from the flat blob list to the
    # blob-mixture list -- confusing shadowing, though the old value is
    # no longer needed.
    (a,b)          = build_mixtures(words, blobs, rel_w, rel_b, theta)
    # NOTE(review): extrapolate_2 has no return statement, so
    # correlation is None and the iteration below would raise a
    # TypeError -- presumably extrapolate() (which returns pairs) was
    # intended; confirm.
    correlation    = extrapolate_2(a,b)
    if __debug__:
        print "REASON: Extrapolate Done"

    for p in pages:
        top_five = []  # candidate (blob, word, word_cluster_size) tuples
        lowest = 1     # cluster size of the weakest kept candidate
        for b in p.blobs:
            for (ws, bs) in correlation:
                if b in bs:
                    if len(ws) > 0 if False else True:  # placeholder? no -- see note
                        pass
                        
