import os
import math 
import shelve

# Persistent key/value stores produced by an earlier pipeline stage.
# db     : candidate articles to evaluate (keys are article/category titles)
# artdb  : article -> "|"-separated list of its categories (see getPMIScore)
# catdb  : category -> "|"-separated list of its member articles
db = shelve.open('candarts.db')
artdb = shelve.open('artdb.db')
catdb = shelve.open('catdb.db')

# One output file per ranking strategy (frequency, similarity, PMI, sim*PMI).
F_file = open("bfreq_out.txt","w")
S_file = open("sim_out.txt","w")
P_file = open("pmi_out.txt","w")
SP_file = open("simpmi_out.txt","w")

# Corpus-size constant used as the numerator in getPMIScore —
# presumably the total number of articles in the dump; TODO confirm.
n = 6059193

def isACategory(somestring):
    """Return True when the key names a category page (contains "Category")."""
    return "Category" in somestring

def getLength(somestring):
    """Number of "|"-separated fields in somestring (always >= 1)."""
    # Equivalent to len(somestring.split("|")): a split on an explicit
    # separator yields exactly one more field than there are separators.
    return somestring.count("|") + 1

def getPMIScore(article,category):
    """PMI-style association score between an article and a category.

    Computed as the corpus size n divided by the product of the article's
    category-list length and the category's member-list length (both stored
    as "|"-separated strings in the shelve databases).
    """
    articleDegree = getLength(artdb[article])
    categoryDegree = getLength(catdb[category])
    return float(n) / (articleDegree * categoryDegree)

def normalizePMIScores(pmiHash):
    """L2-normalize the scores in pmiHash in place and return it.

    Every score is divided by the Euclidean norm of the score vector, so
    that the dot product of two normalized hashes (see getSimScore) is a
    true cosine similarity.

    Bug fix: the norm is sqrt of the SUM OF SQUARES of the scores; the
    original summed the raw scores before taking the square root, which
    does not yield unit vectors.
    """
    norm = math.sqrt(sum(v * v for v in pmiHash.values()))

    # Guard the empty hash (PMI scores are strictly positive otherwise,
    # so the norm cannot be zero for a non-empty hash).
    if norm == 0:
        return pmiHash

    for eachKey in pmiHash.keys():
        pmiHash[eachKey] /= norm

    return pmiHash

def getIntersection(listA,listB):
    """Elements common to both inputs, returned as a list (order unspecified)."""
    common = set(listA) & set(listB)
    return list(common)

def getSimScore(pmiHashA,pmiHashB):
    """Dot product of the two score hashes over their shared keys.

    Returns -1.0 when the hashes have no key in common (distinct from a
    genuine similarity of zero).
    """
    sharedCats = set(pmiHashA.keys()) & set(pmiHashB.keys())
    if not sharedCats:
        return float(-1)

    simscore = float(0)
    for cat in sharedCats:
        simscore += pmiHashA[cat] * pmiHashB[cat]
    return simscore

def reverseSortHash(someHash):
    """Return the keys of someHash ordered by their values, largest first.

    Keys sharing a value are grouped together; their relative order follows
    the hash's iteration order.

    Bug fix: the original only appended a key when its value's bucket was
    first created (the append sat inside the has_key guard), silently
    dropping every later key that tied an earlier one.  The append now runs
    for every key.
    """
    buckets = {}
    for eachItem in someHash:
        score = someHash[eachItem]
        if score not in buckets:
            buckets[score] = []
        buckets[score].append(eachItem)

    resultList = []
    # Highest score first.
    for score in sorted(buckets.keys(), reverse=True):
        resultList.extend(buckets[score])

    return resultList



# Leave-one-out evaluation: for each non-category article, hide each of its
# categories in turn and try to recover it from categories of "neighbor"
# articles (articles sharing another category), ranked four ways:
# raw frequency, article similarity, PMI chain, and similarity*PMI.
for eachArticle in db.keys():
    if(isACategory(eachArticle) == False):

        #############Calculate PMI scores and Normalize between current article and its categories####################################
        
        pmirankedCats = {} # This contains pmi ranked categories for current article which will be necessary for similarity calculation

        for eachCategory in artdb[eachArticle].split("|"):
            pmirankedCats[eachCategory] = getPMIScore(eachArticle,eachCategory)
        
        pmirankedCats = normalizePMIScores(pmirankedCats)
        #############################################################################################################################
        
        for eachMissingCategory in artdb[eachArticle].split("|"):
            # Score accumulators, keyed by candidate label (category title).
            freq = {}
            sim = {}
            pmi = {}
            simpmi = {}
            print eachArticle,eachMissingCategory
            #1000 is the cut off here 
            # NOTE(review): when this guard fails, the hashes stay empty but
            # the ranking/output section below still runs, emitting -1 rows
            # with empty candidate lists — confirm that is intended.
            if(getLength(catdb[eachMissingCategory]) < 1000):
                for eachCategory in artdb[eachArticle].split("|"):

                    #1000 is the cut off here 
                    if(eachMissingCategory != eachCategory and getLength(catdb[eachCategory]) < 1000):            
                        #print eachArticle,eachMissingCategory,eachCategory,getPMIScore(eachArticle,eachCategory)

                        for eachSecondLevelArticle in catdb[eachCategory].split("|"):
                            
                            #Skip eachSecondLevel Article if its equal to current article
                            if(eachSecondLevelArticle == eachArticle):
                                continue

                            #print "\t",eachSecondLevelArticle,eachCategory,getPMIScore(eachSecondLevelArticle,eachCategory)

                            #################################Calculate and Normalize PMI ranks for second level categories################
                            secondLevelPmirankedCats = {}
                            
                            for eachCandidateLable in artdb[eachSecondLevelArticle].split("|"):
                                secondLevelPmirankedCats[eachCandidateLable] = getPMIScore(eachSecondLevelArticle,eachCandidateLable)
                                #print "\t\t",eachSecondLevelArticle,eachCandidateLable,getPMIScore(eachSecondLevelArticle,eachCandidateLable)

                            secondLevelPmirankedCats = normalizePMIScores(secondLevelPmirankedCats)
                            #############################################################################################################

                            ###################################Calculate Similarity between two articles################################
                            simscore = getSimScore(pmirankedCats,secondLevelPmirankedCats)
                            #print eachArticle,eachSecondLevelArticle,simscore
                            ###########################################################################################################

                            ###################################Update All Hash with proper values#######################################
                            for eachCandidateLable in artdb[eachSecondLevelArticle].split("|"):
                                # NOTE(review): a label's first sighting sets
                                # freq to 1 and then immediately increments it,
                                # so counts start at 2 — a uniform offset that
                                # does not change the relative ranking, but
                                # confirm it is intentional.
                                if(freq.has_key(eachCandidateLable) == False):
                                    freq[eachCandidateLable] = 1
                                    sim[eachCandidateLable] = float(0)
                                    pmi[eachCandidateLable] = float(0)
                                    simpmi[eachCandidateLable] = float(0)

                                freq[eachCandidateLable] += 1
                                sim[eachCandidateLable] += simscore
                                pmi[eachCandidateLable] += (pmirankedCats[eachCategory]*secondLevelPmirankedCats[eachCandidateLable]*getPMIScore(eachSecondLevelArticle,eachCategory))
                                simpmi[eachCandidateLable] += (simscore*secondLevelPmirankedCats[eachCandidateLable])
                                    
                            #############################################################################################################

        #######################################################Rank all hashes based on scores##################################################

            # NOTE(review): frank/srank/prank/sprank are assigned but never
            # read anywhere below — dead variables.
            frank = -1
            srank = -1
            prank = -1
            sprank = -1

            # Shared row prefix: article, its full category list, and the
            # held-out category being recovered.
            resultString = eachArticle +"\t"+" ".join(pmirankedCats.keys())+"\t"+eachMissingCategory
            
            # For each strategy: write the rank (index) of the held-out
            # category among the candidates, or -1 when it was not recovered.
            tmpList = reverseSortHash(freq)
            
            if(eachMissingCategory in tmpList):
                print "Freq:",tmpList.index(eachMissingCategory)
                F_file.write(resultString+"\t"+str(tmpList.index(eachMissingCategory))+"\t"+" ".join(tmpList)+"\n")
            else:
                F_file.write(resultString+"\t"+str(-1)+"\t"+" ".join(tmpList)+"\n")
                
            tmpList = reverseSortHash(sim)
            if(eachMissingCategory in tmpList):
                print "Sim:",tmpList.index(eachMissingCategory)
                S_file.write(resultString+"\t"+str(tmpList.index(eachMissingCategory))+"\t"+" ".join(tmpList)+"\n")
            else:
                S_file.write(resultString+"\t"+str(-1)+"\t"+" ".join(tmpList)+"\n")
                
            tmpList = reverseSortHash(pmi)
            if(eachMissingCategory in tmpList):
                print "Pmi:",tmpList.index(eachMissingCategory)
                P_file.write(resultString+"\t"+str(tmpList.index(eachMissingCategory))+"\t"+" ".join(tmpList)+"\n")
            else:
                P_file.write(resultString+"\t"+str(-1)+"\t"+" ".join(tmpList)+"\n")
            
            tmpList = reverseSortHash(simpmi)
            if(eachMissingCategory in tmpList):
                print "SimPmi:",tmpList.index(eachMissingCategory)
                SP_file.write(resultString+"\t"+str(tmpList.index(eachMissingCategory))+"\t"+" ".join(tmpList)+"\n")
            else:
                SP_file.write(resultString+"\t"+str(-1)+"\t"+" ".join(tmpList)+"\n")

# Flush and close the four output files.
# NOTE(review): the shelve databases (db, artdb, catdb) are never closed.
F_file.close()
S_file.close()
P_file.close()
SP_file.close()
