# The following is the normalized posting hit routine
from __future__ import division
from operator import itemgetter, attrgetter
from struct import *
import copy
import math
import os
import random
import sys
import time
from sets import Set
from random import randint
import re
from os import walk
from subprocess import call
import os

print "Begins..."
# NOTE: step0 and step1 below are deliberately disabled. They are kept as
# bare triple-quoted string literals, which Python evaluates and discards
# at runtime (no-ops). step0 would load a selected-document-ID whitelist;
# step1 would load per-term unigram probabilities from a binary file.
# Neither dict (selectedDocDict / termIDAndProbabilityDict) is referenced
# by the active code below.
'''
# step0:
selectedDocDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/documentPartitionFileBasedOnDocHitsDividedBySize_20141102_1%"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    docID = le[0]
    selectedDocDict[docID] = 1
    l = ifh.readline()
ifh.close()
print "len(selectedDocDict):",len(selectedDocDict)
'''

'''
# step1: load the pts.
# takes 1 min to load the probabilities
termIDAndProbabilityDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140707.binary"
ifh = open(ifn0,"rb")
##########
statinfo = os.stat(ifn0)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn0
print "file size:",fileSize
numOfBytesRead = 0
lineCounter = 1
while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4)
    (termID,probability) = unpack( "1I1f", byteString)
    termIDAndProbabilityDict[termID] = probability 
    # print termID,probability
    lineCounter += 1
    #if lineCounter == 100:
    #    break
    numOfBytesRead += 8
##########
print "Overall:"
print "len(termIDAndProbabilityDict):",len(termIDAndProbabilityDict)
ifh.close()
'''

# step2: load the document posting array about posting hit and pt to play.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_pt_added"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    strongPostingDict = {}
    weakPostingDict = {}
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,impactScore,postingHit,pt) = unpack( "1I1f1I1f", byteString)
        if postingHit >= 1:
            strongPostingDict[termID] = 1
        else:
            weakPostingDict[termID] = (termID,pt,0,0,0,0,0)
        print "----->",i,termID,impactScore,postingHit,pt
    numOfBytesRead += 12 + numOfPostings * 16
    print docIDFromFile,numOfPostings,len(strongPostingDict),len(weakPostingDict)
    
    if docIDFromFile == 0:
        break
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
inputFileHandler0.close()

# step3: load the document posting array about UPP promise
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (docIDFromFile,numOfPostings) = unpack( "2I", byteString)
    print docIDFromFile,numOfPostings
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        print termID,static,dynamic,combined,score1
        if termID in weakPostingDict:
            (_,pt,_,_,_,_,_) = weakPostingDict[termID]
            PTopKWithEssentiality = combined / pt
            ptPowToMinusOne_UPPPromise_withEssentiality = pow(pt,-1) * PTopKWithEssentiality
            ptPowToMinusPoint5_UPPPromise_withEssentiality = pow(pt,-0.5) * PTopKWithEssentiality
            ptPowToZero_UPPPromise_withEssentiality = pow(pt,0) * PTopKWithEssentiality
            ptPowToPoint5_UPPPromise_withEssentiality = pow(pt,0.5) * PTopKWithEssentiality
            ptPowToOne_UPPPromise_withEssentiality = pow(pt,1) * PTopKWithEssentiality
            '''
            print pt,","
            print PTopKWithEssentiality,","
            print ptPowToMinusOne_UPPPromise_withEssentiality,","
            print ptPowToMinusPoint5_UPPPromise_withEssentiality,","
            print ptPowToZero_UPPPromise_withEssentiality,","
            print ptPowToPoint5_UPPPromise_withEssentiality,","
            print ptPowToOne_UPPPromise_withEssentiality,","
            print
            '''
            weakPostingDict[termID] = (termID,pt,ptPowToMinusOne_UPPPromise_withEssentiality,ptPowToMinusPoint5_UPPPromise_withEssentiality,ptPowToZero_UPPPromise_withEssentiality,ptPowToPoint5_UPPPromise_withEssentiality,ptPowToOne_UPPPromise_withEssentiality)
    
    weakPostingList = []
    for tuple in weakPostingDict:
        weakPostingList.append(weakPostingDict[tuple])
    print "len(weakPostingList):"
    weakPostingList.sort(cmp=None, key=itemgetter(6), reverse=True)
    for tuple in weakPostingList:
        print tuple
    numOfBytesRead += 8 + numOfPostings * 20
    if docIDFromFile == 0:
        break
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
inputFileHandler0.close()
print "Ends."













