from __future__ import division

from operator import itemgetter, attrgetter
from struct import *
# import gc
import copy
import math
import os
import random
import sys
import time
from sets import Set
from random import randint
import re
# import numpy as np
from os import walk
# import matplotlib.pyplot as plt
from subprocess import call
import os

docID_num_postings_dict = {}
# key: docID
# value: a list with the terms in parsing order
docID_terms_dict = {}
ofn = "/home/weijiang/workspace/NYU_IRTK/polyIRToolkit_Wei/random_document_bigrams"
ofh = open(ofn,"w")
ifn = "/home/weijiang/workspace/NYU_IRTK/polyIRToolkit_Wei/random_document_terms"
ifh = open(ifn,"r")
for l in ifh.readlines():
    le = l.strip().split(" ")
    curr_doc = int(le[1])
    curr_term = le[2].lower()
    if curr_doc not in docID_terms_dict:
        docID_terms_dict[curr_doc] = []
        docID_terms_dict[curr_doc].append(curr_term)
    else:
        docID_terms_dict[curr_doc].append(curr_term)
    if curr_doc not in docID_num_postings_dict:
        docID_num_postings_dict[curr_doc] = 1
    else:
        docID_num_postings_dict[curr_doc] += 1

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
# print len(docID_terms_dict[0])
# print docID_terms_dict[0]
for i in range(0,1092):
# for i in range(0,1):
    # print i,len(docID_terms_dict[i])
    for index,term1 in enumerate(docID_terms_dict[i][:-1]):
        bigram = term1 + " " + docID_terms_dict[i][index+1]
        output_line = str(i) + " " + str(index) + " " + str(bigram) + "\n"
        ofh.write(output_line)

ifh.close()
ofh.close()
exit(1)

ofn = "/home/weijiang/workspace/NYU_IRTK/polyIRToolkit_Wei/docIDs"
ofh = open(ofn,"w")
for i in range(0,1092):
    ofh.write(str(i) + "\n")
ofh.close()
print "Overall:"
print "ofn:",ofn
exit(1)

term_dict = {}
ifn = "/home/weijiang/workspace/NYU_IRTK/polyIRToolkit_Wei/debug2"
ifh = open(ifn,"r")
for l in ifh.readlines():
    le = l.strip().split(" ")
    term = le[1]
    term = term.lower()
    if term not in term_dict:
        term_dict[term] = 1
    else:
        term_dict[term] += 1
print "Overall:"
print "len(term_dict):",len(term_dict)
for term in term_dict:
    # temp_str = "*" + term + "*"
    # print temp_str
    print term
exit(1)

docIDAndTrecIDDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/gov2_trecID_docID_MappingTable"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 0
while l:
    le = l.strip().split(" ")
    currentTrec = le[0]
    currentDoc = le[1]
    docIDAndTrecIDDict[currentDoc] = currentTrec
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."
    l = ifh.readline()
    lineCounter += 1
ifh.close()
print "len(docIDAndTrecIDDict):",len(docIDAndTrecIDDict)

basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = []
ifn0 = "rawResults_trec_0M_1M_statistics_5%_h_10_TOP40%_docs"
ifn1 = "rawResults_trec_0M_1M_statistics_70%_h_10_TOP10%_docs"
fileList.append(ifn0)
fileList.append(ifn1)

for ifn in fileList:
    completeFileName = basePath + ifn
    ofn = basePath + ifn + "_fixed"
    ofh = open(ofn,"w")
    ifh = open(completeFileName,"r")
    for line in ifh.readlines():
        le = line.strip().split(" ")
        # print le
        if len(le) == 7 and le[-2] == "NYU_IRTK":
            if le[6] not in docIDAndTrecIDDict:
                ofh.write(le[0] + " " + le[1] + " " + le[2] + " " + le[3] + " " + le[4] + " " + le[5] + " " + le[6] + "\n")
            else:
                ofh.write(le[0] + " " + le[1] + " " + docIDAndTrecIDDict[le[6]] + " " + le[3] + " " + le[4] + " " + le[5] + " " + le[6] + "\n")
    print "ifn:",completeFileName
    print "ofn:",ofn
    print
    ofh.close()
exit(1)

# unpruned case
print "unpruned case"
rankList = ["1","2","3","4","5","6","7","8","9","10"]
documentResultDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/results/GOV2/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    currentQID = le[0]
    currentDocID = le[3]
    documentResult = currentQID + "_" + currentDocID
    if documentResult not in documentResultDict:
        documentResultDict[documentResult] = 1
    l = ifh0.readline()

print "len(documentResultDict):",len(documentResultDict)
ifh0.close()

'''
# The head of the 05 efficiency task testing queries
# unpruned case
print "unpruned case"
rankList = ["1","2","3","4","5","6","7","8","9","10"]
documentResultDict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_unpruned_forEvaluation_20141206_05EffeciencyTaskTestingQueries"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult not in documentResultDict:
            documentResultDict[currentDocResult] = 1
    l = ifh1.readline()
ifh1.close()
print "len(documentResultDict):",len(documentResultDict)
'''

fileList = []  # raw-result files to be compared against the unpruned baseline
'''
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_1%_UPP-5"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_2%_UPP-5"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_3%_UPP-5"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_4%_UPP-5"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_UPP-5"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_6%_UPP-5"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_7%_UPP-5"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_8%_UPP-5"
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_9%_UPP-5"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_UPP-5"
fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
fileList.append(ifn6)
fileList.append(ifn7)
fileList.append(ifn8)
fileList.append(ifn9)
'''

'''
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_1%_normalizedPostingHit"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_2%_normalizedPostingHit"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_normalizedPostingHit"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_normalizedPostingHit"
fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
'''

'''
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_1%_postingHit"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_postingHit"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_postingHit"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_15%_postingHit"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_20%_postingHit"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_30%_postingHit"
fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
'''

'''
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_1%_docHit"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_2%_docHit"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_docHit"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_docHit"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_20%_docHit"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_30%_docHit"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_40%_docHit"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_50%_docHit"

fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
fileList.append(ifn6)
fileList.append(ifn7)
'''

'''
# using the 05 efficiency track testing queries
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_hybrid_m_100_h_1"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_docHitsDividedByDocSize"

ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_hybrid_m_100_h_1"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_5%_docHitsDividedByDocSize"

ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_hybrid_m_100_h_1"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_10%_docHitsDividedByDocSize"

ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_20%_hybrid_m_100_h_1"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_20%_docHitsDividedByDocSize"

ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_30%_hybrid_m_100_h_1"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_30%_docHitsDividedByDocSize"

ifn10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_40%_hybrid_m_100_h_1"
ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_40%_docHitsDividedByDocSize"

ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_50%_hybrid_m_100_h_1"
ifn13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141211_50%_docHitsDividedByDocSize"

fileList.append(ifn0)
fileList.append(ifn2)
fileList.append(ifn4)
fileList.append(ifn6)
fileList.append(ifn8)
fileList.append(ifn10)
fileList.append(ifn12)

fileList.append(ifn1)
fileList.append(ifn3)
fileList.append(ifn5)
fileList.append(ifn7)
fileList.append(ifn9)
fileList.append(ifn11)
fileList.append(ifn13)
'''

'''
# using the 05 efficiency track testing queries
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_UPP-5_05_testing"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_5%_UPP-5_05_testing"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_10%_UPP-5_05_testing"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_20%_UPP-5_05_testing"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_30%_UPP-5_05_testing"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_40%_UPP-5_05_testing"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_50%_UPP-5_05_testing"

fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
fileList.append(ifn6)
'''

'''
# using the 06 efficiency track testing queries
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_hybrid_m_100_h_1_06_testing"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_docHitsDividedByDocSize_06_testing"

ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_5%_hybrid_m_100_h_1_06_testing"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_5%_docHitsDividedByDocSize_06_testing"

ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_10%_hybrid_m_100_h_1_06_testing"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_10%_docHitsDividedByDocSize_06_testing"

ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_20%_hybrid_m_100_h_1_06_testing"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_20%_docHitsDividedByDocSize_06_testing"

ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_30%_hybrid_m_100_h_1_06_testing"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_30%_docHitsDividedByDocSize_06_testing"

ifn10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_40%_hybrid_m_100_h_1_06_testing"
ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_40%_docHitsDividedByDocSize_06_testing"

ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_50%_hybrid_m_100_h_1_06_testing"
ifn13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_50%_docHitsDividedByDocSize_06_testing"

fileList.append(ifn0)
fileList.append(ifn2)
fileList.append(ifn4)
fileList.append(ifn6)
fileList.append(ifn8)
fileList.append(ifn10)
fileList.append(ifn12)
fileList.append(ifn1)
fileList.append(ifn3)
fileList.append(ifn5)
fileList.append(ifn7)
fileList.append(ifn9)
fileList.append(ifn11)
fileList.append(ifn13)
'''


# using the 06 efficiency track testing queries: register the pruned-run
# raw result files (various pruning levels / strategies) for evaluation.
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_UPP-5_06_testing"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_5%_UPP-5_06_testing"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_10%_UPP-5_06_testing"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_20%_UPP-5_06_testing"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_30%_UPP-5_06_testing"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_40%_UPP-5_06_testing"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_50%_UPP-5_06_testing"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_5%_hybrid_0M_1M_statistics_30%_h_10_06_testing"
# attempt @ 20141218
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_8%_hybrid_0M_1M_statistics_5%_h_10_06_TOP40%_docs_testing"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_8%_hybrid_0M_1M_statistics_70%_h_10_06_TOP10%_docs_testing"

fileList.extend([ifn0, ifn1, ifn2, ifn3, ifn4, ifn5, ifn6, ifn7, ifn8, ifn9])


'''
# using the 05 efficiency track testing queries
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_1%_hybrid_m_100_h_10_05_testing"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_5%_hybrid_m_100_h_10_05_testing"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_10%_hybrid_m_100_h_10_05_testing"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_20%_hybrid_m_100_h_10_05_testing"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_30%_hybrid_m_100_h_10_05_testing"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_40%_hybrid_m_100_h_10_05_testing"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141214_50%_hybrid_m_100_h_10_05_testing"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_5%_hybrid_0M_1M_statistics_30%_h_10_05_testing"
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_8%_hybrid_0M_1M_statistics_5%_h_10_05_TOP40%_docs_testing"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141217_8%_hybrid_0M_1M_statistics_70%_h_10_05_TOP10%_docs_testing"

fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
fileList.append(ifn6)
fileList.append(ifn8)
fileList.append(ifn9)
'''

for index,ifn in enumerate(fileList):
    counter = 0
    ifh1 = open(ifn,"r")
    l = ifh1.readline()
    while l:
        le = l.strip().split(" ")
        if l.strip().startswith("qid"):
            le = l.strip().split(" ")
            qid = le[1]
    
        if len(le) == 3 and le[0] in rankList:
            currentDocResult = qid + "_" + le[2]
            # print currentDocResult
            if currentDocResult in documentResultDict:
                counter += 1
                # exit(1)
        l = ifh1.readline()
    ifh1.close()
    print counter/len(documentResultDict)
exit(1)



# Histogram bucket counters: tempCounterN counts documents whose hit
# percentage falls in [N/10, (N+1)/10).
tempCounter0 = tempCounter1 = tempCounter2 = tempCounter3 = tempCounter4 = 0
tempCounter5 = tempCounter6 = tempCounter7 = tempCounter8 = tempCounter9 = 0


docIDAndNumOfPostingsDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
	le = l.strip().split(" ")
	docID = le[0]
	numOfPostings = int(le[2])
	docIDAndNumOfPostingsDict[docID] = numOfPostings 
	l = ifh.readline()
	lineCounter += 1
	if lineCounter % 2000000 == 0:
		break
ifh.close()
print "Overall:"
print "len(docIDAndNumOfPostingsDict):",len(docIDAndNumOfPostingsDict)
print "docIDAndNumOfPostingsDict['0']:",docIDAndNumOfPostingsDict["0"]

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/debugDocSize_20141218_1M_2M_doc%"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/debugDocSize_20141218_1M_2M"
ifh = open(ifn,"r")

l = ifh.readline()
while l:
	le = l.strip().split(" ")
	docID = le[0]
	numOfPostings = int(le[1])
	docPercentage = numOfPostings / docIDAndNumOfPostingsDict[docID]
	ofh.write(docID + " " + str(numOfPostings) + " " + str(docIDAndNumOfPostingsDict[docID]) + " " + str(docPercentage) + "\n")
	l = ifh.readline()
	
	if docPercentage >= 0 and docPercentage < 0.1:
		tempCounter0 += 1
	if docPercentage >= 0.1 and docPercentage < 0.2:
		tempCounter1 += 1
	if docPercentage >= 0.2 and docPercentage < 0.3:
		tempCounter2 += 1
	if docPercentage >= 0.3 and docPercentage < 0.4:
		tempCounter3 += 1
	if docPercentage >= 0.4 and docPercentage < 0.5:
		tempCounter4 += 1
	if docPercentage >= 0.5 and docPercentage < 0.6:
		tempCounter5 += 1
	if docPercentage >= 0.6 and docPercentage < 0.7:
		tempCounter6 += 1
	if docPercentage >= 0.7 and docPercentage < 0.8:
		tempCounter7 += 1
	if docPercentage >= 0.8 and docPercentage < 0.9:
		tempCounter8 += 1
	if docPercentage >= 0.9 and docPercentage <= 1:
		tempCounter9 += 1

print "Overall:"
print "ofn:",ofn
print "0:",tempCounter0 / 272215
print "1:",tempCounter1 / 272215
print "2:",tempCounter2 / 272215
print "3:",tempCounter3 / 272215
print "4:",tempCounter4 / 272215
print "5:",tempCounter5 / 272215
print "6:",tempCounter6 / 272215
print "7:",tempCounter7 / 272215
print "8:",tempCounter8 / 272215
print "9:",tempCounter9 / 272215
ifh.close()
ofh.close()
exit(1)






'''
termListLengthDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/gov2_wholeLexiconTermID_Term_ListLength_sortedByAlphabeticalOrder"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
	le = l.strip().split(" ")
	term = le[1]
	listLength = int(le[2])
	termListLengthDict[term] = listLength 
	l = ifh.readline()
	lineCounter += 1
	if lineCounter % 1000000 == 0:
		print "lineCounter:",lineCounter
ifh.close()
print "Overall:"
print "len(termListLengthDict):",len(termListLengthDict)
print termListLengthDict["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"]

termDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KQ.b"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
numSingleTermQuery = 0
term0 = ""
term1 = ""
term2 = ""
while l:
	le = l.strip().split(" ")
	if len(le) == 1:
		term0 = le[0]
		#print l.strip()
		#numSingleTermQuery += 1
	elif len(le) == 2:
		term1 = le[0]
		term2 = le[1]
	else:
		pass

	if term0 not in termDict:
		termDict[term0] = 1
	if term1 not in termDict:
		termDict[term1] = 1
	if term2 not in termDict:
		termDict[term2] = 1
	l = ifh.readline()
	lineCounter += 1
ifh.close()

print "******"
for term in termDict:
	if term not in termListLengthDict:
		print term,0
	else:
		print term,termListLengthDict[term]
print "******"

print "Overall:"
print "len(termDict):",len(termDict)
print "lineCounter:",lineCounter
print "numSingleTermQuery:",numSingleTermQuery
exit(1)
'''

# This is the thing we are going to target:
# 4.79607500381e-08 2 14709019 5%
# 2.99096321044e-08 3 29418037 10%
# 1.75526757573e-08 6 58836074 20%
# 1.2406995431e-08 9 88254110 30%
# 1.06401341071e-08 9 102963126 35%
# 9.23766840799e-09 16 117672147 40%
# 7.14360837151e-09 12 147090185 50%
# 5.61737234506e-09 12 176508213 60%
# 4.38201208652e-09 20 205926256 70%
# 3.28845439768e-09 11 235344284 80%
# Cumulative posting-count thresholds: N% of the slice's total postings
# (10% = 29,418,035, so the total is presumably ~294.2M -- TODO confirm).
# Used below to find the probability cut-off for a given pruning level.
onePercentNumOfPostings = 2941803
twoPercentNumOfPostings = 5883607
threePercentNumOfPostings = 8825410
fourPercentNumOfPostings = 11767214
fivePercentNumOfPostings = 14709017
sixPercentNumOfPostings = 17650821
sevenPercentNumOfPostings = 20592624
eightPercentNumOfPostings = 23534428
ninePercentNumOfPostings = 26476231
tenPercentNumOfPostings =    29418035
twentyPercentNumOfPostings = 58836069
thirtyPercentNumOfPostings =      88254104
thirtyFivePercentNumOfPostings = 102963121
fourtyPercentNumOfPostings =     117672139
fiftyPercentNumOfPostings =  147090174
sixtyPercentNumOfPostings =  176508208
seventyPercentNumOfPostings = 205926243
eightyPercentNumOfPostings = 235344278

nintyPercentNumOfPostings = 264762312 # don't think I need this


ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/GOV2_documentPostingArray_0M_1M_UPP_histogram_20141215_sortedByDecreasingProbability_20141215"
ifh = open(ifn,"r")
l = ifh.readline()
totalNumOfPostings = 0
currProbability = 0.0
currNumOfPostings = 0
while l:
	le = l.strip().split()
	currProbability = float(le[0])
	currNumOfPostings = int(le[1])
	totalNumOfPostings += currNumOfPostings
	if totalNumOfPostings > 102963121:
		break
	l = ifh.readline()
ifh.close()
print "Overall:"
print currProbability,currNumOfPostings,totalNumOfPostings
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KQ.b_polyIRToolkitCompatible"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KQ.b"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 0
while l:
	ofh.write(str(counter) + ":" + l)
	l = ifh.readline()
	counter += 1
	#if counter == 1000:
	#	break
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

completeCMD = ""
cmdStr1 = "./irtk --local --query --query-mode=batch --result-format=pruning --config-options=batch_query_input_file="
cmdStr2 = "LEAVE_wei_uniform_pruning_2013-09-12-16-12-30_None_None"
# "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/pairInfo/xaa"

mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/pairInfo"
f = []

for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print "len(filenames):",len(filenames)

for index,ifn in enumerate(filenames):
    lineCounter = 0
    completePath = mypath + "/" + ifn
    # print index+1,completePath
    completeCMD = cmdStr1 + completePath + " " + cmdStr2 + " " + ">" + " " + completePath + "_rawResults_20141216 &"
    print completeCMD
# print "Overall:"
exit(1)

'''
# just for checking.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/theAllInOneFeatureArrays_20141215/GOV2_documentPostingArray_0M_1M_features_allInOne"
ifh = open(ifn,"r")
###########################
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
totalNumOfPostings = 0
while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4 + 4)
    (docID,numOfPostings,docHit) = unpack( "3I", byteString)
    print docID,numOfPostings,docHit
    
    for i in range(0,numOfPostings):
        byteString = ifh.read(4 + 4 + 4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1,postingHit,pt) = unpack( "1I4f1I1f", byteString)
        print "----->",i,termID,static,dynamic,combined,score1,postingHit,pt

    if docID == 0:
        break
    numOfDocumentsProcessed += 1
    numOfBytesRead += 12 + numOfPostings * 4 * 7
########################################
print "Overall:"
print "ifn:",ifn
ifh.close()
exit(1)
'''

# combine the feature arrays together.
# Merges two parallel per-document binary feature arrays into one
# "all in one" array: file0 carries the ranking features, file1 the
# doc/posting hit counters; both must list the same documents in order.
# sys.argv[1] GOV2_documentPostingArray_0M_1M
# sys.argv[2] GOV2_documentPostingArray_0M_1M_docHit_postingHit_pt_added
# sys.argv[3] GOV2_documentPostingArray_0M_1M_features_allInOne
ifnBase0 = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/"
ifnBase1 = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/"
ofnBase = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/theAllInOneFeatureArrays_20141215/"
ifn0 = ifnBase0 + sys.argv[1]
ifn1 = ifnBase1 + sys.argv[2]
ofn = ofnBase + sys.argv[3]
#ifn0 = ifnBase0
#ifn1 = ifnBase1
########################################
ifh0 = open(ifn0,"rb")
ifh1 = open(ifn1,"rb")
# NOTE(review): output receives packed binary data but is opened in text
# mode "w"; harmless on POSIX, but "wb" would be the safe choice -- confirm.
ofh = open(ofn,"w")

statinfo = os.stat(ifn0)
fileSize0 = statinfo.st_size
print "currentInputFileName: ",ifn0
print "file size:",fileSize0
numOfBytesRead = 0  # bytes consumed from ifn0 only -- this drives the loop
numOfDocumentsProcessed = 0
totalNumOfPostings = 0
while numOfBytesRead < fileSize0:
    # per-document headers:
    #   file0: (docID, numOfPostings)         -- "2I", 8 bytes
    #   file1: (docID, numOfPostings, docHit) -- "3I", 12 bytes
    byteString0 = ifh0.read(4 + 4)
    byteString1 = ifh1.read(4 + 4 + 4)
    (docID0,numOfPostings0) = unpack( "2I", byteString0)
    (docID1,numOfPostings1,docHit1) = unpack( "3I", byteString1)
    print "docID:",docID1,numOfPostings1,docHit1
    # the output keeps file1's richer header form
    outputByteString = pack("3I",docID1,numOfPostings1,docHit1)
    ofh.write(outputByteString)

    # the two inputs must stay in lock-step document by document
    if docID0 == docID1 and numOfPostings0 == numOfPostings1:
        pass
    else:
        print "critical error"
        print "docID0:",docID0,"numOfPostings0:",numOfPostings0
        print "docID1:",docID1,"numOfPostings1:",numOfPostings1
        exit(1)

    totalNumOfPostings += numOfPostings0
    for i in range(0,numOfPostings0):
        # file0 posting record: (termID, static, dynamic, combined, score1) -- "1I4f", 20 bytes
        byteString0 = ifh0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString0)
        # print "----->",i,termID,static,dynamic,combined,score1

        # file1 posting record: (termID, score1, postingHit, pt) -- "1I1f1I1f", 16 bytes
        # (termID and score1 from file0 are overwritten by file1's values here)
        byteString1 = ifh1.read(4 + 4 + 4 + 4)
        (termID,score1,postingHit,pt) = unpack( "1I1f1I1f", byteString1)
        # print "----->",i,termID,score1,postingHit,pt

        # merged posting record: features from file0 + hit counters from file1
        outputByteString = pack("1I4f1I1f",termID,static,dynamic,combined,score1,postingHit,pt)
        ofh.write(outputByteString)
    #if docID0 == 1:
    #    break
    numOfDocumentsProcessed += 1
    # advance by file0's document footprint: 8-byte header + 20 bytes/posting
    numOfBytesRead += 8 + numOfPostings0 * 4 * 5
########################################
print "Overall:"
print "ifn0:",ifn0
print "ifn1:",ifn1
print "ofn:",ofn
ifh0.close()
ifh1.close()
ofh.close()
exit(1)


# Build a histogram over the "combined" (UPP) probability of every posting
# in a binary document-posting array: probability value -> posting count.
# The result is written as "<probability> <count>" lines.
UPPProbabilityHistogramDict = {}
ifnBase = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/"
# NOTE(review): ofnBase is assigned but never used -- the output is written
# next to the input file instead.
ofnBase = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
# ifn = "GOV2_documentPostingArray_0M_1M"
ifn = sys.argv[1]
completeIFN = ifnBase + ifn
completeOFN = completeIFN + "_UPP_histogram_20141215"
inputFileHandler0 = open(completeIFN,"rb")
ofh = open(completeOFN,"w")
statinfo = os.stat(completeIFN)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
totalNumOfPostings = 0
while numOfBytesRead < fileSize:
    # per-document header: (docID, numOfPostings) -- "2I", 8 bytes
    byteString = inputFileHandler0.read(4 + 4)
    (docID,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docID,numOfPostings
    totalNumOfPostings += numOfPostings
    for i in range(0,numOfPostings):
        # posting record: (termID, static, dynamic, combined, score1) -- "1I4f";
        # only the 4th field ("combined", the UPP probability) is kept
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (_,_,_,combined,_) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
        if combined not in UPPProbabilityHistogramDict:
            UPPProbabilityHistogramDict[combined] = 1
        else:
            UPPProbabilityHistogramDict[combined] += 1
    if docID == 1000:
        # progress marker only; the break below is intentionally disabled
        print docID,"been processed."
        # print "docID:",docID
        # break
    numOfDocumentsProcessed += 1
    # advance by this document's footprint: 8-byte header + 20 bytes/posting
    numOfBytesRead += 8 + numOfPostings * 4 * 5

print "Overall:"
print totalNumOfPostings,len(UPPProbabilityHistogramDict)
for UPPProbability in UPPProbabilityHistogramDict:
    ofh.write(str(UPPProbability) + " " + str(UPPProbabilityHistogramDict[UPPProbability]) + "\n")
print "completeIFN:",completeIFN
print "completeOFN:",completeOFN
inputFileHandler0.close()
ofh.close()
print "Ends."
exit(1)





queryDict = {}
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/100KQueries/GOV2_100KQueries"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/100KQueries/GOV2_100KQueries_head95K"
ifh = open(ifn,"r")
for line in ifh.readlines():
    queryContent = line.strip().split(":")[1]
    if queryContent not in queryDict:
        queryDict[queryContent] = 1
print "len(queryDict):",len(queryDict)
ifh.close()

counter = 0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/04-06.topics.polyIRTKCompatibleMode"
ifh = open(ifn,"r")
for line in ifh.readlines():
    queryContent = line.strip().split(":")[1]
    if queryContent in queryDict:
        counter += 1
print "counter:",counter
ifh.close()
exit(1)

'''
emptyLineFlag = True
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/pratice2_raw.txt"
ifh = open(inputFileName,"r")
for line in ifh.readlines():
    if line.strip() != "":
        print line.strip()
        emptyLineFlag = True
    elif line.strip() == "" and emptyLineFlag:
        print
        emptyLineFlag = False
    else:
        pass

outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/pratice2_well_formatted.txt"
ofh = open(outputFileName,"w")

# do logic
ifh.close()
ofh.close()
exit(1)
'''

'''
# unpruned case
print "unpruned case"
rankList = ["1","2","3","4","5","6","7","8","9","10"]
documentResultDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/results/GOV2/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    currentQID = le[0]
    currentDocID = le[3]
    documentResult = currentQID + "_" + currentDocID
    if documentResult not in documentResultDict:
        documentResultDict[documentResult] = 1
    l = ifh0.readline()

print "len(documentResultDict):",len(documentResultDict)
ifh0.close()

print "doc hits / doc size"
counter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_docHitDividedByDocSize_06EfficiencyTrack_testingQueries_1%_20141212"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)

print "hybrid h = 1"
counter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_hybrid_h_1_06EfficiencyTrack_testingQueries_1%_20141212"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)
exit(1)
'''



# unpruned case
print "unpruned case"
rankList = ["1","2","3","4","5","6","7","8","9","10"]
documentResultDict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_unpruned_forEvaluation_20141206_05EffeciencyTaskTestingQueries"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult not in documentResultDict:
            documentResultDict[currentDocResult] = 1
    l = ifh1.readline()
ifh1.close()
print "len(documentResultDict):",len(documentResultDict)

# normalized posting hit case
print "hybrid h_1"
counter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_makeThingsClear_20141212_1%_hybrid_h_1"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)

# NOTE: the two triple-quoted strings below are disabled variants of the
# overlap measurement above (normalized posting hit and UPP-5 runs), kept
# for reference. They are bare string literals and execute as no-ops.
'''
# normalized posting hit case
print "normalized posting hit case"
counter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_normalizedPostingHit_1%_forEvaluation_20141206_05EffeciencyTaskTestingQueries"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)
'''

'''
# UPP-5
print "UPP-5"
counter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_UPP-5_forEvaluation_1%_20141211_05EffeciencyTaskTestingQueries"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)
'''
exit(1)

selectedDocDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/documentPartitionFileBasedOnDocHitsDividedBySize_20141102_1%"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    docID = int(le[0])
    selectedDocDict[docID] = 1
    l = ifh.readline()
ifh.close()
print "len(selectedDocDict):",len(selectedDocDict)

# step2: load the document posting array about posting hit and pt to play.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_pt_added"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
numOfPostingsRecorded = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    # print docIDFromFile,numOfPostings,numDocHit
    if docIDFromFile not in selectedDocDict:
        for i in range(0,numOfPostings):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4) 
    else:
        tempCounter = 0
        for i in range(0,numOfPostings):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
            (termID,impactScore,postingHit,pt) = unpack( "1I1f1I1f", byteString)
            if postingHit >= 1:
                numOfPostingsRecorded += 1
                tempCounter += 1
        print docIDFromFile,numOfPostings,tempCounter,numOfPostingsRecorded
    numOfBytesRead += 12 + numOfPostings * 16
    # print docIDFromFile,numOfPostings
    
    #if docIDFromFile == 100:
    #    break
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
print "numOfPostingsRecorded:",numOfPostingsRecorded
inputFileHandler0.close()
exit(1)







numOfPostings = 6451948010
num_1P = numOfPostings * 0.01
num_2P = numOfPostings * 0.02
num_3P = numOfPostings * 0.03
num_4P = numOfPostings * 0.04
num_5P = numOfPostings * 0.05
num_6P = numOfPostings * 0.06
num_7P = numOfPostings * 0.07
num_8P = numOfPostings * 0.08
num_9P = numOfPostings * 0.09
num_10P = numOfPostings * 0.1
num_15P = numOfPostings * 0.15
num_20P = numOfPostings * 0.2
num_30P = numOfPostings * 0.3
num_40P = numOfPostings * 0.4
num_50P = numOfPostings * 0.5

num_1P_flag = True
num_2P_flag = True
num_3P_flag = True
num_4P_flag = True
num_5P_flag = True
num_6P_flag = True
num_7P_flag = True
num_8P_flag = True
num_9P_flag = True
num_10P_flag = True
num_15P_flag = True
num_20P_flag = True
num_30P_flag = True
num_40P_flag = True
num_50P_flag = True

# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitHistogram_normalized_20141204/normalizedPostingHit_histogram_0M_1M_sortedByNormalizedPostingHit.binary"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitHistogram_normalized_20141204/normalizedPostingHit_histogram_0M_1M_sortedByNormalizedPostingHit.binary"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitHistogram_normalized_20141204/usingSmoothedPostingCounts/resultfile0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
numOfPostingsInTotal = 0
previousScore = 2000000000
lineCounter = 1
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (score,numOfPostings) = unpack( "2I", byteString)
    # print score,numOfPostings
    
    # check
    #if score <= previousScore:
    #    previousScore = score
    #else:
    #    print previousScore,score,lineCounter
    #    break
    
    #if lineCounter == 100:
    #    break
    lineCounter += 1
    numOfPostingsInTotal += numOfPostings

    if numOfPostingsInTotal >= num_1P and num_1P_flag:
        print "1",numOfPostingsInTotal,score
        num_1P_flag = False
    if numOfPostingsInTotal >= num_2P and num_2P_flag:
        print "2",numOfPostingsInTotal,score
        num_2P_flag = False
    if numOfPostingsInTotal >= num_3P and num_3P_flag:
        print "3",numOfPostingsInTotal,score
        num_3P_flag = False
    if numOfPostingsInTotal >= num_4P and num_4P_flag:
        print "4",numOfPostingsInTotal,score
        num_4P_flag = False    
    if numOfPostingsInTotal >= num_5P and num_5P_flag:
        print "5",numOfPostingsInTotal,score
        num_5P_flag = False
    if numOfPostingsInTotal >= num_6P and num_6P_flag:
        print "6",numOfPostingsInTotal,score
        num_6P_flag = False
    if numOfPostingsInTotal >= num_7P and num_7P_flag:
        print "7",numOfPostingsInTotal,score
        num_7P_flag = False
    if numOfPostingsInTotal >= num_8P and num_8P_flag:
        print "8",numOfPostingsInTotal,score
        num_8P_flag = False
    if numOfPostingsInTotal >= num_9P and num_9P_flag:
        print "9",numOfPostingsInTotal,score
        num_9P_flag = False
    if numOfPostingsInTotal >= num_10P and num_10P_flag:
        print "10",numOfPostingsInTotal,score
        num_10P_flag = False
    if numOfPostingsInTotal >= num_15P and num_15P_flag:
        print "15",numOfPostingsInTotal,score
        num_15P_flag = False
    if numOfPostingsInTotal >= num_20P and num_20P_flag:
        print "20",numOfPostingsInTotal,score
        num_20P_flag = False
    if numOfPostingsInTotal >= num_30P and num_30P_flag:
        print "30",numOfPostingsInTotal,score
        num_30P_flag = False
    if numOfPostingsInTotal >= num_40P and num_40P_flag:
        print "40",numOfPostingsInTotal,score
        num_40P_flag = False
    if numOfPostingsInTotal >= num_50P and num_50P_flag:
        print "50",numOfPostingsInTotal,score
        num_50P_flag = False    
    numOfBytesRead += 8
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
print "numOfPostingsInTotal:",numOfPostingsInTotal
inputFileHandler0.close()
exit(1)

numOfPostingsInTotal = 6451948010
numOfPostingsAt1Percent = int(numOfPostingsInTotal * 0.01)
numOfPostingsAt2Percent = int(numOfPostingsInTotal * 0.02)
numOfPostingsAt3Percent = int(numOfPostingsInTotal * 0.03)
numOfPostingsAt4Percent = int(numOfPostingsInTotal * 0.04)
numOfPostingsAt5Percent = int(numOfPostingsInTotal * 0.05)
numOfPostingsAt6Percent = int(numOfPostingsInTotal * 0.06)
numOfPostingsAt7Percent = int(numOfPostingsInTotal * 0.07)
numOfPostingsAt8Percent = int(numOfPostingsInTotal * 0.08)
numOfPostingsAt9Percent = int(numOfPostingsInTotal * 0.09)
numOfPostingsAt10Percent = int(numOfPostingsInTotal * 0.1)
numOfPostingsAt15Percent = int(numOfPostingsInTotal * 0.15)
numOfPostingsAt20Percent = int(numOfPostingsInTotal * 0.2)
numOfPostingsAt30Percent = int(numOfPostingsInTotal * 0.3)
numOfPostingsAt40Percent = int(numOfPostingsInTotal * 0.4)
numOfPostingsAt50Percent = int(numOfPostingsInTotal * 0.5)

numOfPostingsAt1PercentTag = True
numOfPostingsAt2PercentTag = True
numOfPostingsAt3PercentTag = True
numOfPostingsAt4PercentTag = True
numOfPostingsAt5PercentTag = True
numOfPostingsAt6PercentTag = True
numOfPostingsAt7PercentTag = True
numOfPostingsAt8PercentTag = True
numOfPostingsAt9PercentTag = True
numOfPostingsAt10PercentTag = True
numOfPostingsAt15PercentTag = True
numOfPostingsAt20PercentTag = True
numOfPostingsAt30PercentTag = True
numOfPostingsAt40PercentTag = True
numOfPostingsAt50PercentTag = True

# Updated by Wei 2014/06/17
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/sortedByHitFreq_withXdocValuesAdded/resultfile0_50MQueries_final_sortedByHitFreqDividedByXdocValues"
inputFileHandler0 = open(inputFileName,"r")
numOfDocumentsCanBeIncluded = 0
numOfPostingsCurrent = 0

currentLine = inputFileHandler0.readline()
numOfDocumentsCanBeIncluded += 1
while currentLine:
    le = currentLine.strip().split(" ")
    numOfPostingsCurrent += int(le[3])

    if numOfPostingsCurrent >= numOfPostingsAt1Percent and numOfPostingsAt1PercentTag:
        print "1%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt1Percent
        numOfPostingsAt1PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt2Percent and numOfPostingsAt2PercentTag:
        print "2%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt2Percent
        numOfPostingsAt2PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt3Percent and numOfPostingsAt3PercentTag:
        print "3%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt3Percent
        numOfPostingsAt3PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt4Percent and numOfPostingsAt4PercentTag:
        print "4%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt4Percent
        numOfPostingsAt4PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt5Percent and numOfPostingsAt5PercentTag:
        print "5%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt5Percent
        numOfPostingsAt5PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt6Percent and numOfPostingsAt6PercentTag:
        print "6%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt6Percent
        numOfPostingsAt6PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt7Percent and numOfPostingsAt7PercentTag:
        print "7%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt7Percent
        numOfPostingsAt7PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt8Percent and numOfPostingsAt8PercentTag:
        print "8%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt8Percent
        numOfPostingsAt8PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt9Percent and numOfPostingsAt9PercentTag:
        print "9%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt9Percent
        numOfPostingsAt9PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt10Percent and numOfPostingsAt10PercentTag:
        print "10%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt10Percent
        numOfPostingsAt10PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt15Percent and numOfPostingsAt15PercentTag:
        print "15%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt15Percent
        numOfPostingsAt15PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt20Percent and numOfPostingsAt20PercentTag:
        print "20%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt20Percent
        numOfPostingsAt20PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt30Percent and numOfPostingsAt30PercentTag:
        print "30%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt30Percent
        numOfPostingsAt30PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt40Percent and numOfPostingsAt40PercentTag:
        print "40%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt40Percent
        numOfPostingsAt40PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt50Percent and numOfPostingsAt50PercentTag:
        print "50%",numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt50Percent
        numOfPostingsAt50PercentTag = False

    currentLine = inputFileHandler0.readline()
    numOfDocumentsCanBeIncluded += 1
inputFileHandler0.close()
exit(1)

docIDAndXdocValueDict = {}
docIDAndNumOfPostingsDict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/Gov2_numOfPostings_xDocValues"
ifh = open(ifn1,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    docID = int(le[0])
    numOfPostings = int(le[1])
    xDoc = float(le[2])
    docIDAndXdocValueDict[docID] = xDoc
    docIDAndNumOfPostingsDict[docID] = numOfPostings
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print lineCounter,"processed."
ifh.close()
print "len(docIDAndXdocValueDict):",len(docIDAndXdocValueDict)
print "len(docIDAndNumOfPostingsDict):",len(docIDAndNumOfPostingsDict)

docIDAndDocHitsDict = {}
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/sortedByHitFreq_withXdocValuesAdded/resultfile0_50MQueries_final_sortedByHitFreq_withNumOfPostingsAdded"
ifh = open(ifn2,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    docID = int(le[0])
    docHit = int(le[1])
    docIDAndDocHitsDict[docID] = docHit 
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print lineCounter,"processed."
print "len(docIDAndDocHitsDict):",len(docIDAndDocHitsDict)
ifh.close()

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/sortedByHitFreq_withXdocValuesAdded/resultfile0_50MQueries_final_sortedByHitFreq_withXdocValuesAdded"
ofh = open(ofn,"w")
for i in range(0,25205179):
    if i not in docIDAndDocHitsDict:
        outputLine = str(i) + " " + str(0) + " " + str(docIDAndXdocValueDict[i]) + " " + str(docIDAndNumOfPostingsDict[i]) + " " + str(float(0) / float(docIDAndXdocValueDict[i])) + "\n"
        ofh.write(outputLine)
    elif i not in docIDAndXdocValueDict:
        outputLine = str(i) + " " + str(docIDAndDocHitsDict[i]) + " " + str(0) + " " + str(docIDAndNumOfPostingsDict[i]) + " " + str(0) + "\n"
        ofh.write(outputLine)        
    else:
        outputLine = str(i) + " " + str(docIDAndDocHitsDict[i]) + " " + str(docIDAndXdocValueDict[i]) + " " + str(docIDAndNumOfPostingsDict[i]) + " " + str(float(docIDAndDocHitsDict[i]) / float(docIDAndXdocValueDict[i])) + "\n"
        ofh.write(outputLine)
ofh.close()

print "Overall:"
print "ifn1:",ifn1
print "ifn2:",ifn2
print "ofn:",ofn
exit(1)

# add the pt part for the term
termIDAndProbabilityDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140707.binary"
ifh = open(ifn0,"rb")
##########
statinfo = os.stat(ifn0)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn0
print "file size:",fileSize
numOfBytesRead = 0
lineCounter = 1
while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4)
    (termID,probability) = unpack( "1I1f", byteString)
    termIDAndProbabilityDict[termID] = probability 
    # print termID,probability
    lineCounter += 1
    #if lineCounter == 100:
    #    break
    numOfBytesRead += 8
##########
print "Overall:"
print "len(termIDAndProbabilityDict):",len(termIDAndProbabilityDict)
ifh.close()

termAndTermIDDict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/wholeLexiconTermID_Term_ListLength_GOV2"
ifh = open(ifn1,"r")
l = ifh.readline()
lineCounter = 0
while l:
    le = l.strip().split(" ")
    termID = int(le[0])
    term = le[1]
    termAndTermIDDict[term] = termID
    l = ifh.readline()
    lineCounter += 1
    #if lineCounter == 100:
    #    break
ifh.close()
print "Overall:"
print "len(termAndTermIDDict):",len(termAndTermIDDict)
print termIDAndProbabilityDict[termAndTermIDDict["0000000000"]]
# print termIDAndProbabilityDict[termAndTermIDDict["apple"]]

ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_positiveExamplesONLY_20141209_head10KQueries"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_negativeExamplesONLY_20141209_head10KQueries"
fileList = []
fileList.append(ifn3)
fileList.append(ifn4)
for fileName in fileList:
    ifh = open(fileName,"r")
    ofn = fileName + "_pt_added"
    ofh = open(ofn,"w")
    l = ifh.readline()
    while l:
        le = l.strip().split(",")
        term = le[2]
        ptValue = 0.0
        if term in termAndTermIDDict:
            ptValue = termIDAndProbabilityDict[termAndTermIDDict[term]]
        else:
            pass
        ofh.write(le[0] + "," + le[1] + "," + le[2] + "," + str(ptValue) + "," + le[3] + "," + le[4] + "," + le[5] + "," + le[6] + "," + le[7] + "," + le[8] + "\n")
        l = ifh.readline()
    ofh.close()
    print "done Output:",ofn
exit(1)



documentResultDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/results/GOV2/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    currentQID = le[0]
    currentDocID = le[3]
    documentResult = currentQID + "_" + currentDocID
    if documentResult not in documentResultDict:
        documentResultDict[documentResult] = 1
    l = ifh0.readline()

print "len(documentResultDict):",len(documentResultDict)
ifh0.close()

rankList = ["1","2","3","4","5","6","7","8","9","10"]
counter = 0
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_postingHit_1%_forEvaluation_20141205"
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_normalizedPostingHit_1%_forEvaluation_20141205"
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_normalizedPostingHit_forEvaluation_5%_20141206"
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_postingHit_forEvaluation_5%_20141206"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResult_normalizedPostingHit_forEvaluation_2%_20141206"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]

    if len(le) == 3 and le[0] in rankList:
        currentDocResult = qid + "_" + le[2]
        if currentDocResult in documentResultDict:
            counter += 1 
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "counter:",counter/len(documentResultDict)
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_pt_added"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,impactScore,postingHit,pt) = unpack( "1I1f1I1f", byteString)
        # print "----->",i,termID,impactScore,postingHit,pt
    numOfBytesRead += 12 + numOfPostings * 16
    # print "numOfBytesRead:",numOfBytesRead
    print docIDFromFile,numOfPostings,numDocHit,numOfBytesRead
    
    if docIDFromFile == 1:
        break
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
inputFileHandler0.close()
exit(1)

numOfPostingsCurrent = 0
# numOfPostingsNeeded = 100
numOfPostingsNeeded = 258077901
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitHistogram_normalized_20141204/normalizedPostingHit_histogram_0M_1M_sortedByNormalizedPostingHit"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    score = int(le[0])
    numOfPostingsCurrent += int(le[1])
    if numOfPostingsCurrent > numOfPostingsNeeded:
        print score,numOfPostingsCurrent,numOfPostingsNeeded
        exit(1)
    l = ifh.readline()
ifh.close()
exit(1)

numOfPostings = 6451948010
num_1P = numOfPostings * 0.01
num_2P = numOfPostings * 0.02
num_3P = numOfPostings * 0.03
num_4P = numOfPostings * 0.04
num_5P = numOfPostings * 0.05
num_6P = numOfPostings * 0.06
num_7P = numOfPostings * 0.07
num_8P = numOfPostings * 0.08
num_9P = numOfPostings * 0.09
num_10P = numOfPostings * 0.1
num_15P = numOfPostings * 0.15
num_20P = numOfPostings * 0.2
num_30P = numOfPostings * 0.3

num_1P_flag = True
num_2P_flag = True
num_3P_flag = True
num_4P_flag = True
num_5P_flag = True
num_6P_flag = True
num_7P_flag = True
num_8P_flag = True
num_9P_flag = True
num_10P_flag = True
num_15P_flag = True
num_20P_flag = True
num_30P_flag = True

numOfPostings = 0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP1000PostingHits_sortedByPostingHit_20141120"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentFreqBar = int(le[0])
    numOfPostings += int(le[1])
    if numOfPostings >= num_1P and num_1P_flag:
        print "1",numOfPostings,currentFreqBar
        num_1P_flag = False
    if numOfPostings >= num_2P and num_2P_flag:
        print "2",numOfPostings,currentFreqBar
        num_2P_flag = False
    if numOfPostings >= num_3P and num_3P_flag:
        print "3",numOfPostings,currentFreqBar
        num_3P_flag = False
    if numOfPostings >= num_4P and num_4P_flag:
        print "4",numOfPostings,currentFreqBar
        num_4P_flag = False    
    if numOfPostings >= num_5P and num_5P_flag:
        print "5",numOfPostings,currentFreqBar
        num_5P_flag = False
    if numOfPostings >= num_6P and num_6P_flag:
        print "6",numOfPostings,currentFreqBar
        num_6P_flag = False
    if numOfPostings >= num_7P and num_7P_flag:
        print "7",numOfPostings,currentFreqBar
        num_7P_flag = False
    if numOfPostings >= num_8P and num_8P_flag:
        print "8",numOfPostings,currentFreqBar
        num_8P_flag = False
    if numOfPostings >= num_9P and num_9P_flag:
        print "9",numOfPostings,currentFreqBar
        num_9P_flag = False
    if numOfPostings >= num_10P and num_10P_flag:
        print "10",numOfPostings,currentFreqBar
        num_10P_flag = False
    if numOfPostings >= num_15P and num_15P_flag:
        print "15",numOfPostings,currentFreqBar
        num_15P_flag = False
    if numOfPostings >= num_20P and num_20P_flag:
        print "20",numOfPostings,currentFreqBar
        num_20P_flag = False
    if numOfPostings >= num_30P and num_30P_flag:
        print "30",numOfPostings,currentFreqBar
        num_30P_flag = False
    l = ifh.readline()
ifh.close()
exit(1)

# NOTE: the triple-quoted string below is a disabled full dump of document
# 0's postings from the augmented posting array, kept for reference. It is
# a bare string literal and executes as a no-op.
'''
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_pt_added"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    print docIDFromFile,numOfPostings,numDocHit
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,impactScore,postingHit,pt) = unpack( "1I1f1I1f", byteString)
        print "----->",i,termID,impactScore,postingHit,pt
    if docIDFromFile == 0:
        break
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
exit(1)
'''

# continue to argment the document posting array.
termIDAndProbabilityDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140707.binary"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (termID,probability) = unpack( "1I1f", byteString)
    # print termID,probability
    termIDAndProbabilityDict[termID] = probability
    numOfBytesRead += 4 + 4
inputFileHandler0.close()
print "Overall:"
print "ifn:",ifn
print "len(termIDAndProbabilityDict):",len(termIDAndProbabilityDict)

# "GOV2_documentPostingArray_0M_1M_docHit_postingHit_added"
relativeInputFileName = sys.argv[1]
print "relativeInputFileName:",relativeInputFileName
relativeInputFileNameElements = relativeInputFileName.strip().split("_")
relativeOutputFileName = relativeInputFileNameElements[0] + "_" + relativeInputFileNameElements[1] + "_" + relativeInputFileNameElements[2] + "_" + relativeInputFileNameElements[3] + "_" + relativeInputFileNameElements[4] + "_" + relativeInputFileNameElements[5] + "_" + "pt" + "_" + relativeInputFileNameElements[6]

basePath = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/"
ifn = basePath + relativeInputFileName
ofn = basePath + relativeOutputFileName
inputFileHandler0 = open(ifn,"rb")
outputFileHandler0 = open(ofn,"w")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "currentOutputFileName: ",ofn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    print docIDFromFile,numOfPostings,numDocHit
    outputFileHandler0.write(byteString)
    numOfBytesRead += 12
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4)
        (termID,impactScore,postingHit) = unpack( "1I1f1I", byteString)
        # print "----->",i,termID,impactScore,postingHit
        pt = 0.0
        if termID in termIDAndProbabilityDict:
            pt = termIDAndProbabilityDict[termID]
        else:
            pt = 0
        outputFileHandler0.write(pack("1I1f1I1f",termID,impactScore,postingHit,pt))
        numOfBytesRead += 12
    
    if docIDFromFile % 1000000 == 0:
        print docIDFromFile,"processed."
    
    # if docIDFromFile == 0:
    #    break
    
inputFileHandler0.close()
outputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# /san_data/research/juanr/cw09/bigrams-xx-10-1.1.static.sw

# Purpose:
# check whether Juan's file is OK or NOT
# ifn = "/home/vgc/juanr/gov2.bpp-10-1.1.static"
# ifn = "/home/vgc/juanr/gov2.bpp-10-1.1.dyn"
ifn = "/san_data/research/juanr/cw09/bigrams-xx-10-1.1.static.sw"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (docID,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docID,numOfPostings
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        if i < 10:
            print "----->",i,termID,static,dynamic,combined,score1
    
    if docID == 10:
        break
    numOfDocumentsProcessed += 1
    numOfBytesRead += 8 + score1 * 4 * 5
        
inputFileHandler0.close()
print "Ends."
exit(1)

# output the excel file for prof
# step1:
# load the head 100000 documents
docIDAndTOP1000HitDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/50MQueries/TOP1000/binaryFiles/resultfile0_50MQueries_TOP1000_final.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/50MQueries/TOP1000/binaryFiles/resultfile0_50MQueries_TOP1000_final.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/50MQueries/TOP1000/binaryFiles/xaa_docHits_500000.binary"
ifh = open(ifn,"r")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0

while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4)
    (docID,docHit) = unpack( "2I", byteString)
    # print "docID:",docID,docHit
    docIDAndTOP1000HitDict[docID] = docHit
    numOfDocumentsProcessed += 1
    numOfBytesRead += 8
    if numOfDocumentsProcessed % 100000 == 0:
        print numOfDocumentsProcessed,"processed."
    #if numOfDocumentsProcessed > 1000000:
    #    break

print "Overall:"
print "len(docIDAndTOP1000HitDict):",len(docIDAndTOP1000HitDict)
print "docIDAndTOP1000HitDict[0]:",docIDAndTOP1000HitDict[0]
print "Ends."
ifh.close()

# step2:
# "GOV2_documentPostingArray_0M_1M_docHit_postingHit_added"
# ...
# For each document posting array in the shard named on the command line,
# emit one text row: docID, #postings, TOP1000 doc hits (0 if absent from
# step1's map), #postings with a nonzero hit count, and the 7 largest
# posting hit counts.
basePath = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/"
ifn = basePath + sys.argv[1]
ifh = open(ifn,"r")
# The docID range tokens (e.g. "0M" / "1M") are parsed out of the file name.
lowerBound = ifn.strip().split("/")[-1].split("_")[-5]
upperBound = ifn.strip().split("/")[-1].split("_")[-4]
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/GOV2_documentPostingArray_" + str(lowerBound) + "_" + str(upperBound) + "_docHit_postingHit_rawPlay_20141123"
print "ofn:",ofn
ofh = open(ofn,"w")
# NOTE(review): "ifh" above is opened but never read; the data is read via
# this second, binary-mode handle on the same path.
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    # Per-document header: three uint32 (docID, #postings, TOP10 doc hits).
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,TOP10DocHit) = unpack( "3I", byteString)
    postingHitList = []
    numPostingBeingHit = 0
    for i in range(0,numOfPostings):
        # Per-posting record: uint32 termID, float impact score, uint32 hits.
        byteString = inputFileHandler0.read(4 + 4 + 4)
        (termID,impactScore,postingHit) = unpack( "1I1f1I", byteString)
        # print "----->",i,termID,impactScore,postingHit
        if postingHit != 0:
            numPostingBeingHit += 1
        postingHitList.append( (termID,postingHit) )
    # Sort by hit count, descending (Python 2 sort signature).
    postingHitList.sort(cmp=None, key=itemgetter(1), reverse=True)
    # print postingHitList
    outputLine = str(docIDFromFile) + " " 
    outputLine += str(numOfPostings) + " "
    if docIDFromFile not in docIDAndTOP1000HitDict:
        outputLine += "0" + " "
    else:
        outputLine += str(docIDAndTOP1000HitDict[docIDFromFile]) + " "
    outputLine += str(numPostingBeingHit) + " "
    # Append the 7 largest posting hit counts (fewer if the doc is short).
    for postingTuple in postingHitList[0:7]:
        (_,postingHit) = postingTuple
        outputLine += str(postingHit) + " "
    # print outputLine
    ofh.write(outputLine + "\n")
    #if docIDFromFile == 20:
    #    break
    # Advance past the 12-byte header plus 12 bytes per posting.
    numOfBytesRead += numOfPostings * 12 + 12
    numOfDocumentsProcessed += 1
    if numOfDocumentsProcessed % 10000 == 0:
        print numOfDocumentsProcessed,"processed."
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
# NOTE(review): ofh is never explicitly closed before exit(1); CPython
# normally flushes at interpreter shutdown, but confirm output completeness.
exit(1)

# The operation of combing rows
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/50MQueries/TOP1000/binaryFiles/resultfile0_50MQueries_TOP1000_final.binary0"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final.binary"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/1MQueries/binaryFiles/resultfile0_1MQueries_final.binary"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/2MQueries/binaryFiles/resultfile0_2MQueries_final.binary"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/5MQueries/binaryFiles/resultfile0_5MQueries_final.binary"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/20MQueries/binaryFiles/resultfile0_20MQueries_final.binary"
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/50MQueries/binaryFiles/resultfile0_50MQueries_final.binary"
ofh = open(ofn,"wb")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_from50M/50MQueries/TOP1000/binaryFiles/resultfile0_50MQueries_TOP1000_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/1MQueries/binaryFiles/resultfile0_1MQueries_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/2MQueries/binaryFiles/resultfile0_2MQueries_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/5MQueries/binaryFiles/resultfile0_5MQueries_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries/binaryFiles/resultfile0_10MQueries_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/20MQueries/binaryFiles/resultfile0_20MQueries_raw.binary0"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/50MQueries/binaryFiles/resultfile0_50MQueries_raw.binary0"
ifh = open(ifn,"rb")

statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
currentDocID = -1
currentDocIDHitFreq = 0
while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4)
    (docID,freq) = unpack( "2I", byteString)
    if currentDocID != docID:
        if currentDocID != -1:
            # print currentDocID,currentDocIDHitFreq
            ofh.write(pack("2I",currentDocID,currentDocIDHitFreq))
        
        # reassignment
        currentDocID = docID
        currentDocIDHitFreq = freq
    else:
        currentDocIDHitFreq += freq
    numOfBytesRead += 4 * 2
# final care
ofh.write(pack("2I",currentDocID,currentDocIDHitFreq))
ifh.close()
ofh.close()

print "Overall:"
print "fileSize:",fileSize
print "numOfBytesRead:",numOfBytesRead
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# Build a histogram of per-document posting coverage rates across all the
# per-range coverage files.  Bucket keys: "00" = [0, 0.01), "01" = [0.01,
# 0.05), "02" = [0.05, 0.1), then "1".."9" for 0.1-wide buckets up to 1.0.
histogramCoverageDict = {}
histogramCoverageDict["00"]  = 0
histogramCoverageDict["01"]  = 0
histogramCoverageDict["02"]  = 0
histogramCoverageDict["1"]  = 0
histogramCoverageDict["2"]  = 0
histogramCoverageDict["3"]  = 0
histogramCoverageDict["4"]  = 0
histogramCoverageDict["5"]  = 0
histogramCoverageDict["6"]  = 0
histogramCoverageDict["7"]  = 0
histogramCoverageDict["8"]  = 0
histogramCoverageDict["9"]  = 0
# NOTE(review): buckets "10", "11" and "12" are initialized but no condition
# below ever increments them (the rate is capped at 1.0); they print as 0.
histogramCoverageDict["10"]  = 0
histogramCoverageDict["11"]  = 0
histogramCoverageDict["12"]  = 0

# One input file per million-docID range (plus a final 25M_END remainder).
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingPerDocumentCoverageAnalysis_20141123/"
ifn0 = "postingPerDocumentCoverage_0M_1M"
ifn1 = "postingPerDocumentCoverage_1M_2M"
ifn2 = "postingPerDocumentCoverage_2M_3M"
ifn3 = "postingPerDocumentCoverage_3M_4M"
ifn4 = "postingPerDocumentCoverage_4M_5M"
ifn5 = "postingPerDocumentCoverage_5M_6M"
ifn6 = "postingPerDocumentCoverage_6M_7M"
ifn7 = "postingPerDocumentCoverage_7M_8M"
ifn8 = "postingPerDocumentCoverage_8M_9M"
ifn9 = "postingPerDocumentCoverage_9M_10M"
ifn10 = "postingPerDocumentCoverage_10M_11M"
ifn11 = "postingPerDocumentCoverage_11M_12M"
ifn12 = "postingPerDocumentCoverage_12M_13M"
ifn13 = "postingPerDocumentCoverage_13M_14M"
ifn14 = "postingPerDocumentCoverage_14M_15M"
ifn15 = "postingPerDocumentCoverage_15M_16M"
ifn16 = "postingPerDocumentCoverage_16M_17M"
ifn17 = "postingPerDocumentCoverage_17M_18M"
ifn18 = "postingPerDocumentCoverage_18M_19M"
ifn19 = "postingPerDocumentCoverage_19M_20M"
ifn20 = "postingPerDocumentCoverage_20M_21M"
ifn21 = "postingPerDocumentCoverage_21M_22M"
ifn22 = "postingPerDocumentCoverage_22M_23M"
ifn23 = "postingPerDocumentCoverage_23M_24M"
ifn24 = "postingPerDocumentCoverage_24M_25M"
ifn25 = "postingPerDocumentCoverage_25M_END"
fileList = []
fileList.append(ifn0)
fileList.append(ifn1)
fileList.append(ifn2)
fileList.append(ifn3)
fileList.append(ifn4)
fileList.append(ifn5)
fileList.append(ifn6)
fileList.append(ifn7)
fileList.append(ifn8)
fileList.append(ifn9)
fileList.append(ifn10)
fileList.append(ifn11)
fileList.append(ifn12)
fileList.append(ifn13)
fileList.append(ifn14)
fileList.append(ifn15)
fileList.append(ifn16)
fileList.append(ifn17)
fileList.append(ifn18)
fileList.append(ifn19)
fileList.append(ifn20)
fileList.append(ifn21)
fileList.append(ifn22)
fileList.append(ifn23)
fileList.append(ifn24)
fileList.append(ifn25)
docCounter = 0
for fileName in fileList:
    ifn = basePath + fileName
    ifh = open(ifn,"r")
    l = ifh.readline()
    while l:
        le = l.strip().split(" ")
        # Column 3 of each space-separated line holds the coverage rate.
        coverageRate = float(le[3])
        if coverageRate >= 0 and coverageRate< 0.01:
            histogramCoverageDict["00"] += 1
        if coverageRate >= 0.01 and coverageRate< 0.05:
            histogramCoverageDict["01"] += 1
        if coverageRate >= 0.05 and coverageRate< 0.1:
            histogramCoverageDict["02"] += 1
        if coverageRate >= 0.1 and coverageRate< 0.2:
            histogramCoverageDict["1"] += 1
        if coverageRate >= 0.2 and coverageRate< 0.3:
            histogramCoverageDict["2"] += 1
        if coverageRate >= 0.3 and coverageRate< 0.4:
            histogramCoverageDict["3"] += 1
        if coverageRate >= 0.4 and coverageRate< 0.5:
            histogramCoverageDict["4"] += 1
        if coverageRate >= 0.5 and coverageRate< 0.6:
            histogramCoverageDict["5"] += 1
        if coverageRate >= 0.6 and coverageRate< 0.7:
            histogramCoverageDict["6"] += 1
        if coverageRate >= 0.7 and coverageRate< 0.8:
            histogramCoverageDict["7"] += 1
        if coverageRate >= 0.8 and coverageRate< 0.9:
            histogramCoverageDict["8"] += 1
        # The last bucket is closed at 1.0 (<= rather than <).
        if coverageRate >= 0.9 and coverageRate<= 1.0:
            histogramCoverageDict["9"] += 1
        l = ifh.readline()
        docCounter += 1
        if docCounter % 1000000 == 0:
            print docCounter,"processed"
            # NOTE(review): this break abandons the current file whenever the
            # cumulative counter crosses a 1M boundary -- presumably a debug
            # sampling shortcut; confirm before trusting the totals.
            break
    ifh.close()
    print ifn
print "Overall:"
for key in histogramCoverageDict:
    print key,histogramCoverageDict[key]
exit(1)

# Plot the coverage rate (column 3) of the first 10 documents in the 0M-1M
# shard against their docIDs (column 0).
# NOTE(review): matplotlib's "plt" import is commented out at the top of this
# file, so this section would raise NameError at plt.plot() if reached.
xDataPointsSet2 = []
yDataPointsSet2 = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingPerDocumentCoverageAnalysis_20141123/postingPerDocumentCoverage_0M_1M"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 0
while l:
    le = l.strip().split(" ")
    xDataPointsSet2.append(int(le[0]))
    yDataPointsSet2.append(float(le[3]))
    l = ifh.readline()
    lineCounter += 1
    # Only the first 10 lines are plotted.
    if lineCounter == 10:
        break
plt.plot(xDataPointsSet2, yDataPointsSet2)
plt.show()
exit(1)

# simple check
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/LMs/Clueweb09B/wholeLexicon_Clueweb09B_goodTuring_unigramProbablity_20140610.binary"
inputFileHandler0 = open(inputFileName,"r")
for i in range(0,100000000):
    byteString = inputFileHandler0.read(4 + 4)
    (termID,firstFactorProbablity) = unpack( "1I1f", byteString)
    print termID,firstFactorProbablity
inputFileHandler0.close()
exit(1)

termListLengthDict = {}
numOfPostings = 17075485964
onePercent = int(numOfPostings * 0.01)
print "numOfPostings:",numOfPostings
print "onePercent:",onePercent
ifn = "/san_share/wei/workspace/NYU_IRTK/results/bigramFromJuan/clueweb09B/clueweb09B_BPP_static_20141122/resultfile0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfPostingPopped = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    # print termID,docID,currentProbability,impactScore
    if termID not in termListLengthDict:
        termListLengthDict[termID] = 0
    else:
        termListLengthDict[termID] += 1
    
    if numOfPostingPopped % 1000000 == 0:
        print "numOfPostingPopped:",numOfPostingPopped
    
    if numOfPostingPopped == onePercent:
        break
    numOfPostingPopped += 1
    numOfBytesRead += 16
inputFileHandler0.close()
print "Overall:"
print "ifn:",ifn
for termID in termListLengthDict:
    print termID,termListLengthDict[termID]
exit(1)

# The section below is disabled by wrapping it in a triple-quoted string
# (three debug scans over BPP/bigram binary files); kept verbatim.
'''
ifn = "/san_share/wei/workspace/NYU_IRTK/results/bigramFromJuan/clueweb09B/clueweb09B_BPP_static_20141122/subPostingPopped_BPP_static_1_0M_1M"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfPostingPopped = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,currentProbability,impactScore
    numOfPostingPopped += 1
    if numOfPostingPopped == 100:
        break
    numOfBytesRead += 16
inputFileHandler0.close()
print "Overall:"
print "ifn:",ifn
exit(1)

ifn = "/home/vgc/juanr/bigrams-xx-10-1.1.static"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
inputFileHandler0.seek(339720055420)
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (docIDFromFile,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docIDFromFile,numOfPostings
    if docIDFromFile == 50000002:
        break
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
    
inputFileHandler0.close()

print "Ends."
print "Overall:"
print "ifn:",ifn
exit(1)
'''

print "Begins..."
documentResultDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/results/GOV2/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    currentQID = le[0]
    currentDocID = le[3]
    documentResult = currentQID + "_" + currentDocID
    if documentResult not in documentResultDict:
        documentResultDict[documentResult] = 1
    l = ifh0.readline()
ifh0.close()

improvedDocumentResultDict = {}
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_1%_20141121"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_5%_20141121"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_10%_20141121"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_15%_20141121"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_20%_20141121"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testNewPostingHit_30%_20141121"
ifh = open(ifn,"r")
currentQID = ""
for line in ifh.readlines():
    le = line.strip().split(" ")
    if line.strip().startswith("qid:"):
        currentQID = le[1]
    
    if line.strip().startswith("Score:"):
        currentDocID = le[2].strip().split("\t")[0]
        documentResult = currentQID + "_" + currentDocID
        if documentResult not in improvedDocumentResultDict:
            improvedDocumentResultDict[documentResult] = 1

counter = 0
for documentResult in improvedDocumentResultDict:
    if documentResult in documentResultDict:
        counter += 1
    
print "Overall:"
print "len(documentResultDict):",len(documentResultDict)
print "len(improvedDocumentResultDict):",len(improvedDocumentResultDict)
print "counter:",counter
ifh.close()
exit(1)

ifn = ""
ifnPart1 = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHit_1%_index/"
filesNeedToCopy = ["index.meta","index.idx","index.lex","index.ext"]
for i in range(0,25):
    ifnPart2 = "index" + "_" + str(i) + "M" + "_" + str(i+1) + "M" + "/"
    ifn = ifnPart1 + ifnPart2
    commandLine = ""
    for file in filesNeedToCopy:
        print "cp" + " " + ifn + file + " " + "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHit_1%_index/index_combined/" + file + ".0." + str(i) + " " + "&" 
    print
exit(1)



TOP10DocHitAndFreqDict = {}
counter = 0
fileNameDict = {}
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHitHistogram_20141120"
f = []

for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break

for ifn in filenames:
    completePath = mypath + "/" + ifn
    # print "processing",completePath
    ifh = open(completePath,"r")
    l = ifh.readline()
    while l:
        le = l.strip().split(" ")
        docHit = int(le[0])
        freq = int(le[1])
        if docHit not in TOP10DocHitAndFreqDict:
            TOP10DocHitAndFreqDict[docHit] = freq
        else:
            TOP10DocHitAndFreqDict[docHit] += freq
        l = ifh.readline()

# print "Overall:"
# print "len(filenames):",len(filenames)
for docHit in TOP10DocHitAndFreqDict:
    print docHit,TOP10DocHitAndFreqDict[docHit]
exit(1)


ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_10M_11M_docHit_postingHit_added"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_9M_10M_docHit_postingHit_added"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_10M_11M_docHit_postingHit_added"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
numOfBytesWritten = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,numDocHit) = unpack( "3I", byteString)
    print docIDFromFile,numOfPostings,numDocHit
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4)
        (termID,impactScore,postingHit) = unpack( "1I1f1I", byteString)
        print "----->",i,termID,impactScore,postingHit
    
    if docIDFromFile == 1:
        exit(1)
    numOfBytesWritten += numOfPostings * 12 + 8
inputFileHandler0.close()
print "Ends."
print "Overall:"
print "ifn:",ifn
exit(1)

# Scan the final postingHits binary and report the byte offset at which each
# docID range begins.  One "first time only" flag per intended range.
# NOTE(review): only range_0_10_flag is ever consulted below; the other 26
# flags are initialized but unused in this section.
range_0_10_flag = True
range_0M_1M_flag = True
range_1M_2M_flag = True
range_2M_3M_flag = True
range_3M_4M_flag = True
range_4M_5M_flag = True
range_5M_6M_flag = True
range_6M_7M_flag = True
range_7M_8M_flag = True
range_8M_9M_flag = True
range_9M_10M_flag = True
range_10M_11M_flag = True
range_11M_12M_flag = True
range_12M_13M_flag = True
range_13M_14M_flag = True
range_14M_15M_flag = True
range_15M_16M_flag = True
range_16M_17M_flag = True
range_17M_18M_flag = True
range_18M_19M_flag = True
range_19M_20M_flag = True
range_20M_21M_flag = True
range_21M_22M_flag = True
range_22M_23M_flag = True
range_23M_24M_flag = True
range_24M_25M_flag = True
range_25M_END_flag = True

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHits_50M/50MQueries_TOP1000/binaryFiles/resultfile0_50MQueries_postingHits_20141106_final.binary0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
currentDocID = -1
numOfPostings = 0
numOfDocs = 0
# Records are 12-byte triples: uint32 docID, uint32 termID, uint32 hit freq.
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    numOfPostings += 1
    (docID,termID,postingHitFreq) = unpack( "3I", byteString)
    # print docID,termID,postingHitFreq,numOfBytesRead
    # print numOfPostings
    if currentDocID != docID:
        numOfDocs += 1
        
        currentDocID = docID
        if docID >= 0 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False
        # NOTE(review): the five blocks below are copy-paste duplicates of
        # the first condition -- all gate on range_0_10_flag (and all but one
        # test docID >= 0), so they are dead once the flag flips False.
        # Presumably each was meant to test its own boundary (1M, 2M, ...)
        # with its own flag from the set above; confirm intent before use.
        if docID >= 1000000 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False
        if docID >= 0 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False
        if docID >= 0 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False
        if docID >= 0 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False
        if docID >= 0 and range_0_10_flag:
            print docID,numOfBytesRead
            range_0_10_flag = False        
        
        if numOfDocs % 1000 == 0:
            print numOfDocs,"docs processed."
    
    #if numOfPostings % 1000 == 0:
    #    print numOfPostings,"postings processed."
    
    numOfBytesRead += 12
exit(1)

# Disabled (triple-quoted) scan over the docHit/postingHit posting array
# file; kept verbatim below.
'''
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_docHit_postingHit_0M_1M"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docIDFromFile,numOfPostings,docHit) = unpack( "3I", byteString)
    print "docID:",docIDFromFile,numOfPostings,docHit
    numOfBytesRead += 12
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4)
        (termID,score,postingHit) = unpack( "1I1f1I", byteString)
        # print "----->",i,termID,score,postingHit
        numOfBytesRead += 12
    #if docIDFromFile == 4:
    #    break
    
inputFileHandler0.close()
print "Ends."
exit(1)
'''

# Disabled (triple-quoted) probe that seeks to a fixed offset and prints one
# record; kept verbatim below.
'''
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHits_50M/50MQueries_TOP1000/binaryFiles/resultfile0_50MQueries_postingHits_20141106_final.binary0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
inputFileHandler0.seek(304560)
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docID,termID,postingHitFreq) = unpack( "3I", byteString)
    print docID,termID,postingHitFreq
    exit(1)
    numOfBytesRead += 12
exit(1)
'''



# Seek-based variant of the range-offset scan: jump to byte 1104, then print
# the offset (relative to the seek point) at which each docID range starts.
# NOTE(review): this section mixes tabs and spaces for indentation (Python 2
# tolerates it); kept byte-identical -- do not re-indent piecemeal.
range_0_10_flag = True
range_10_20_flag = True
range_20_30_flag = True

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHits_50M/50MQueries_TOP1000/binaryFiles/resultfile0_50MQueries_postingHits_20141106_final.binary0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0

currentDocID = -1
inputFileHandler0.seek(1104)
# Records are 12-byte triples: uint32 docID, uint32 termID, uint32 hit freq.
# NOTE(review): range_20_30_flag is never checked inside the loop.
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docID,termID,postingHitFreq) = unpack( "3I", byteString)
    # print docID,termID,postingHitFreq
    if currentDocID != docID:
    	currentDocID = docID
    	if docID >= 0 and range_0_10_flag:
    		print docID,numOfBytesRead
    		range_0_10_flag = False
    	
    	if docID >= 10 and range_10_20_flag:
			print docID,numOfBytesRead
			range_10_20_flag = False
    
    numOfBytesRead += 12
exit(1)



# Histogram of posting-hit frequencies: count how many postings were hit
# exactly N times, plus the total hit count, dumping intermediate snapshots
# every 1M postings.
# NOTE(review): this section mixes tabs and spaces for indentation (Python 2
# tolerates it); kept byte-identical -- do not re-indent piecemeal.
histogramDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHits_50M/50MQueries_TOP1000/binaryFiles/resultfile0_50MQueries_postingHits_20141106_final.binary0"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
sumPostingHit = 0
postingCounter = 0
# Records are 12-byte triples: uint32 docID, uint32 termID, uint32 hit freq.
# NOTE(review): numOfBytesRead is never incremented inside this loop, so it
# only terminates via struct.error on the short read at EOF.
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (docID,termID,postingHitFreq) = unpack( "3I", byteString)
    if postingHitFreq not in histogramDict:
    	histogramDict[postingHitFreq] = 1
    else:
    	histogramDict[postingHitFreq] += 1
    sumPostingHit += postingHitFreq
    # print docID,termID,postingHitFreq
    postingCounter += 1
    if postingCounter % 1000000 == 0:
    	# print "postingCounter:",postingCounter
    	# print "sumPostingHit:",sumPostingHit
    	print "*****"
    	print postingCounter,sumPostingHit
    	for postingHit in histogramDict:
    		print postingHit,histogramDict[postingHit]
inputFileHandler0.close()
print "sumPostingHit:",sumPostingHit
print "Overall:"
for postingHit in histogramDict:
	print postingHit,histogramDict[postingHit]
print "Ends."
exit(1)

numOfPostingsLandmarkDict = {}
totalNumOfPostings = 6451948010
debugPercent = 10
onePercent = totalNumOfPostings * 0.01
twoPercent = totalNumOfPostings * 0.02
fivePercent = totalNumOfPostings * 0.05
tenPercent = totalNumOfPostings * 0.1
fifteenPercent = totalNumOfPostings * 0.15
twentyPercent = totalNumOfPostings * 0.2
thirtyPercent = totalNumOfPostings * 0.3

print debugPercent
print onePercent
print twoPercent
print fivePercent
print tenPercent
print fifteenPercent
print twentyPercent
print thirtyPercent

numOfPostingsLandmarkDict[debugPercent] = 1
numOfPostingsLandmarkDict[onePercent] = 1
numOfPostingsLandmarkDict[twoPercent] = 1
numOfPostingsLandmarkDict[fivePercent] = 1
numOfPostingsLandmarkDict[tenPercent] = 1
numOfPostingsLandmarkDict[fifteenPercent] = 1
numOfPostingsLandmarkDict[twentyPercent] = 1
numOfPostingsLandmarkDict[thirtyPercent] = 1

numOfPostingProcessed = 0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHits_10M/postingHitFreq_ALL_sortedByFreq_20140831"
ifh = open(ifn,"r")
l = ifh.readline()
numOfPostingProcessed += 1
while l:
	l = ifh.readline()
	numOfPostingProcessed += 1
	
	if numOfPostingProcessed % 1000000 == 0:
		print numOfPostingProcessed,"processed."
	
	if numOfPostingProcessed in numOfPostingsLandmarkDict:
		le = l.strip().split(" ")
		print "----->",numOfPostingProcessed,le[1]
ifh.close()
exit(1)

counter = 0
fileNameDict = {}
mypath = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2/100MFakeQueryLog/seperateFiles"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
for ifn in filenames:
    completePath = mypath + "/" + ifn
    ofn = completePath + "_" + "unique"
    ofh = open(ofn,"w")
    queryDict = {}
    ifh = open(completePath,"r")
    l = ifh.readline()
    lineCounter = 0
    while l:
	    queryContent = l.strip()
	    if queryContent not in queryDict:
		    queryDict[queryContent] = 1
	    else:
		    pass
	    l = ifh.readline()
	    lineCounter += 1
    ifh.close()
    for queryContent in queryDict:
	    ofh.write(queryContent + "\n")
    ofh.close()

    print "subOverall:"
    print "ifn:",ifn,lineCounter
    print "ofn:",ofn,len(queryDict)

print "Overall:"
print "len(filenames):",len(filenames)
exit(1)

# Juan's clueweb09B size: 341211101072
# correct size: 341911482664
# Dump the sorted termIDs of the FIRST document in the unigram document
# posting array, then exit (the exit(1) inside the loop fires on the first
# iteration, so only one document is ever read).
termIDList = []
# ifn = "/home/vgc/juanr/cw09b.bigrams-10-1.1.static.111014"
# ifn = "/san_data/research/juanr/cw09b.bigrams-10-1.1.dyn.111014"
ifn = "/san_data/research/wei/workspace/NYU_IRTK/data/Clueweb09B/clueweb09B/unigramDocumentPostingArray/LEAVE_documentPostingArray_clueweb09B_0M_2M.binary"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
# NOTE(review): numOfBytesRead is never advanced; the loop relies entirely
# on the exit(1) below to terminate.
while numOfBytesRead < fileSize:
    # Per-document header: uint32 docID, uint32 posting count.
    byteString = inputFileHandler0.read(4 + 4)
    (docIDFromFile,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docIDFromFile,numOfPostings
    
    for i in range(0,numOfPostings):
        # Per-posting record: uint32 termID plus four float scores (20 bytes).
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
        termIDList.append(termID)
    termIDList.sort(cmp=None, key=None, reverse=False)
    for termID in termIDList:
        print termID
    exit(1)
inputFileHandler0.close()
print "Ends."
exit(1)

lowerBound = int(sys.argv[1])
upperBound = lowerBound + 1000000

fileKeyAndPositionDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/Clueweb09B/LEAVE_auxFileForNavigatingBinaryPostingFile_WHOLE_oneCombinedFile"
ifh0 = open(ifn0,"r")
for line in ifh0.readlines():
    lineElements = line.strip().split(" ")
    beginningKey = int(lineElements[0])
    beginningPosition = int(lineElements[1])
    fileKeyAndPositionDict[beginningKey] = beginningPosition 
ifh0.close()
print "len(fileKeyAndPositionDict):",len(fileKeyAndPositionDict)

# Juan's clueweb09B size: 341211101072
# correct size: 341911482664
ifn = "/home/vgc/juanr/cw09b.bigrams-10-1.1.static.111014"
# ifn = "/san_data/research/juanr/cw09b.bigrams-10-1.1.dyn.111014"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
inputFileHandler0.seek(fileKeyAndPositionDict[lowerBound])
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (docIDFromFile,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docIDFromFile,numOfPostings
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
    
    if lowerBound == docIDFromFile:
        pass
    else:
        print "not GOOD."
        print "lowerBound:",lowerBound
        print "docIDFromFile:",docIDFromFile
        exit(1)
        
    lowerBound += 1
    
inputFileHandler0.close()
print "Ends."
exit(1)

ifn = "/san_data/research/wei/workspace/NYU_IRTK/data/GOV2/bigramFromJuan/documentPostingArrays/gov2/bigrams-v-10.static/documentPostingArray_0M_1M"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (docID,numOfPostings) = unpack( "2I", byteString)
    print "docID:",docID,numOfPostings
    
    for i in range(0,numOfPostings):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        print "----->",i,termID,static,dynamic,combined,score1
    
    if docID == 0:
        break
    numOfDocumentsProcessed += 1
    numOfBytesRead += 8 + score1 * 4 * 5
        
inputFileHandler0.close()
print "Ends."
exit(1)


# Sum the posting counts and term counts across the ranksANDImpact range
# files.  Per-term layout: uint32 termID, uint32 posting count, then that
# many 12-byte (docID, impactScore, rank) records -- which are skipped via
# seek() rather than read, since only the counts are needed here.
numOfBytesRead = 0
inputFileList = []
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange0_20140607"
inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange1_20140607"
inputFileName3 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange2_20140607"
inputFileName4 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange3_20140607"
inputFileName5 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange4_20140607"
inputFileName6 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange5_20140607"
inputFileName7 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange6_20140607"
inputFileName8 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange7_20140607"
inputFileName9 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange8_20140607"
inputFileName10 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange9_20140607"
# Only range 0 is currently enabled; uncomment the rest for a full pass.
inputFileList.append(inputFileName1)
#inputFileList.append(inputFileName2)
#inputFileList.append(inputFileName3)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
#inputFileList.append(inputFileName6)
#inputFileList.append(inputFileName7)
#inputFileList.append(inputFileName8)
#inputFileList.append(inputFileName9)
#inputFileList.append(inputFileName10)

totalNumOfPostings = 0
totalNumOfTerms = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"r")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytes = 0

    while numOfBytes < fileSize:
            byteString = inputFileHandler0.read(4 + 4)
            (termID,score1) = unpack( "2I", byteString)
            print termID,score1
            totalNumOfPostings += score1
            totalNumOfTerms += 1
            # Disabled (string literal): would read each posting record
            # instead of seeking past the payload.
            '''
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4)
                (docID,impactScore,rank) = unpack( "1I1f1I", byteString)
                print docID,impactScore,rank
            '''
            # Jump directly to the next term's header.
            numOfBytes += 4 + 4 + score1 * 12
            inputFileHandler0.seek(numOfBytes)
            if totalNumOfTerms % 100000 == 0:
                print "# of terms processed:",totalNumOfTerms
    print "numOfBytes:",numOfBytes
    print "totalNumOfPostings:",totalNumOfPostings
    print "totalNumOfTerms:",totalNumOfTerms
print "overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "totalNumOfTerms:",totalNumOfTerms
print "Ends."
inputFileHandler0.close()
exit(1)

counter = 0
fileNameDict = {}
mypath = "/san_share/wei/workspace/NYU_IRTK/data/Clueweb09B_document_posting_arrays_withTheRankReplacingTheImpactScore/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
filenames.sort()
for ifn in filenames:
    print ifn
    statinfo = os.stat(mypath + ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    fileNameDict[ifn] = fileSize
print "Overall:"
print "len(fileNameDict):",fileNameDict
pathStr = "Clueweb09B_documentPostingArray_"
print "0",counter
for i in range(0,50):
    pathStr = "Clueweb09B_documentPostingArray_" + str(i) + "M" + "_" + str(i+1) + "M"
    # print pathStr,fileNameDict[pathStr]
    # print fileNameDict[pathStr]
    counter += fileNameDict[pathStr]
    print (i+1) * 1000000,counter
exit(1)

# make the binary into the plaintext format
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final"
ofh = open(ofn,"w")
  
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final.binary"
fileList = []
fileList.append(ifn)
for ifn in fileList:
    ifh = open(ifn,"rb")
    statinfo = os.stat(ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    numOfBytesRead = 0
    sumHitFreq = 0
    while numOfBytesRead < fileSize:
        byteString = ifh.read(4 + 4)
        (docID,freq) = unpack( "2I", byteString)
        # print docID,freq
        ofh.write(str(docID) + " " + str(freq) + "\n")
        sumHitFreq += freq
        numOfBytesRead += 4 * 2
    ifh.close()
    print "ifn:",ifn
    print "sumHitFreq:",sumHitFreq
print "Overall:"
print "ofn:",ofn
ofh.close()
exit(1)

totalNumOfPostings = 6451948010
onePercent = totalNumOfPostings * 0.01
twoPercent = totalNumOfPostings * 0.02
fivePercent = totalNumOfPostings * 0.05
tenPercent = totalNumOfPostings * 0.1
twentyPercent = totalNumOfPostings * 0.2
thirtyPercent = totalNumOfPostings * 0.3
fortyPercent = totalNumOfPostings * 0.4
fiftyPercent = totalNumOfPostings * 0.5

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/documentPartitionFileBasedOnDocHitsDividedBySize_20141102_50%"
ofh = open(ofn,"w")
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/resultfile0_50MQueries_final_withNumOfPostingsAdded_sortedByDocHitsDividedByDocSize"
ifh = open(ifn,"r")
l = ifh.readline()
postingSum = 0
while l:
    le = l.strip().split(" ")
    docID = le[0]
    currentPosting = int(le[2])
    postingSum += currentPosting
    if postingSum > fiftyPercent:
        break
    ofh.write(str(docID) + " " + "GXSth" + "\n")
    l = ifh.readline()
ifh.close()
ofh.close()
print "Overall:"
print "postingSum:",postingSum
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# make the binary into the plaintext format
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final"
ofh = open(ofn,"w")
  
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final.binary"
fileList = []
fileList.append(ifn)
for ifn in fileList:
    ifh = open(ifn,"rb")
    statinfo = os.stat(ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    numOfBytesRead = 0
    sumHitFreq = 0
    while numOfBytesRead < fileSize:
        byteString = ifh.read(4 + 4)
        (docID,freq) = unpack( "2I", byteString)
        # print docID,freq
        ofh.write(str(docID) + " " + str(freq) + "\n")
        sumHitFreq += freq
        numOfBytesRead += 4 * 2
    ifh.close()
    print "ifn:",ifn
    print "sumHitFreq:",sumHitFreq
print "Overall:"
print "ofn:",ofn
ofh.close()
exit(1)

# The operation of checking the hits
# simple checking for the correctness of the resultfile0_RAW and resultfile0_FINAL
# ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/point3MQueries/binaryFiles/resultfile0_point3MQueries_final.binary"
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/point3MQueries/binaryFiles/resultfile0_point3MQueries_raw.binary"
#ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/1MQueries/binaryFiles/resultfile0_1MQueries_final.binary"
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/1MQueries/binaryFiles/resultfile0_1MQueries_raw.binary0"
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_raw.binary0"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/10MQueries_OLD/binaryFiles/resultfile0_10MQueries_previous_final.binary"

fileList = []
fileList.append(ifn0)
fileList.append(ifn1)
for ifn in fileList:
    ifh = open(ifn,"rb")
    statinfo = os.stat(ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    numOfBytesRead = 0
    sumHitFreq = 0
    while numOfBytesRead < fileSize:
        byteString = ifh.read(4 + 4)
        (docID,freq) = unpack( "2I", byteString)
        # print docID,freq
        sumHitFreq += freq
        numOfBytesRead += 4 * 2
    ifh.close()
    print "Overall:"
    print "ifn:",ifn
    print "sumHitFreq:",sumHitFreq
exit(1)











ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/binaryFiles/resultfile0_FINAL"
fileList = []
fileList.append(ifn0)
for ifn in fileList:
    ifh = open(ifn,"rb")
    statinfo = os.stat(ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    numOfBytesRead = 0
    sumHitFreq = 0
    while numOfBytesRead < fileSize:
        byteString = ifh.read(4 + 4)
        (docID,freq) = unpack( "2I", byteString)
        # print docID,freq
        sumHitFreq += freq
        numOfBytesRead += 4 * 2
    ifh.close()
    print "Overall:"
    print "ifn:",ifn
    print "sumHitFreq:",sumHitFreq
exit(1)






# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/xcx_docHits_2000.binary"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_20141030/binaryFiles/resultfile0"
ifh = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0

while numOfBytesRead < fileSize:
    byteString = ifh.read(4 + 4)
    (docID,freq) = unpack( "2I", byteString)
    print docID,freq
    if docID == 1:
        exit(1)
    
    numOfBytesRead += 4 * 2
ifh.close()
exit(1)


#95001 1 15.8295 24914902 GX269-11-4995482
#95001 2 15.5578 22032813 GX234-54-7178342
qidWithGoldenDocumentResultListDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    currentQID = le[0]
    if currentQID not in qidWithGoldenDocumentResultListDict:
        qidWithGoldenDocumentResultListDict[currentQID] = []
    qidWithGoldenDocumentResultListDict[currentQID].append(le[3])
    l = ifh0.readline()
ifh0.close()
print "Overall:"
print "len(qidWithGoldenDocumentResultListDict):",len(qidWithGoldenDocumentResultListDict)
sum1 = 0
for qid in qidWithGoldenDocumentResultListDict:
    sum1 += len(qidWithGoldenDocumentResultListDict[qid])
print "sum1:",sum1
print "qidWithGoldenDocumentResultListDict['95001']:",qidWithGoldenDocumentResultListDict['95001']

qidWithDocumentResultListDict = {}
documentResultDict = {}
fileNameList = []

# combination1
#ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_max-score_UPP-5_1%_20141028"
#ifn21 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_max-score_docHits_10%_20141028"

# combination2
ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_max-score_UPP-5_5%_20141029"
ifn22 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_max-score_docHits_5%_20141029"

# combination1
# fileNameList.append((ifn11,"UPP-5_1%"))
# fileNameList.append((ifn21,"docHits_10%"))

# combination2
fileNameList.append((ifn12,"UPP-5_5%"))
fileNameList.append((ifn22,"docHits_5%"))

counter = 0

# Parse each raw result file: a "qid:"-prefixed line switches the current
# query id, and 15-field lines whose second-to-last field starts with "GX"
# are document result rows.  The first file to report a given (qid, docID)
# pair wins; later duplicates are dropped via documentResultDict.
for fileName, idString in fileNameList:
    ifh = open(fileName,"r")
    currentQID = ""
    for line in ifh:
        le = line.strip().split(" ")
        if line.strip().startswith("qid:"):
            currentQID = le[1]

        if len(le) == 15 and le[-2].startswith("GX"):
            docID = le[-3]
            dedupKey = currentQID + "_" + docID
            if dedupKey not in documentResultDict:
                documentResultDict[dedupKey] = 1
                candidateTuple = (le[0], float(le[-4]), docID, idString)
                qidWithDocumentResultListDict.setdefault(currentQID, []).append(candidateTuple)

    ifh.close()

print "debug:"
print "len(qidWithDocumentResultListDict):",len(qidWithDocumentResultListDict)
# print "qidWithDocumentResultListDict['95001']:",qidWithDocumentResultListDict['95001']
sum2 = 0
SLAAnsweringHistogramDict = {}
SLAAnsweringHistogramDict[0] = 0
SLAAnsweringHistogramDict[1] = 0
SLAAnsweringHistogramDict[2] = 0
SLAAnsweringHistogramDict[3] = 0
SLAAnsweringHistogramDict[4] = 0
SLAAnsweringHistogramDict[5] = 0
SLAAnsweringHistogramDict[6] = 0
SLAAnsweringHistogramDict[7] = 0
SLAAnsweringHistogramDict[8] = 0
SLAAnsweringHistogramDict[9] = 0
SLAAnsweringHistogramDict[10] = 0
counterForEachQuery = 0
for qid in qidWithDocumentResultListDict:
    counterForEachQuery = 0
    sum2 += len(qidWithDocumentResultListDict[qid][0:10])
    qidWithDocumentResultListDict[qid].sort(cmp=None, key=itemgetter(1), reverse=True)
    # print "qid:",qid
    for documentCandidateTuple in qidWithDocumentResultListDict[qid][0:10]:
        (rank,score,docID,id) = documentCandidateTuple
        if docID in qidWithGoldenDocumentResultListDict[qid]:
            counter += 1
            counterForEachQuery += 1
    SLAAnsweringHistogramDict[counterForEachQuery] += 1
print "sum2:",sum2

print "Overall:"
print "counter:",counter
print "SLAAnsweringHistogramDict[0]:",SLAAnsweringHistogramDict[0]
print "SLAAnsweringHistogramDict[1]:",SLAAnsweringHistogramDict[1]
print "SLAAnsweringHistogramDict[2]:",SLAAnsweringHistogramDict[2]
print "SLAAnsweringHistogramDict[3]:",SLAAnsweringHistogramDict[3]
print "SLAAnsweringHistogramDict[4]:",SLAAnsweringHistogramDict[4]
print "SLAAnsweringHistogramDict[5]:",SLAAnsweringHistogramDict[5]
print "SLAAnsweringHistogramDict[6]:",SLAAnsweringHistogramDict[6]
print "SLAAnsweringHistogramDict[7]:",SLAAnsweringHistogramDict[7]
print "SLAAnsweringHistogramDict[8]:",SLAAnsweringHistogramDict[8]
print "SLAAnsweringHistogramDict[9]:",SLAAnsweringHistogramDict[9]
print "SLAAnsweringHistogramDict[10]:",SLAAnsweringHistogramDict[10]
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/scriptForRunningFakeQueries_20141027.sh"
ofh = open(ofn,"w")

mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/100M_fakeQueryLogRelated/"
basePart1 = "./irtk --local --query --query-mode=batch --query-algorithm=daat-and --result-format=pruning --config-options=" 
basePart2 = "batch_query_input_file=/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/100M_fakeQueryLogRelated/"
basePart3 = "LEAVE_wei_uniform_pruning_2013-09-12-16-12-30_None_None"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print f
counter = 0
for ifn in f:
    completeCommandPath = "#" + basePart1 + basePart2 + ifn + " " + basePart3 + " > " + mypath + ifn + "_rawResult" 
    print completeCommandPath
    ofh.write(completeCommandPath + "\n")
    counter += 1
    if counter % 10 == 0:
        print
        ofh.write("\n")
ofh.close()
print "Overall:"
print "ofn:",ofn
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_toolkit_friendly"
ifh = open(ifn,"r")
l = ifh.readline()
sum = 0
while l:
    le = l.strip().split(" ")
    sum += float(le[1])
    l = ifh.readline()
ifh.close()
print "Overall:"
print "sum:",sum
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_toolkit_friendly"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_FIXED"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentKey = le[0] + "_" + le[1] + "_" + le[2]
    value = le[3]
    ofh.write(currentKey + " " + value + "\n")
    l = ifh.readline()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_FIXED"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_RAW"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 0
minValue = 6.69585453985e-11
while l:
    le = l.strip().split(" ")
    probability = float(le[3])
    
    print le,probability
    if probability > 1:
        probability = minValue
        counter+=1
    if probability == 0.0:
        probability = minValue
        counter+=1
    ofh.write(le[0] + " " + le[1] + " " + le[2] + " " + str(probability) + "\n")
    l = ifh.readline()

print "Overall:"
print "counter:",counter
print "minValue:",minValue
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

numerator3DProbablityTable = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DNumeratorTableGeneration_20141025_sorted"
ifh = open(ifn1,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    key1 = le[0]
    key2 = le[1]
    key3 = le[2]
    value = int(le[3])
    print key1,key2,key3,value
    if key1 not in numerator3DProbablityTable:
        numerator3DProbablityTable[key1] = {}
    if key2 not in numerator3DProbablityTable[key1]:
        numerator3DProbablityTable[key1][key2] = {}
    if key3 not in numerator3DProbablityTable[key1][key2]:
        numerator3DProbablityTable[key1][key2][key3] = value 
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(numerator3DProbablityTable):",len(numerator3DProbablityTable)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DProbabilityTableGeneration_20141025_sorted"
ofh = open(ofn,"w")

denominator3DProbablityTable = {}
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3DDenominatorTableGeneration_20141025_sorted"
ifh = open(ifn2,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    key1 = le[0]
    key2 = le[1]
    key3 = le[2]
    value = int(le[3])
    print key1,key2,key3,value
    if key1 not in denominator3DProbablityTable:
        denominator3DProbablityTable[key1] = {}
    if key2 not in denominator3DProbablityTable[key1]:
        denominator3DProbablityTable[key1][key2] = {}
    if key3 not in denominator3DProbablityTable[key1][key2]:
        denominator3DProbablityTable[key1][key2][key3] = value
    
    currentProbability = 0.0
    if denominator3DProbablityTable[key1][key2][key3] != 0.0:
        if key1 in numerator3DProbablityTable:
            if key2 in numerator3DProbablityTable[key1]:
                if key3 in numerator3DProbablityTable[key1][key2]:
                    currentProbability = numerator3DProbablityTable[key1][key2][key3] / denominator3DProbablityTable[key1][key2][key3]
    else:
        pass # Just do not do anything
    ofh.write(str(key1) + " " + str(key2) + " " + str(key3) + " " + str(currentProbability) + "\n")
    l = ifh.readline()
ifh.close()

print "Overall:"
print "len(numerator3DProbablityTable):",len(numerator3DProbablityTable)
print "len(denominator3DProbablityTable):",len(denominator3DProbablityTable)
print "ifn1:",ifn1
print "ifn2:",ifn2
print "ofn:",ofn
ofh.close()
exit(1)

# I need 2 things in order to build the denominator table
# (1) the freq of the term in the training query log
# (2) the # of postings in each range
# key: actual term for training
# value: real freq of this term appeared in the traing query log
trainingQueryTermsWithFreqDict = {}
# for gov2 dataset
# on vidaserver1
inputFileName0 = "/home/vgc/wei/workspace/NYU_IRTK/data/firstFactorProbability/realFreqOfTermsIn_100KQueries_0_1_95%"
inputFileHandler = open(inputFileName0,"r")

for index,line in enumerate( inputFileHandler.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermRealFreqInTrainingQueires = int(lineElements[1])
    # ignore the lineElements[1] cause I don't need this info
    if currentTerm not in trainingQueryTermsWithFreqDict:
        trainingQueryTermsWithFreqDict[currentTerm] = currentTermRealFreqInTrainingQueires
    else:
        print "duplicated terms found."
        exit(1)
print "len(trainingQueryTermsWithFreqDict):",len(trainingQueryTermsWithFreqDict)
# print "trainingQueryTermsWithFreqDict['of']:",trainingQueryTermsWithFreqDict['of']
# print "trainingQueryTermsWithFreqDict['fawn']:",trainingQueryTermsWithFreqDict['fawn']
inputFileHandler.close()

# 3 dimensional array
# key1: list length class label
# key2: rangeID
# key3: docHit range
# Build the denominator table: for every term row, add its per-column counts
# (columns 4..end) weighted by how often the term occurs in the training
# query log; terms absent from the training log contribute nothing.
threeDimensionalArray = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/3D_DenominatorTable_20141025/all_termDocHitsIntegration_sortedByListLengthLabel_20141009"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentTerm = le[0]
    # column 2 carries the list-length class label; counts start at column 4
    listLengthClassLabel = int(le[2])
    if listLengthClassLabel not in threeDimensionalArray:
        threeDimensionalArray[listLengthClassLabel] = {}
    for i in range(4,len(le)):
        if i not in threeDimensionalArray[listLengthClassLabel]:
            threeDimensionalArray[listLengthClassLabel][i] = 0
        if currentTerm not in trainingQueryTermsWithFreqDict:
            pass
        else:
            # weight the count by the term's real training-query frequency
            threeDimensionalArray[listLengthClassLabel][i] += int(le[i]) * trainingQueryTermsWithFreqDict[currentTerm]
    l = ifh.readline()
ifh.close()

#print "Overall:"
#print "len(threeDimensionalArray):",len(threeDimensionalArray)
secondDimensionID = 0
# Dump as (label, rangeID, bucket, count): flat column index j-4 is split
# assuming 14 docHit buckets per rangeID.
# NOTE(review): the 14-bucket width is not derivable from anything visible
# here — confirm against the file format before relying on it.
for i in range(0,69):
    #print threeDimensionalArray[i]
    #print len(threeDimensionalArray[i])
    for j in range(4,4+len(threeDimensionalArray[i])):
        print i,int((j-4)/14),(j-4)%14,threeDimensionalArray[i][j]
    '''
    for j in range(0,len(threeDimensionalArray[i])):
        if j not in threeDimensionalArray[i]:
            print i,secondDimensionID,j,0
        else:
            print i,secondDimensionID,j,threeDimensionalArray[i][j]
    '''
exit(1)

# a check from 3D to 2D
numerator3DProbablityTable = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/numeratorTableGeneration_20141025"
ifh = open(ifn1,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    key1 = le[0]
    key2 = le[1]
    key3 = le[2]
    value = int(le[3])
    # print key1,key2,key3,value
    if key1 not in numerator3DProbablityTable:
        numerator3DProbablityTable[key1] = {}
    if key2 not in numerator3DProbablityTable[key1]:
        numerator3DProbablityTable[key1][key2] = {}
    if key3 not in numerator3DProbablityTable[key1][key2]:
        numerator3DProbablityTable[key1][key2][key3] = value 
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(numerator3DProbablityTable):",len(numerator3DProbablityTable)
sum = 0
for key1 in numerator3DProbablityTable:
    for key2 in numerator3DProbablityTable[key1]:
        sum = 0
        for key3 in numerator3DProbablityTable[key1][key2]:
            # print key1,key2,numerator3DProbablityTable[key1][key2]
            sum += numerator3DProbablityTable[key1][key2][key3]
        print key1,key2,sum
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/probablityTableGeneration_20141025"
ofh = open(ofn,"w")

denominator3DProbablityTable = {}
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/denominatorTableGeneration_20141025"
ifh = open(ifn2,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    key1 = le[0]
    key2 = le[1]
    key3 = le[2]
    value = int(le[3])
    print key1,key2,key3,value
    if key1 not in denominator3DProbablityTable:
        denominator3DProbablityTable[key1] = {}
    if key2 not in denominator3DProbablityTable[key1]:
        denominator3DProbablityTable[key1][key2] = {}
    if key3 not in denominator3DProbablityTable[key1][key2]:
        denominator3DProbablityTable[key1][key2][key3] = value
    
    currentProbability = 0.0
    if denominator3DProbablityTable[key1][key2][key3] != 0.0:
        if key1 in numerator3DProbablityTable:
            if key2 in numerator3DProbablityTable[key1]:
                if key3 in numerator3DProbablityTable[key1][key2]:
                    currentProbability = numerator3DProbablityTable[key1][key2][key3] / denominator3DProbablityTable[key1][key2][key3]
    else:
        pass # Just do not do anything
    ofh.write(str(key1) + " " + str(key2) + " " + str(key3) + " " + str(currentProbability) + "\n")
    l = ifh.readline()
ifh.close()

print "Overall:"
print "len(denominator3DProbablityTable):",len(denominator3DProbablityTable)
print "ifn1:",ifn1
print "ifn2:",ifn2
print "ofn:",ofn
ofh.close()
exit(1)





ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920/"
targetDir = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920/"
for dirname, dirnames, filenames in os.walk(ifn):
    for dirname in dirnames:
        completeDirName = ifn + dirname
        print "mv",completeDirName,targetDir,"&"
exit(1)

# step1:
qidWithUnprunedPAt10Dict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/humanJudgeQueries_pAt10_MappingTable_100%"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    value = float(le[1])
    qidWithUnprunedPAt10Dict[qid] = value
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "len(qidWithUnprunedPAt10Dict):",len(qidWithUnprunedPAt10Dict)

# step2:
qidWithDocHits_10Percent_PAt10Dict = {}
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/humanJudgeQueries_pAt10_MappingTable_docHits_10%"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    value = float(le[1])
    qidWithDocHits_10Percent_PAt10Dict[qid] = value
    l = ifh2.readline()
ifh1.close()
print "Overall:"
print "len(qidWithDocHits_10Percent_PAt10Dict):",len(qidWithDocHits_10Percent_PAt10Dict)

# step3 and step4:
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_10%_20141022_FINAL_extended"
ofh = open(ofn,"w")

ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_10%_20141022_FINAL"
ifh4 = open(ifn4,"r")
l = ifh4.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    ofh.write(le[0] + " " + le[1] + " " + le[2] + " " + le[3] + " " + le[4] + " " + le[5] + " " + str(qidWithDocHits_10Percent_PAt10Dict[qid]) + " " + str(qidWithUnprunedPAt10Dict[qid]) + "\n")
    l = ifh4.readline()
ifh4.close()
ofh.close()
print "Overall:"
print "ofn:",ofn
exit(1)

ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/humanJudgeQueries_pAt10_MappingTable_100%"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/humanJudgeQueries_pAt10_MappingTable_docHits_10%"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/humanJudgeQueries_pAt10_MappingTable_UPP-5_ptPowTo0_10%"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHits_10%_20141022_FINAL"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_10%_ptPowTo0_20141022_FINAL"

qidWithUnprunedPAt10Dict = {}
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    value = float(le[1])
    qidWithUnprunedPAt10Dict[qid] = value
    l = ifh1.readline()
ifh1.close()
print "Overall:"
print "len(qidWithUnprunedPAt10Dict):",len(qidWithUnprunedPAt10Dict)

qidWithDocHits_10Percent_PAt10Dict = {}
ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    value = float(le[1])
    qidWithDocHits_10Percent_PAt10Dict[qid] = value
    l = ifh2.readline()
ifh1.close()
print "Overall:"
print "len(qidWithDocHits_10Percent_PAt10Dict):",len(qidWithDocHits_10Percent_PAt10Dict)

qidProcessedFlagDict = {}

# docHits 10% tier1
markList1 = [91,120,126,131]
# UPP-5 10% tier1
markList2 = [124,135,137,140,150]

print "**********docHits 10%"
ifh4 = open(ifn4,"r")
l = ifh4.readline()
counter = 1
sumQualityValuePart1 = 0.0
sumQualityValuePart2 = 0.0
while l:
    if counter in markList1:
        print counter,"marked"
        sumQualityValuePart2 = sumQualityValuePart1
        for qid in qidWithDocHits_10Percent_PAt10Dict:
            if qid not in qidProcessedFlagDict: 
                sumQualityValuePart2 += qidWithDocHits_10Percent_PAt10Dict[qid]
        print sumQualityValuePart1/counter,counter
        print sumQualityValuePart2/149,149
    
    le = l.strip().split(" ")
    qid = le[0]
    
    qidProcessedFlagDict[qid] = True
    sumQualityValuePart1 += qidWithUnprunedPAt10Dict[qid]
    
    l = ifh4.readline()
    counter += 1
ifh4.close()

# Second pass, labeled UPP-5 10%, driven by markList2.
# NOTE(review): this re-opens ifn4 (the docHits 10% file) even though the
# banner says UPP-5 10% — confirm whether ifn5 was intended here.
print "**********UPP-5 10%"
ifh4 = open(ifn4,"r")
l = ifh4.readline()
counter = 1
sumQualityValuePart1 = 0.0
sumQualityValuePart2 = 0.0
while l:
    if counter in markList2:
        print counter,"marked"
        # Part2 = unpruned quality of queries seen so far plus pruned quality
        # of the still-unseen queries.  qidProcessedFlagDict is NOT reset
        # between the two passes, so queries from the first pass stay marked.
        sumQualityValuePart2 = sumQualityValuePart1
        for qid in qidWithDocHits_10Percent_PAt10Dict:
            if qid not in qidProcessedFlagDict:
                sumQualityValuePart2 += qidWithDocHits_10Percent_PAt10Dict[qid]
        print sumQualityValuePart1/counter,counter
        # 149 is presumably the total number of judged queries — confirm
        print sumQualityValuePart2/149,149

    le = l.strip().split(" ")
    qid = le[0]

    qidProcessedFlagDict[qid] = True
    sumQualityValuePart1 += qidWithUnprunedPAt10Dict[qid]

    l = ifh4.readline()
    counter += 1
ifh4.close()
print "**********"
exit(1)

# unpruned 100%
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qualityEvaluationTools/trecEvalOutput_20141024"
# docHits_10%_ptPowTo0
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qualityEvaluationTools/debug_docHits_10%_ptPowTo0"
# UPP-5_10%_ptPowTo0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qualityEvaluationTools/debug_UPP-5_10%_ptPowTo0"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split("\t")
    # print le
    if le[0].strip() == "P_10":
        print le[1],le[2]
    l = ifh.readline()
ifh.close()
print "Overall:"
print ifn
exit(1)

reimbursementFlag = False
for i in range(0,1):
    if i == 0:
        reimbursementFlag = False
    else:
        reimbursementFlag = True
    
    # key: qid
    # value: cost in ms
    qidTier1CostDict = {}
    '''
    # UPP-5_10%_ptPowTo1
    # completeFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/another5000_cost"
    # UPP-5_10%_ptPowTo0
    # completeFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo0/another5000_cost"
    # UPP-5_20%_ptPowTo0
    # completeFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_20%_ptPowTo0/another5000_cost"
    # UPP-5_30%_ptPowTo0
    # completeFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_30%_ptPowTo0/another5000_cost"
    # UPP-5_40%_ptPowTo0
    # completeFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_40%_ptPowTo0/another5000_cost"
    ifh = open(completeFileName,"r")
    for l in ifh.readlines():
        le = l.strip().split(" ")
        qid = le[0]
        costInTier1 = float(le[1])
        qidTier1CostDict[qid] = costInTier1
    print "len(qidTier1CostDict):",len(qidTier1CostDict)
    # print "qidTier1CostDict['0']:",qidTier1CostDict['0']
    ifh.close()
    '''
    if reimbursementFlag:
        pass
    else:
        qidTier1CostDict = {} # turn the reimbursement OFF
        
    # for the SLA requirement
    # key: freq
    # value: numOfQueriesBelongingToThisGroup
    histogramDictOriginal = {}
    histogramDictOriginal[0] = 0
    histogramDictOriginal[1] = 0
    histogramDictOriginal[2] = 0
    histogramDictOriginal[3] = 0
    histogramDictOriginal[4] = 0
    histogramDictOriginal[5] = 0
    histogramDictOriginal[6] = 0
    histogramDictOriginal[7] = 0
    histogramDictOriginal[8] = 0
    histogramDictOriginal[9] = 0
    histogramDictOriginal[10] = 0
    
    '''
    # histogram loading process
    # UPP-5_10%_ptPowTo1
    # ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/another5000_gain"
    # UPP-5_10%_ptPowTo0
    ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo0/another5000_gain"
    ifh = open(ifn,"r")
    l = ifh.readline()
    while l:
        le = l.strip().split(" ")
        numOfResultsRetained = int(le[2])
        histogramDictOriginal[numOfResultsRetained] += 1
        l = ifh.readline()
    ifh.close()
    print "Overall:"
    sum = 0
    for i in range(0,11):
        sum += histogramDictOriginal[i]
    print "sum:",sum
    '''
    
    # Let's add the histogram thing cause I really want to see it.
    # Let's check the machine learned curves first
    # The two kinds of curves are separated so let's try to plot ONLY one kind of curve first.
    # one for prediction and one for optimal
    basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
    fileList = []
    
    '''
    # 10%, ptPowTo0
    #plotTuple1 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByPredictedGain","UPP-5_10%_ptPowTo0_predictedGain")
    plotTuple2 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByPredictedCost","UPP-5_10%_ptPowTo0_predictedCost")
    plotTuple3 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByPredictedGainCostRatio","UPP-5_10%_ptPowTo0_predictedGainCostRatio_original")
    #plotTuple4 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByRandom","UPP-5_10%_ptPowTo0_sortedByRandom")
    #plotTuple5 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByActualGain","UPP-5_10%_ptPowTo0_actualGain")
    #plotTuple6 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByActualCost","UPP-5_10%_ptPowTo0_actualCost")
    plotTuple7 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByActualGainCostRatio","UPP-5_10%_ptPowTo0_actualGainCostRatio")
    #plotTuple8 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByActuralGainPredictedCostRatio","UPP-5_10%_ptPowTo0_actualGainPredictedCostRatio")
    plotTuple9 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_10%_ptPowTo0_predictedGainCostRatio_1ms")
    
    plotTuple10 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141023_sortedByPredictedGainCostRatio_2ms_FINAL","UPP-5_10%_ptPowTo0_predictedGainCostRatio_2ms")
    plotTuple11 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141023_sortedByPredictedGainCostRatio_4ms_FINAL","UPP-5_10%_ptPowTo0_predictedGainCostRatio_4ms")
    '''
    
    '''
    # 10%, ptPowTo0
    plotTuple1 = (9707,49966,1547.76,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_10%_ptPowTo0_predictedGainCostRatio")
    # 20%, ptPowTo0
    plotTuple2 = (20555,49966,23491.9,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_20%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_20%_ptPowTo0_predictedGainCostRatio")
    # 30%, ptPowTo0
    plotTuple3 = (26889,49966,64417.5,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_30%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_30%_ptPowTo0_predictedGainCostRatio")
    # 40%, ptPowTo0
    plotTuple4 = (31545,49966,118171,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_40%_ptPowTo0_predictedGainCostRatio")
    # 50%, ptPowTo0
    plotTuple5 = (36051,49966,188043,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_50%_ptPowTo0_predictedGainCostRatio")
    '''
    
    '''
    # 10%, ptPowTo1
    plotTuple1 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGain","UPP-5_10%_ptPowTo1_predictedGain")
    plotTuple2 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedCost","UPP-5_10%_ptPowTo1_predictedCost")
    plotTuple3 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGainCostRatio","UPP-5_10%_ptPowTo1_predictedGainCostRatio_ORIGINAL")
    plotTuple4 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByRandom","UPP-5_10%_ptPowTo1_sortedByRandom")
    plotTuple5 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByActuralGain","UPP-5_10%_ptPowTo1_actualGain")
    plotTuple6 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByActuralCost","UPP-5_10%_ptPowTo1_actualCost")
    plotTuple7 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByActuralGainCostRatio","UPP-5_10%_ptPowTo1_actualGainCostRatio")
    plotTuple8 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByActuralGainPredictedCostRatio","UPP-5_10%_ptPowTo1_actualGainPredictedCostRatio")
    
    plotTuple9 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGainCostRatio_head4600","UPP-5_10%_ptPowTo1_predictedGainCostRatio_head4600")
    plotTuple10 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGainCostRatio_head4700","UPP-5_10%_ptPowTo1_predictedGainCostRatio_head4700")
    plotTuple11 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGainCostRatio_head4800","UPP-5_10%_ptPowTo1_predictedGainCostRatio_head4800")
    plotTuple12 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByPredictedGainCostRatio_head4900","UPP-5_10%_ptPowTo1_predictedGainCostRatio_head4900")
    
    plotTuple13 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo1_20141022_sortedByPredictedGainCostRatio_FINAL","UPP-5_10%_ptPowTo1_predictedGainCostRatio_FIXED")
    '''
    
    # Plot-tuple schema:
    #   (gainBasedNumerator, gainDenominator, costTotal, numOfQueries,
    #    inputFileName, plotLabel)
    # The first four values are the starting point of the curve before any
    # query is moved between tiers.
    # 30%, ptPowTo0
    # plotTuple3 = (26889,49966,64417.5,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_30%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_30%_ptPowTo0_predictedGainCostRatio")
    # 40%, ptPowTo0
    plotTuple41 = (31545,49966,118171,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_40%_ptPowTo0_predictedGainCostRatio")
    plotTuple42 = (31545,49966,118171,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_sortedByPredictedCost_FINAL","UPP-5_40%_ptPowTo0_predictedCost")
    plotTuple43 = (31545,49966,118171,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_sortedByActuralCost_FINAL","UPP-5_40%_ptPowTo0_acturalCost")
    plotTuple44 = (31545,49966,118171,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_sortedByActuralGainCost_FINAL","UPP-5_40%_ptPowTo0_acturalGainCost")
    
    # 50%, ptPowTo0
    plotTuple51 = (36051,49966,188043,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_FINAL","UPP-5_50%_ptPowTo0_predictedGainCostRatio")    
    plotTuple52 = (36051,49966,188043,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_sortedByPredictedCost_FINAL","UPP-5_50%_ptPowTo0_predictedCost")
    plotTuple53 = (36051,49966,188043,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_sortedByActuralCost_FINAL","UPP-5_50%_ptPowTo0_acturalCost")
    plotTuple54 = (36051,49966,188043,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_sortedByActuralGainCost_FINAL","UPP-5_50%_ptPowTo0_acturalGainCost")
    
    # Only the 40% curves are plotted in this run.
    fileList.append(plotTuple41)
    fileList.append(plotTuple42)
    fileList.append(plotTuple43)
    fileList.append(plotTuple44)

    #fileList.append(plotTuple51)
    #fileList.append(plotTuple52)
    #fileList.append(plotTuple53)
    #fileList.append(plotTuple54)
    
    '''
    fileList.append(plotTuple1)
    fileList.append(plotTuple2)
    fileList.append(plotTuple3)
    fileList.append(plotTuple4)
    fileList.append(plotTuple5)
    '''

    '''
    fileList.append(plotTuple2)
    fileList.append(plotTuple3)
    fileList.append(plotTuple7)
    fileList.append(plotTuple9)
    fileList.append(plotTuple10)    
    fileList.append(plotTuple11)
    '''
    
    #fileList.append(plotTuple1)
    #fileList.append(plotTuple2)
    #fileList.append(plotTuple3)
    #fileList.append(plotTuple4)
    #fileList.append(plotTuple5)
    #fileList.append(plotTuple6)
    #fileList.append(plotTuple7)
    #fileList.append(plotTuple8)
    #fileList.append(plotTuple9)
    #fileList.append(plotTuple10)
    #fileList.append(plotTuple11)
    #fileList.append(plotTuple12)
    #fileList.append(plotTuple13)
    
    for tuple in fileList:
        histogramDict = copy.deepcopy(histogramDictOriginal)
        (gainBasedNumerator,gainDenominator,costTotal,numOfQueries,fileName,labelValue) = tuple
        ifn = basePath + fileName
        print "ifn:",ifn
        dataPointTupleList = []
        
        gainAvg = gainBasedNumerator / gainDenominator 
        costAvg = costTotal/numOfQueries
        dataPointTupleList.append((gainAvg,costAvg))
        
        ifh = open(ifn,"r")
        l = ifh.readline()
        queryCounter = 0 
        print "-->beg",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
        while l:
            le = l.strip().split(" ")
            currentQID = le[0]
            currentGain = float(le[2])
            currentCost = float(le[4])
            gainBasedNumerator += currentGain
            currentAlreadyGet = 10 - currentGain
            histogramDict[currentAlreadyGet] -= 1
            histogramDict[10] += 1
            if reimbursementFlag:
                # print "-->currentQID:",currentQID
                print l.strip()
                costTotal += currentCost - qidTier1CostDict[currentQID]
            else:
                costTotal += currentCost
            gainAvg = gainBasedNumerator / gainDenominator 
            costAvg = costTotal/numOfQueries
            dataPointTupleList.append((gainAvg,costAvg))
            l = ifh.readline()
            if queryCounter % 500 == 0:
                print "-->mid",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
            queryCounter += 1
        ifh.close()
        print "Overall:"
        print "len(dataPointTupleList):",len(dataPointTupleList)
        print "dataPointTupleList[0]:",dataPointTupleList[0]
        print "dataPointTupleList[-1]:",dataPointTupleList[-1]
        print "queryCounter:",queryCounter
        print "-->end",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
        xDataPoints = []
        yDataPoints = []
        for tuple in dataPointTupleList:
            # print tuple
            (gainAvg,costAvg) = tuple
            xDataPoints.append(costAvg)
            yDataPoints.append(gainAvg)
        plt.plot(xDataPoints, yDataPoints, label=labelValue)
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.ylabel("Overlap Quality")
    plt.xlabel("Avg QPC in ms")
plt.show()
# Everything below this exit() is unreachable scratch code from earlier
# experiments, kept for reference only.
exit(1)

# 20%
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_20%_ptPowTo0_bottomCost1ms_FIXED"
# 30%
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_30%_ptPowTo0_bottomCost1ms_FIXED"
# 40%
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_bottomCost1ms_FIXED"
# 50%
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_bottomCost1ms_FIXED"
ofh = open(ofn,"w")

# UPP-5_10%_ptPowTo0
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_20%_ptPowTo0_20141018_sortedByPredictedGainCostRatio"
# UPP-5_20%_ptPowTo0
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_20%_ptPowTo0_sortedByGainCostRatio"
# UPP-5_30%_ptPowTo0
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_30%_ptPowTo0_sortedByGainCostRatio"
# UPP-5_40%_ptPowTo0
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_40%_ptPowTo0_sortedByGainCostRatio"
# UPP-5_50%_ptPowTo0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_50%_ptPowTo0_sortedByGainCostRatio"

ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentPredictedGain = float(le[1])
    currentPredictedCost = float(le[3])
    if currentPredictedGain < 0:
        currentPredictedGain = 0
    if currentPredictedCost < 1:
        currentPredictedCost = 1
    # print le[0],currentPredictedGain,le[2],currentPredictedCost,le[4],currentPredictedGain/currentPredictedCost
    ofh.write(str(le[0]) + " " + str(currentPredictedGain) + " " + str(le[2]) + " " + str(currentPredictedCost) + " " + str(le[4]) + " " + str(currentPredictedGain/currentPredictedCost) + "\n")
    l = ifh.readline()

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

ifn0 = sys.argv[1] # tier1 term list length
ifn2 = sys.argv[2] # tier1 term sample values
ifn3 = sys.argv[3] # tier1 query cost
ifn4 = sys.argv[4] # tier2 query gain
# sample file name: MLLearnedComponent_20141016/UPP-5_10%_ptPowTo0/trainingFile_UPP-5_10%_ptPowTo0_tiering_20141017_head_another5000.arff
ofn = sys.argv[5] # output new training file
ofn_predictingCost = ofn[:-5] + "_predictingCost" + ".arff"
ofn_predictingGain = ofn[:-5] + "_predictingGain" + ".arff"

# load the tier1 term list length
tier1TermListDict = {}
# UPP-5_5%_ptPowTo1
# ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_5%_ptPowTo1/listLength_FINAL"
# UPP-5_10%_ptPowTo1
# ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/listLength_FINAL"

# UPP-5_1%_ptPowTo0
# UPP-5_5%_ptPowTo0
# UPP-5_10%_ptPowTo0

# UPP-5_1%_ptPowToDot1
# UPP-5_5%_ptPowToDot1
# UPP-5_10%_ptPowToDot1

# UPP-5_1%_ptPowToDot3
# UPP-5_5%_ptPowToDot3
# UPP-5_10%_ptPowToDot3

# UPP-5_1%_ptPowToDot5
# UPP-5_5%_ptPowToDot5
# UPP-5_10%_ptPowToDot5

ifh0 = open(ifn0,"r")
for line in ifh0.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    listLength = int(lineElements[1])
    tier1TermListDict[term] = listLength 
print "len(tier1TermListDict):",len(tier1TermListDict)
ifh0.close()

# Map queryID -> list of its terms, capped at the first 10 terms.
# Input lines look like "qid:term1 term2 ...".
qidAndQueryTermsDict = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140908_step1"
ifh1 = open(ifn1,"r")
for row in ifh1:
    pieces = row.strip().split(":")
    # The slice caps the term list at 10 entries in a single step.
    qidAndQueryTermsDict[pieces[0]] = pieces[1].split(" ")[:10]
ifh1.close()

tier1TermSamplingValuesDict = {}
# UPP-5_5%_ptPowTo1
# ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_5%_ptPowTo1/UPP-5_5%_queryTermsSamplingValues_20141016_FINAL"
# UPP-5_10%_ptPowTo1
# ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/UPP-5_10%_queryTermsSamplingValues_20141016_FINAL"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
    le = l.strip().split(" ")
    term = le[0]
    outputLine = ""
    for value in le[1:]:
        outputLine += value + ","
    tier1TermSamplingValuesDict[term] = outputLine
    l = ifh2.readline()
ifh2.close()
print "len(tier1TermSamplingValuesDict):",len(tier1TermSamplingValuesDict)
# print "tier1TermSamplingValuesDict['0']:",tier1TermSamplingValuesDict['0']
# exit(1)

qidTier1CostDict = {}
# UPP-5_5%_ptPowTo1
# ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_5%_ptPowTo1/another5000_cost"
# UPP-5_10%_ptPowTo1
# ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/another5000_cost"
ifh3 = open(ifn3,"r")
l = ifh3.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    tier1Cost = float(le[1])
    qidTier1CostDict[qid] = tier1Cost 
    l = ifh3.readline()
ifh3.close()
print "len(qidTier1CostDict):",len(qidTier1CostDict)

qidTier2GainDict = {}
# UPP-5_5%_ptPowTo1
# ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_5%_ptPowTo1/another5000_gain"
# UPP-5_10%_ptPowTo1
# ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/another5000_gain"
ifh4 = open(ifn4,"r")
l = ifh4.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    gain = int(le[1]) - int(le[2])
    qidTier2GainDict[qid] = gain
    l = ifh4.readline()
ifh4.close()
print "len(qidTier2GainDict):",len(qidTier2GainDict)
print "qidTier2GainDict['1']:",qidTier2GainDict['1']

# Hard-coded outputs from earlier runs, kept for reference:
# UPP-5_5%_ptPowTo1
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_5%_ptPowTo1_tiering_20141017_head_another5000.arff"
# UPP-5_10%_ptPowTo1
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_10%_ptPowTo1_tiering_20141017_head_another5000.arff"
# Open the three output ARFF files: the full rebuilt training file plus the
# two label-specific variants (cost prediction / gain prediction).
ofh = open(ofn,"w")
ofh_predictingCost = open(ofn_predictingCost,"w")
ofh_predictingGain = open(ofn_predictingGain,"w")

# Rebuild the training ARFF: for every 237-field data row of the source file,
# replace the tier-1 columns with the freshly loaded tier-1 features and
# append cost/gain labels.  Rebuilt row layout (comma fields):
#   0       queryID (le[0])
#   1       le[1]
#   2-11    tier-1 list lengths (10, "?" when unknown/missing term slot)
#   12-111  tier-1 sampling values (10 per term slot = 100)
#   112-221 tier-2 fields copied from le[112..221] (110)
#   222-231 tier1-length / tier2-length ratios (10)
#   232     tier-1 cost, 233 tier-2 gain, 234 original class (le[236])
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_1%_ptPowTo1_tiering_20141014_head_another5000.arff"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(",")
    # Data rows have exactly 237 fields; everything else is ARFF header/meta
    # and is handled in the else-branch below.
    if len(le) == 237:
        #print "original:"
        #print l.strip()
        outputLine = ""
        outputLine += le[0] + ","
        outputLine += le[1] + ","
        #print "new:"
        # step1: tier-1 inverted-list length per query term, padded to 10
        # slots with "?".
        for term in qidAndQueryTermsDict[ le[0] ]:
            if term in tier1TermListDict:
                outputLine += str(tier1TermListDict[term]) + ","
            else:
                outputLine += "?" + ","
        for i in range(0,(10-len(qidAndQueryTermsDict[le[0]])) ):
            outputLine += "?" + ","
        
        # step2: 10 pre-serialized sampling values per term slot (the dict
        # values already carry their own trailing commas), padded likewise.
        for term in qidAndQueryTermsDict[ le[0] ]:
            if term in tier1TermSamplingValuesDict:
                outputLine += str(tier1TermSamplingValuesDict[term])
            else:
                outputLine += "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + ","
        
        for i in range(0,(10-len(qidAndQueryTermsDict[le[0]])) ):
            outputLine += "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + "," + "?" + ","
        
        # step3:
        # copy what ever I have for tier2
        for i in range(0,110):
            outputLine += le[112+i] + ","
        
        # step4:
        # compute the ll* from tier1 to ll* from tier2
        # (tempLE[2+i] = tier-1 list length, tempLE[112+i] = tier-2 value;
        # "?" propagates, a zero denominator yields "0".)
        tempLE = outputLine.strip().split(",")
        for i in range(0,10):
            if tempLE[2+i] != "?" and tempLE[112+i] != "?" and tempLE[112+i] != "0":
                outputLine += str( float(tempLE[2+i]) / float(tempLE[112+i]) ) + ","
            elif tempLE[112+i] == "0":
                outputLine += "0" + ","
            else:
                outputLine += "?" + ","
        
        # step5
        outputLine += str(qidTier1CostDict[ le[0] ]) + ","
        # step6
        outputLine += str(qidTier2GainDict[ le[0] ]) + ","
        # step7
        outputLine += le[236]
        
        outputLine += "\n"
        #print outputLine.strip()
        #print
        
        # for the general training file
        ofh.write(outputLine)
        tle = outputLine.strip().split(",")
        
        # for the training file used for predicting cost
        # NOTE(review): tle[1:233] spans fields 1..232, which still includes
        # the tier-1 cost (index 232), and the appended last value is
        # tle[234] (the original class) rather than the cost -- verify this
        # lines up with the attribute filtering in the else-branch below.
        outputLineForCost = ""
        for value in tle[1:233]:
            outputLineForCost += value + ","
        outputLineForCost += tle[234]
        outputLineForCost += "\n"
        ofh_predictingCost.write(outputLineForCost)
        
        # for the training file used for predicting gain
        # NOTE(review): features again include the cost field (index 232)
        # even though the cost attribute is dropped from this file's header
        # -- confirm column/attribute alignment against the source header.
        outputLineForGain = ""
        for value in tle[1:233]:
            outputLineForGain += value + ","
        outputLineForGain += tle[233]
        outputLineForGain += "\n"
        ofh_predictingGain.write(outputLineForGain)
    else:
        # Header/meta lines: drop the two obsolete tier-1 attributes from all
        # outputs, then route the rest -- queryID is excluded from the two
        # label-specific files, and each of those also drops the attribute of
        # the label it is NOT predicting.
        # ignore two don't need meta lines
        if l.strip().startswith("@attribute resultSizeUnderAND_tier1 numeric"):
            pass
        elif l.strip().startswith("@attribute resultSizeUnderOR_tier1 numeric"):
            pass
        else:
            ofh.write(l)
            if l.strip().startswith("@attribute queryID string"):
                pass
            else:
                if l.strip().startswith("@attribute gain numeric"):
                    pass
                else:
                    ofh_predictingCost.write(l)

                if l.strip().startswith("@attribute cost numeric"):
                    pass
                else:
                    ofh_predictingGain.write(l)
            
    # print "len(le):",len(le)
    l = ifh.readline()

ifh.close()
ofh.close()
ofh_predictingCost.close()
ofh_predictingGain.close()

print "Overall:"
print "ofn:",ofn
print "ofn_predictingCost:",ofn_predictingCost
print "ofn_predictingGain:",ofn_predictingGain
exit(1)

# output the cost
ifn = sys.argv[1]

qidWithCostDict = {}
qid = ""
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    if l.strip().startswith("qid"):
        le = l.strip().split(" ")
        qid = le[1]
        # print qid,
        if qid not in qidWithCostDict:
            qidWithCostDict[qid] = 1000
    if l.strip().startswith("WWW2015 Showing"):
        le = l.strip().split(" ")
        # print le[-2][1:]
        qidWithCostDict[qid] = le[-2][1:]
    
    l = ifh.readline()
ifh.close()
#print "Overall:"
#print "len(qidWithCostDict):",len(qidWithCostDict)
for qid in qidWithCostDict:
    print qid,qidWithCostDict[qid]
exit(1)

basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = []
#fileList.append("rawResults_1%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_5%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_10%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_20%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_30%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_40%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_50%_HumanJudgedQueries_docHits_20141022_RAW")

#fileList.append("rawResultsHumanJudgedQueries_UPP-5_1%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_5%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_10%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_20%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_30%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_40%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_50%_ptPowTo0_20141022_RAW")

fileList.append("rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW")

for fileName in fileList:
    complteFileName = basePath + fileName
    ofn = complteFileName + "_" + "QPC_tier1"
    ofh = open(ofn,"w")
    
    qidWithCostDict = {}
    qid = ""
    ifh = open(complteFileName,"r")
    print "complteFileName:",complteFileName
    l = ifh.readline()
    while l:
        if l.strip().startswith("qid"):
            le = l.strip().split(" ")
            qid = le[1]
            # print qid,
            if qid not in qidWithCostDict:
                qidWithCostDict[qid] = 1000
        if l.strip().startswith("WWW2015 Showing"):
            le = l.strip().split(" ")
            # print le[-2][1:]
            qidWithCostDict[qid] = le[-2][1:]
        
        l = ifh.readline()

    
    print "Overall:"
    print "len(qidWithCostDict):",len(qidWithCostDict)
    #for qid in qidWithCostDict:
    #    print qid,qidWithCostDict[qid]
    for qid in qidWithCostDict:
        ofh.write(qid + " " + qidWithCostDict[qid] + "\n")
    print "complteFileName:",complteFileName
    print "ofn:",ofn 
    ifh.close()
    ofh.close()
exit(1)

# logic of telling the differences
# Compare a UPP-5 result file (argv[1]) against the gold-standard top-10
# results, printing per query: qid, gold result count, overlap count.
ifn2 = sys.argv[1]

# the performance for another 5000 queries for the UPP method.
qidDict1 = {}
qidDict2 = {}

# key: qid -> list of gold top-10 docIDs / count of overlapping docIDs
top10DocIDDictFromGoldStandard = {}
top10DocIDDictFromUPP5Method = {}

# 150 human judge queries
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_Results_tier1_REAL"
# head5K another set of queries
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP10_documentResults_OR_unpruned_head_5000_20141016"
ifh1 = open(ifn1,"r")
counter = 0
# Gold file: qid in column 0, docID in column 2.
for l in ifh1.readlines():
    le = l.strip().split(" ")
    # documentResultKey = le[0] + "_" + le[2]
    if le[0] not in qidDict1:
        qidDict1[le[0]] = 1
    if le[0] not in top10DocIDDictFromGoldStandard:
        top10DocIDDictFromGoldStandard[le[0]] = []
    top10DocIDDictFromGoldStandard[le[0]].append(le[2])
    counter += 1

#print "len(qidDict1):",len(qidDict1)
#print "len(top10DocIDDictFromGoldStandard):",len(top10DocIDDictFromGoldStandard)
#print "top10DocIDDictFromGoldStandard['701']:",top10DocIDDictFromGoldStandard['701']
#print "counter:",counter

# Candidate file: qid in column 0, docID in column 3 (note the different
# column index -- presumably the two files have different formats; confirm).
ifh2 = open(ifn2,"r")
for l in ifh2.readlines():
    le = l.strip().split(" ")
    # documentResultKey = le[0] + "_" + le[2]
    if le[0] not in qidDict2:
        qidDict2[le[0]] = 1
    # NOTE(review): raises KeyError if the candidate file contains a qid that
    # is absent from the gold standard -- assumed impossible here.
    if le[3] in top10DocIDDictFromGoldStandard[le[0]]:
        if le[0] not in top10DocIDDictFromUPP5Method:
            top10DocIDDictFromUPP5Method[le[0]] = 0
        top10DocIDDictFromUPP5Method[le[0]] += 1
#print "len(qidDict2):",len(qidDict2)
#print "len(top10DocIDDictFromUPP5Method):",len(top10DocIDDictFromUPP5Method)
#print "Overall:"
#print "ifn1:",ifn1
#print "ifn2:",ifn2
# Print "qid goldCount overlapCount" for every gold query; 0 when the
# candidate found none of the gold documents.
for qid in top10DocIDDictFromGoldStandard:
    if qid in top10DocIDDictFromUPP5Method:
        print qid,len(top10DocIDDictFromGoldStandard[qid]),top10DocIDDictFromUPP5Method[qid]
    else:
        print qid,len(top10DocIDDictFromGoldStandard[qid]),"0"
ifh1.close()
ifh2.close()
exit(1)

# Let's make those results
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = []
'''
fileList.append("MLLearnedComponent_20141016/UPP-5_1%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_5%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_10%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries_20141016")

fileList.append("MLLearnedComponent_20141016/UPP-5_1%_ptPowToDot1/rawResults_UPP-5_ptPowToDot1_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_5%_ptPowToDot1/rawResults_UPP-5_ptPowToDot1_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_10%_ptPowToDot1/rawResults_UPP-5_ptPowToDot1_max-score_another5000Queries_20141016")

fileList.append("MLLearnedComponent_20141016/UPP-5_1%_ptPowToDot3/rawResults_UPP-5_ptPowToDot3_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_5%_ptPowToDot3/rawResults_UPP-5_ptPowToDot3_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_10%_ptPowToDot3/rawResults_UPP-5_ptPowToDot3_max-score_another5000Queries_20141016")

fileList.append("MLLearnedComponent_20141016/UPP-5_1%_ptPowToDot5/rawResults_UPP-5_ptPowToDot5_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_5%_ptPowToDot5/rawResults_UPP-5_ptPowToDot5_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_10%_ptPowToDot5/rawResults_UPP-5_ptPowToDot5_max-score_another5000Queries_20141016")

fileList.append("MLLearnedComponent_20141016/UPP-5_1%_ptPowTo1/rawResults_UPP-5_ptPowTo1_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_5%_ptPowTo1/rawResults_UPP-5_ptPowTo1_max-score_another5000Queries_20141016")
fileList.append("MLLearnedComponent_20141016/UPP-5_10%_ptPowTo1/rawResults_UPP-5_ptPowTo1_max-score_another5000Queries_20141016")
'''

# raw result files to reformat: 20%-50% pruning levels, ptPowTo0 variant
fileList.extend([
    "MLLearnedComponent_20141016/UPP-5_20%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries",
    "MLLearnedComponent_20141016/UPP-5_30%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries",
    "MLLearnedComponent_20141016/UPP-5_40%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries",
    "MLLearnedComponent_20141016/UPP-5_50%_ptPowTo0/rawResults_UPP-5_ptPowTo0_max-score_another5000Queries",
])
completeIFN = ""
completeOFN = ""
for fileName in fileList:
    completeIFN = basePath + fileName
    completeOFN = basePath + fileName + "_reformatted"
    #print completeIFN
    #print completeOFN
    #print
    ifh = open(completeIFN,"r")
    ofh = open(completeOFN,"w")
    l = ifh.readline()
    qid = ""
    while l:
        if l.strip().startswith("qid:"):
            le = l.strip().split(" ")
            qid = le[1]
        if l.strip().startswith("-->"):
            le = l.strip().split(" ")
            rank = le[1]
            score = le[2]
            docID = le[3]
            ofh.write(qid + " " + rank + " " + score + " " + docID + "\n")
        l = ifh.readline()
    ifh.close()
    ofh.close()
    print "Overall:"
    print "completeIFN:",completeIFN
    print "completeOFN:",completeOFN
    print
exit(1)

basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016"
# sampling-value files to condense: 20%-50% pruning levels, ptPowTo0 variant
fileList = ["UPP-5_%s_ptPowTo0/UPP-5_%s_queryTermsSamplingValues_RAW" % (p, p)
            for p in ("20%", "30%", "40%", "50%")]

'''
fileList.append("UPP-5_1%_ptPowTo0/UPP-5_1%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_5%_ptPowTo0/UPP-5_5%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_10%_ptPowTo0/UPP-5_10%_queryTermsSamplingValues_20141016_RAW")

fileList.append("UPP-5_1%_ptPowToDot1/UPP-5_1%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_5%_ptPowToDot1/UPP-5_5%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_10%_ptPowToDot1/UPP-5_10%_queryTermsSamplingValues_20141016_RAW")

fileList.append("UPP-5_1%_ptPowToDot3/UPP-5_1%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_5%_ptPowToDot3/UPP-5_5%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_10%_ptPowToDot3/UPP-5_10%_queryTermsSamplingValues_20141016_RAW")

fileList.append("UPP-5_1%_ptPowToDot5/UPP-5_1%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_5%_ptPowToDot5/UPP-5_5%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_10%_ptPowToDot5/UPP-5_10%_queryTermsSamplingValues_20141016_RAW")

fileList.append("UPP-5_5%_ptPowTo1/UPP-5_5%_queryTermsSamplingValues_20141016_RAW")
fileList.append("UPP-5_10%_ptPowTo1/UPP-5_10%_queryTermsSamplingValues_20141016_RAW")
'''

# ranks at which a sampled score is recorded for every term
positionToRecord = [1,5,10,20,50,100,500,1000,5000,10000]

completeIFNPath = ""
completeOFNPath1 = ""
# Condense each RAW sampling file: for every term, collect the sampled score
# at each rank listed in positionToRecord (padding missing ranks with "?"),
# and record the term's posting-list length into a per-directory file.
for fileName in fileList:
    completeIFNPath = basePath + "/" + fileName
    # fileName ends in "RAW"; [:-3] strips it, yielding ".../..._FINAL"
    completeOFNPath1 = basePath + "/" + fileName[:-3] + "FINAL"
    completeOFNPath2 = basePath + "/" + fileName.strip().split("/")[0] + "/" + "listLength_FINAL"
    parseFlag = False
    ifh = open(completeIFNPath,"r")
    ofh1 = open(completeOFNPath1,"w")
    ofh2 = open(completeOFNPath2,"w")
    
    l = ifh.readline()
    while l:
        if l.startswith("Overlapping Layer for"):
            le = l.strip().split(" ")
            # le[3] appears to be the term wrapped in quote/punctuation
            # characters; strip one leading and two trailing chars
            # -- TODO confirm against the actual input format
            term = le[3][1:-2]
            parseFlag = True
            outputLine = term + " "
            counter = 0
            # consume the per-rank score rows until the terminating
            # "WWW2015 Showing" summary line for this term
            while parseFlag:
                l = ifh.readline()
                
                if l.strip().startswith("WWW2015 Showing"):
                    le = l.strip().split(" ")
                    # le[6] carries the list length with one trailing char dropped
                    listLength = le[6][:-1]
                    parseFlag = False
                    # pad so every output row has exactly 10 score columns
                    for i in range(0,10-counter):
                        outputLine += "?" + " "
                    #print outputLine.strip()
                    #print term,listLength
                    #print
                    ofh1.write(outputLine + "\n")
                    ofh2.write(str(term) + " " + str(listLength) + "\n")
                    break
                
                le = l.strip().split(" ")
                rank = int(le[0])
                score = float(le[1])
                docID = int(le[2])
                # sanity check: data rows must appear exactly at the recorded
                # ranks. NOTE(review): a term with more than 10 data rows would
                # raise IndexError on positionToRecord[counter] here.
                if rank == positionToRecord[counter]:
                    pass
                else:
                    print "sth wrong"
                    exit(1)
                outputLine += str(score) + " "
                counter += 1
        l = ifh.readline()
    
    ifh.close()
    ofh1.close()
    ofh2.close()
    print "Overall:"
    print "completeIFNPath:",completeIFNPath
    print "completeOFNPath1:",completeOFNPath1
    print "completeOFNPath2:",completeOFNPath2
    print
exit(1)

tupleFileList = []
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
# NOTE(review): this first set of tuples is dead code -- each of
# tuple1..tuple5 is immediately reassigned by the second set below.
tuple1 = ("UPP-5_10%_ptPowTo0_gain","rawResults_10%_HumanJudgedQueries_docHits_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple2 = ("UPP-5_20%_ptPowTo0_gain","rawResults_20%_HumanJudgedQueries_docHits_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple3 = ("UPP-5_30%_ptPowTo0_gain","rawResults_30%_HumanJudgedQueries_docHits_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple4 = ("UPP-5_40%_ptPowTo0_gain","rawResults_40%_HumanJudgedQueries_docHits_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple5 = ("UPP-5_50%_ptPowTo0_gain","rawResults_50%_HumanJudgedQueries_docHits_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")

tuple1 = ("docHits_10%_gain","rawResultsHumanJudgedQueries_UPP-5_10%_ptPowTo0_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple2 = ("docHits_20%_gain","rawResultsHumanJudgedQueries_UPP-5_20%_ptPowTo0_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple3 = ("docHits_30%_gain","rawResultsHumanJudgedQueries_UPP-5_30%_ptPowTo0_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple4 = ("docHits_40%_gain","rawResultsHumanJudgedQueries_UPP-5_40%_ptPowTo0_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")
tuple5 = ("docHits_50%_gain","rawResultsHumanJudgedQueries_UPP-5_50%_ptPowTo0_20141022_RAW_QPC_tier1","rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW_QPC_tier2")

# NOTE(review): the same five tuples are appended twice, so tupleFileList
# ends up with ten entries, each tuple repeated.
tupleFileList.append(tuple1)
tupleFileList.append(tuple2)
tupleFileList.append(tuple3)
tupleFileList.append(tuple4)
tupleFileList.append(tuple5)

tupleFileList.append(tuple1)
tupleFileList.append(tuple2)
tupleFileList.append(tuple3)
tupleFileList.append(tuple4)
tupleFileList.append(tuple5)

# NOTE(review): "tuple" shadows the builtin of the same name, and the loop
# body only builds the three path names without using them -- this section
# looks unfinished / abandoned.
for tuple in tupleFileList:
    (f1,f2,f3) = tuple
    f1Name = basePath + f1
    f2Name = basePath + f2
    f3Name = basePath + f3
    
exit(1)




basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = []
#fileList.append("rawResults_1%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_5%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_10%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_20%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_30%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_40%_HumanJudgedQueries_docHits_20141022_RAW")
#fileList.append("rawResults_50%_HumanJudgedQueries_docHits_20141022_RAW")

#fileList.append("rawResultsHumanJudgedQueries_UPP-5_1%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_5%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_10%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_20%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_30%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_40%_ptPowTo0_20141022_RAW")
#fileList.append("rawResultsHumanJudgedQueries_UPP-5_50%_ptPowTo0_20141022_RAW")

fileList.append("rawResults_real90%_150HumanJudgeQueries_pretendToBe100%_UPP-5_ptPowTo1_20141022_RAW")

for fileName in fileList:
    complteFileName = basePath + fileName
    ifh = open(complteFileName,"r")
    # print "complteFileName:",complteFileName
    
    ofn = complteFileName + "_" + "Results_tier1"
    ofh = open(ofn,"w")
    
    qidWithCostDict = {}
    qid = ""

    l = ifh.readline()
    while l:
        if l.strip().startswith("qid"):
            le = l.strip().split(" ")
            qid = le[1]
        if l.strip().startswith("-->"):
            le = l.strip().split(" ")
            # print qid,le[1],le[2],le[3]
            ofh.write(str(qid) + " " + str(le[1]) +" " + str(le[2]) + " " + str(le[3]) + "\n")
        l = ifh.readline()
    
    print "Overall:"
    print "complteFileName:",complteFileName
    print "ofn:",ofn
    print
    
    ifh.close()
    ofh.close()
exit(1)



# Fall-through curve section: for each (file, label, linestyle, color) tuple,
# replay queries and accumulate (avg gain, avg cost) data points, optionally
# reimbursing each query's tier-1 cost, then plot the curves.
# NOTE(review): "plt" is referenced below, but "import matplotlib.pyplot as
# plt" is commented out at the top of this file -- this section would raise
# NameError if it were ever reached (it sits after an exit(1) above).
reimbursementFlag = False
# range(0,1) yields only i == 0, so only the no-reimbursement pass runs.
for i in range(0,1):
    if i == 0:
        reimbursementFlag = False
    else:
        reimbursementFlag = True
    basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
    fileList = []
    
    # each entry: (fileName, plot label, line style, color)
    fileList.append(("docHitsClairvoyantMethod/docHits_1%_sortedByActualGainCostRatio","docHits_CV_1%","solid","b"))
    #fileList.append("docHitsClairvoyantMethod/docHits_2%_sortedByActualGainCostRatio")
    fileList.append(("docHitsClairvoyantMethod/docHits_5%_sortedByActualGainCostRatio","docHits_CV_5%","solid","b"))
    fileList.append(("docHitsClairvoyantMethod/docHits_10%_sortedByActualGainCostRatio","docHits_CV_10%","solid","b"))
    fileList.append(("docHitsClairvoyantMethod/docHits_20%_sortedByActualGainCostRatio","docHits_CV_20%","solid","b"))
    fileList.append(("docHitsClairvoyantMethod/docHits_30%_sortedByActualGainCostRatio","docHits_CV_30%","solid","b"))
    fileList.append(("docHitsClairvoyantMethod/docHits_40%_sortedByActualGainCostRatio","docHits_CV_40%","solid","b"))
    #fileList.append("docHitsClairvoyantMethod/docHits_50%_sortedByActualGainCostRatio")
    #fileList.append("docHitsClairvoyantMethod/docHits_60%_sortedByActualGainCostRatio")
    #fileList.append("docHitsClairvoyantMethod/docHits_70%_sortedByActualGainCostRatio")
    
    fileList.append(("UPP-5_1%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_1%_ptPowTo0","dashed","r"))
    fileList.append(("UPP-5_5%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_5%_ptPowTo0","dashed","r"))
    fileList.append(("UPP-5_10%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_10%_ptPowTo0","dashed","r"))
    fileList.append(("UPP-5_20%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_20%_ptPowTo0","dashed","r"))
    fileList.append(("UPP-5_30%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_30%_ptPowTo0","dashed","r"))
    fileList.append(("UPP-5_40%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio","UPP-5_CV_40%_ptPowTo0","dashed","r"))
    #fileList.append("UPP-5_50%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio")
    #fileList.append("UPP-5_60%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio")
    #fileList.append("UPP-5_70%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio")
    #fileList.append("UPP-5_80%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio")
    #fileList.append("UPP-5_90%_gain_cost_ratio_20141015_ptPowTo0_sortedByGainCostRatio")
    
    #fileList.append("UPP-5_1%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    #fileList.append("UPP-5_5%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    #fileList.append("UPP-5_10%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    #fileList.append("UPP-5_20%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    #fileList.append("UPP-5_30%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    #fileList.append("UPP-5_40%_gain_cost_ratio_20141015_ptPowToDot1_sortedByGainCostRatio")
    
    #fileList.append("UPP-5_1%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    #fileList.append("UPP-5_5%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    #fileList.append("UPP-5_10%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    #fileList.append("UPP-5_20%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    #fileList.append("UPP-5_30%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    #fileList.append("UPP-5_40%_gain_cost_ratio_20141015_ptPowToDot3_sortedByGainCostRatio")
    
    #fileList.append("UPP-5_1%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    #fileList.append("UPP-5_5%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    #fileList.append("UPP-5_10%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    #(some problems)fileList.append("UPP-5_20%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    #(some problems)fileList.append("UPP-5_30%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    #(some problems)fileList.append("UPP-5_40%_gain_cost_ratio_20141015_ptPowToDot5_sortedByGainCostRatio")
    
    #fileList.append("UPP-5_1%_gain_cost_ratio_20141014_ptPowTo1_sortedByGainCostRatio")
    #fileList.append("UPP-5_5%_gain_cost_ratio_20141015_ptPowTo1_sortedByGainCostRatio")
    #fileList.append("UPP-5_10%_gain_cost_ratio_20141015_ptPowTo1_sortedByGainCostRatio")
    
    completeFileName = ""
    # NOTE: the loop variable "tuple" shadows the builtin of the same name.
    for tuple in fileList:
        # one-shot flags: record the predicted gain/cost ratio the first time
        # the running average cost crosses each 10ms threshold
        ratioFor10msCheckFlag = True
        ratioFor20msCheckFlag = True
        ratioFor30msCheckFlag = True
        ratioFor40msCheckFlag = True
        ratioFor50msCheckFlag = True
        ratioFor60msCheckFlag = True
        ratioFor70msCheckFlag = True
        
        (fileName,labelValue,linestyleValue,colorValue) = tuple
        completeFileName = basePath + fileName
        '''
        if reimbursementFlag:
            labelValue = te[1] + "_" + te[-2] + "_" + "reimbursement"
        else:
            labelValue = te[1] + "_" + te[-2] + "_" + "NOreimbursement"
        '''
        
        # step0: open the file the first time in order to fill the histogramDict table.
        # input file sample format
        # 98944 9 10 0.046 0.039 25.641
        # 96685 8 10 0.103 0.104 19.2308
        
        # key: qid
        # value: cost in ms
        qidTier1CostDict = {}
        
        # key: freq
        # value: numOfQueriesBelongingToThisGroup
        histogramDict = {}
        histogramDict[0] = 0
        histogramDict[1] = 0
        histogramDict[2] = 0
        histogramDict[3] = 0
        histogramDict[4] = 0
        histogramDict[5] = 0
        histogramDict[6] = 0
        histogramDict[7] = 0
        histogramDict[8] = 0
        histogramDict[9] = 0
        histogramDict[10] = 0
        
        ifh = open(completeFileName,"r")
        for l in ifh.readlines():
            le = l.strip().split(" ")
            qid = le[0]
            costInTier1 = float(le[3])
            currentAlreadyGet = int(le[1])
            histogramDict[currentAlreadyGet] += 1
            qidTier1CostDict[qid] = costInTier1
        print "len(histogramDict):",len(histogramDict)
        print "len(qidTier1CostDict):",len(qidTier1CostDict)
        ifh.close()
        # with reimbursement off, empty the dict so the lookups below find
        # nothing and currentMagicReimbursed stays 0.0
        if reimbursementFlag:
            pass
        else:
            qidTier1CostDict = {} # turn the reimbursement OFF
        
        print "-->NEED TO RECORD:",fileName
        te = fileName.strip().split("_")

        numOfQueriesFallingThrough = 0
        dataPointTupleList = []
        gainBasedNumerator = 0
        gainDenominator = 49808
        costTotal = 0
        numOfQueries = 4981
        
        # step1: Compute the starting point
        ifh = open(completeFileName,"r")
        for l in ifh.readlines():
            le = l.strip().split(" ")
            gainBasedNumerator += float(le[1])
            costTotal += float(le[3])
        ifh.close()
        gainAvg = gainBasedNumerator / gainDenominator
        costAvg = costTotal/numOfQueries
        dataPointTupleList.append((gainAvg,costAvg))
        print gainAvg,costAvg
        print
        
        
        # step2: Compute along the way
        # input file sample format
        # 98944 9 10 0.046 0.039 25.641
        # 96685 8 10 0.103 0.104 19.2308
        ifh = open(completeFileName,"r")
        l = ifh.readline()
        
        queryCounter = 0
        # original
        print "-->beg",queryCounter,gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
        # simplified
        # print costAvg
        while l:
            le = l.strip().split(" ")
            currentQID = le[0]
            # remaining results for this query: le[2] is the target count,
            # le[1] how many were already obtained
            currentGain = int(le[2]) - int(le[1])
            currentCost = float(le[4])
            currentPredictedGainCostRatio = float(le[5])
            currentMagicReimbursed = 0.0
            if currentQID in qidTier1CostDict:
                currentMagicReimbursed = qidTier1CostDict[currentQID]
            else:
                currentMagicReimbursed = 0.0
            currentAlreadyGet = int(le[1])
            histogramDict[currentAlreadyGet] -= 1
            histogramDict[10] += 1
            # stop at the first zero-gain query (file names indicate the
            # input is sorted by gain/cost ratio, so the rest add nothing)
            if currentGain == 0:
                break
            numOfQueriesFallingThrough += 1
            gainBasedNumerator += currentGain
            costTotal += currentCost - currentMagicReimbursed
            gainAvg = gainBasedNumerator / gainDenominator
            costAvg = costTotal/numOfQueries
            if costAvg >= 10 and ratioFor10msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor10msCheckFlag = False
            if costAvg >= 20 and ratioFor20msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor20msCheckFlag = False
            if costAvg >= 30 and ratioFor30msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor30msCheckFlag = False
            if costAvg >= 40 and ratioFor40msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor40msCheckFlag = False
            if costAvg >= 50 and ratioFor50msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor50msCheckFlag = False
            if costAvg >= 60 and ratioFor60msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor60msCheckFlag = False
            if costAvg >= 70 and ratioFor70msCheckFlag:
                print "-->NEED TO RECORD:",costAvg,currentPredictedGainCostRatio
                ratioFor70msCheckFlag = False
            dataPointTupleList.append((gainAvg,costAvg))
            l = ifh.readline()
            queryCounter += 1
            if queryCounter % 500 == 0:
                # original
                print "-->mid",queryCounter,gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
                # simplified
                # print costAvg
        ifh.close()
        # original
        print "-->end",queryCounter,gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
        # simplified
        # print costAvg
        print "len(dataPointTupleList):",len(dataPointTupleList)
        print "starting point:",dataPointTupleList[0]
        print "ending point:",dataPointTupleList[-1]
        print
        xDataPointsSet2 = []
        yDataPointsSet2 = []
        # split the (gain, cost) points into plot axes: x = cost, y = gain
        for tuple in dataPointTupleList:
            # print tuple
            (gainAvg,costAvg) = tuple
            xDataPointsSet2.append(costAvg)
            yDataPointsSet2.append(gainAvg)
        plt.plot(xDataPointsSet2, yDataPointsSet2, label=labelValue, linestyle=linestyleValue, color=colorValue)
    plt.legend(loc="lower right")

plt.title("fall through curves, docHits vs. UPP-5_ptPowTo0")
plt.xlabel("avg query processing cost in ms")
plt.ylabel("overlap quality")
plt.show()
exit(1)

# Too much work and let prof to judge whether it is important or NOT.
# Use the actual gain / cost first and quickly finish the job is important
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_04-06.topics.polyIRTKCompatibleMode"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(":")
    qid = le[0]
    queryContent = le[1]
    queryLength = queryContent.strip().split(" ")
    print qid,queryContent,queryLength
    l = ifh.readline()
ifh.close()
exit(1)










fileList = []
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_TOP10_testingQueries_final_report_20141006/"
fileList.append("ptPowTo1_1%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_2%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_3%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_4%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_5%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_6%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_7%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_8%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_9%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_10%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_20%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_30%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_40%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_50%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_60%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_70%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_80%_max-score_TOP10_testingQueries_20141005")
fileList.append("ptPowTo1_90%_max-score_TOP10_testingQueries_20141005")
for fileName in fileList:
    completeFileName = basePath + fileName
    print completeFileName
    ifh = open(completeFileName,"r")
    l = ifh.readline()
    while l:
        if l.strip().startswith("Average postings scored:"):
            print l.strip()
        if l.strip().startswith("Average data read from cache:"):
            print l.strip()
        if l.strip().startswith("Average query running time (latency):"):
            print l.strip()
        l = ifh.readline()
    ifh.close()
exit(1)

qidAndContentDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2_100KQueries_head_5K"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(":")
    qid = le[0]
    queryContent = le[1]
    qidAndContentDict[qid] = queryContent 
    l = ifh0.readline()
print "Overall:"
print "len(qidAndContentDict):",len(qidAndContentDict)
ifh0.close()

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/whySteep_20141022_withQueryContentAdded"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/whySteep_20141022"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    ofh.write(l.strip() + " " + "'" + qidAndContentDict[qid] + "'" + "\n")
    l = ifh.readline()

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)



docIDAndTrecIDDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_trecID_docID_MappingTable"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 0
while l:
    le = l.strip().split(" ")
    currentTrec = le[0]
    currentDoc = le[1]
    docIDAndTrecIDDict[currentDoc] = currentTrec
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."
    l = ifh.readline()
    lineCounter += 1
ifh.close()
print "len(docIDAndTrecIDDict):",len(docIDAndTrecIDDict)

basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
# raw result files to fix: both allocation methods at each pruning level
percentages = ["1%", "5%", "10%", "20%", "30%", "40%", "50%"]
fileList = ["rawResultsHumanJudgedQueries_UPP-5_%s_ptPowTo0_20141021_RAW" % p
            for p in percentages]
fileList += ["rawResults_%s_HumanJudgedQueries_docHits_20141021_RAW" % p
             for p in percentages]

currentDocID = ""
currentTrecID = ""
outputLine = ""
# Rewrite each qualifying result row with the TREC id inserted after the
# first two fields; rows that do not match the expected shape are dropped.
for ifn in fileList:
    completePath = basePath + ifn
    ifh = open(completePath,"r")
    # input names end in "_RAW"; [:-4] strips that before adding "_FIXED"
    ofn = completePath[:-4] + "_" + "FIXED"
    ofh = open(ofn,"w")
    l = ifh.readline()
    while l:
        le = l.strip().split("\t")
        # print "len(le):",len(le)
        # keep only tab-separated rows with 6 fields whose 3rd field starts
        # with "GX" -- presumably the per-result document rows; verify
        # against the actual raw file format
        if len(le) == 6 and le[2].startswith("GX"):
            # le[5] looks like "<label> <docID>"; take the second token
            currentDocID = le[5].strip().split(" ")[1]
            if currentDocID not in docIDAndTrecIDDict:
                currentTrecID = "0" # unknown docID -> placeholder trec id
            else:
                currentTrecID = docIDAndTrecIDDict[currentDocID]
            '''
            print "le:",le
            print l.strip()
            print
            '''
            outputLine = le[0] + " " + le[1] +" " + currentTrecID + " " + le[2] +" " + le[3] +" " + le[4] + " " + le[5] + "\n"
            ofh.write(outputLine)
            # print le[0],le[1],currentTrecID,le[2],le[3],le[4],le[5]
        l = ifh.readline()
    ofh.close()
    ifh.close()
    print "Overall:"
    print "completePath:",completePath
    print "ofn:",ofn
exit(1)

# two files need to compare
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docHitsBasedDocumentAllocationAssignment_20141014/max-score_40%_docHits_rawResults_20141013"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_40%_UPP-5_rawResults_testingQueries_ptPowTo0_20141014"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/debug_theMaxScoreSecret_20141020"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
sum1 = 0
while l:
    le = l.strip().split(" ")
    if le[0] == "Overlapping":
        # print l.strip(),le[-7][:-1]
        sum1 += int(le[-7][:-1])
    l = ifh1.readline()
ifh1.close()
print "sum1:",sum1

ifh2 = open(ifn2,"r")
l = ifh2.readline()
sum2 = 0
while l:
    le = l.strip().split(" ")
    if le[0] == "Overlapping":
        # print l.strip(),le[-7][:-1]
        sum2 += int(le[-7][:-1])
    l = ifh2.readline()
ifh2.close()
print "sum2:",sum2

ifh3 = open(ifn3,"r")
l = ifh3.readline()
sum3 = 0
while l:
    le = l.strip().split(" ")
    if le[0] == "Overlapping":
        # print l.strip(),le[-7][:-1]
        sum3 += int(le[-7][:-1])
    l = ifh3.readline()
ifh3.close()
print "sum3:",sum3
exit(1)

top10DocDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP10_documentResults_OR_unpruned_testingQueries_20141020"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentDoc = le[3]
    top10DocDict[currentDoc] = 1
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(top10DocDict):",len(top10DocDict)

counter = 0
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_40%_whole_docIDs"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    doc = le[0]
    if doc in top10DocDict:
        counter += 1
    l = ifh.readline()
ifh.close()
print "Overall:"
print "counter:",counter
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_testingQueries_100%_TOP1000_OR_20140927"
ifh = open(ifn,"r")
l = ifh.readline()
currentQID = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        currentQID = le[1]
    # print "len(le):",len(le)
    if len(le)==25 and int(le[0]) <= 10:
        print currentQID,le[0],le[-4],le[-3],le[-2]
    l = ifh.readline()
ifh.close()
exit(1)





# for the SLA requirement
# key: freq (number of top-10 results already retained, 0..10)
# value: numOfQueriesBelongingToThisGroup
# One initializer instead of eleven copy-pasted assignments; the shared 0
# default is an immutable int, so dict.fromkeys is safe here.
histogramDict = dict.fromkeys(range(0, 11), 0)
'''
# histogram loading process
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/MLLearnedComponent_20141016/UPP-5_10%_ptPowTo0/another5000_gain"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    numOfResultsRetained = int(le[2])
    histogramDict[numOfResultsRetained] += 1
    l = ifh.readline()
ifh.close()
print "Overall:"
sum = 0
for i in range(0,11):
    sum += histogramDict[i]
print "sum:",sum
'''

# Let's add the histogram thing cause I really want to see it.
# Let's check the machine learned curves first
# The two kinds of curves are separated so let's try to plot ONLY one kind of curve first.
# one for prediction and one for optimal
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"

# each tuple: (gainBasedNumerator, gainDenominator, costTotal, numOfQueries,
#              fileName, plot label) -- starting totals for 1%/5%/10% ptPowTo0
plotTuple1 = (144, 49966, 19.457, 4997,
              "output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_1%_ptPowTo0_20141018_sortedByGainCostRatio",
              "UPP-5_1%_ptPowTo0")
plotTuple2 = (5406, 49966, 75.378, 4997,
              "output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_5%_ptPowTo0_20141018_sortedByGainCostRatio",
              "UPP-5_5%_ptPowTo0")
plotTuple3 = (9707, 49966, 1547.76, 4997,
              "output_queryIDReassignment_predictedGain_actualGain_predictedCost_actualCost_UPP-5_10%_ptPowTo0_20141018_sortedByGainCostRatio",
              "UPP-5_10%_ptPowTo0")

# ptPowTo1 variants, kept for reference:
#plotTuple1 = (18002,47606,29859,4761,"output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_20141014_sortedByGainCostRatio_CURRENT","UPP-5_1%_ptPowTo1")
#plotTuple2 = (28826,49966,101888,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_UPP-5_5%_ptPowTo1_20141017_sortedByGainCostRatio","UPP-5_5%_ptPowTo1")
#plotTuple3 = (34160,49966,154592,4997,"output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByGainCostRatio","UPP-5_10%_ptPowTo1")

fileList = [plotTuple1, plotTuple2, plotTuple3]

# 1%
# modified
#gainBasedNumerator = 22317
#gainDenominator = 51900
#costTotal = 29014
#numOfQueries = 4761

# 5%
# modified
#gainBasedNumerator = 28826
#gainDenominator = 48947 # original: 49966
#costTotal = 101888
#numOfQueries = 4997

# 10%
# modified
#gainBasedNumerator = 34160
#gainDenominator = 49966
#costTotal = 154592
#numOfQueries = 4997

# UPP-5_1%_ptPowTo1
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_20141014_sortedByGainCostRatio_CURRENT"
# UPP-5_5%_ptPowTo1
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_UPP-5_5%_ptPowTo1_20141017_sortedByGainCostRatio"
# UPP-5_10%_ptPowTo1
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_queryIDReassignment_predictedGain_actualGain_predictedGain_actualCost_UPP-5_10%_ptPowTo1_20141017_sortedByGainCostRatio"

# For each (stats, file, label) tuple: replay the gain/cost-ratio-sorted
# query stream, accumulating gain and cost into the running averages and
# maintaining the 0..10 "results already obtained" histogram, then plot the
# avg-gain vs avg-cost curve for that tier.
# NOTE(review): `plt` is never imported in this file (the matplotlib import
# at the top is commented out), and this section sits after an exit(1), so
# it is dead code and would raise NameError if reached.
for tuple in fileList:
    (gainBasedNumerator,gainDenominator,costTotal,numOfQueries,fileName,labelValue) = tuple
    ifn = basePath + fileName
    print "ifn:",ifn
    dataPointTupleList = []
    
    # starting point of the curve: the tier's base averages
    gainAvg = gainBasedNumerator / gainDenominator 
    costAvg = costTotal/numOfQueries
    dataPointTupleList.append((gainAvg,costAvg))
    
    ifh = open(ifn,"r")
    l = ifh.readline()
    queryCounter = 0
    
    print "-->beg",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
    while l:
        le = l.strip().split(" ")
        currentGain = float(le[2])   # actual gain of promoting this query
        currentCost = float(le[4])   # actual cost of promoting this query
        gainBasedNumerator += currentGain
        # Move the query from its "10 - gain results already obtained" bucket
        # to the full-results bucket.  currentAlreadyGet is a float, which
        # works as a dict key here because equal-valued ints and floats hash
        # alike in Python -- assumes le[2] always holds an integral value.
        currentAlreadyGet = 10 - currentGain
        histogramDict[currentAlreadyGet] -= 1
        histogramDict[10] += 1
        costTotal += currentCost
        gainAvg = gainBasedNumerator / gainDenominator 
        costAvg = costTotal/numOfQueries
        dataPointTupleList.append((gainAvg,costAvg))
        l = ifh.readline()
        # progress line every 500 queries
        if queryCounter % 500 == 0:
            print "-->mid",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
        queryCounter += 1
    ifh.close()
    print "Overall:"
    print "len(dataPointTupleList):",len(dataPointTupleList)
    print "dataPointTupleList[0]:",dataPointTupleList[0]
    print "dataPointTupleList[-1]:",dataPointTupleList[-1]
    print "queryCounter:",queryCounter
    print "-->end",gainAvg,costAvg,histogramDict[0],histogramDict[1],histogramDict[2],histogramDict[3],histogramDict[4],histogramDict[5],histogramDict[6],histogramDict[7],histogramDict[8],histogramDict[9],histogramDict[10]
    xDataPoints = []
    yDataPoints = []
    for tuple in dataPointTupleList:
        # print tuple
        (gainAvg,costAvg) = tuple
        xDataPoints.append(costAvg)
        yDataPoints.append(gainAvg)
    plt.plot(xDataPoints, yDataPoints, label=labelValue)
plt.legend(loc="lower right")
plt.show()
exit(1)

# Sanity-check an ARFF dump: once the first 235-field row has been seen,
# every subsequent row must also have exactly 235 fields; bail out loudly on
# the first violation.
shouldBeFlag = False
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_5%_ptPowTo1_tiering_20141017_head_another5000.arff"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_10%_ptPowTo1_tiering_20141017_head_another5000.arff"
ifh = open(ifn,"r")
for rawLine in ifh:
    fields = rawLine.strip().split(",")
    if len(fields) == 235:
        shouldBeFlag = True
    elif shouldBeFlag:
        print "error"
        print len(fields)
        print rawLine.strip()
        exit(1)
ifh.close()
exit(1)





'''
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_UPP-5_1%_ptPowTo1_tiering_20141014.arff"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 1
sum = 0.0
while l:
    le = l.strip().split(",")
    if len(le) == 237:
        sum += float(le[236])
    l = ifh.readline()
    counter += 1
ifh.close()
print "Overall:"
print "sum:",sum
print "counter:",counter
print "sum/counter:",sum/counter
exit(1)
'''

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryTermsFrom100K_gov2_polyIRToolkitCompatible"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryTermsWithTheirLengthsOfInvertedList"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 0
while l:
    le = l.strip().split(" ")
    currentTerm = le[0]
    ofh.write(str(counter) + ":" + currentTerm + "\n")
    l = ifh.readline()
    counter += 1
ifh.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)



# UPP-5 1%
numOfQueriesFallingThrough = 0
dataPointTupleList = []
gainBasedNumerator = 21515
gainDenominator = 49808
costTotal = 29978.7
numOfQueries = 4981
gainAvg = gainBasedNumerator / gainDenominator 
costAvg = costTotal/numOfQueries
dataPointTupleList.append((gainAvg,costAvg))
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_gain_cost_ratio_20141014_sortedByGainCostRatio"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentGain = int(le[2]) - int(le[1])
    currentCost = float(le[4])
    if currentGain == 0:
        break
    numOfQueriesFallingThrough += 1
    gainBasedNumerator += currentGain
    costTotal += currentCost
    gainAvg = gainBasedNumerator / gainDenominator 
    costAvg = costTotal/numOfQueries
    dataPointTupleList.append((gainAvg,costAvg))
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(dataPointTupleList):",len(dataPointTupleList)
print "starting point:",dataPointTupleList[0]
print "ending point:",dataPointTupleList[-1]
xDataPointsSet2 = []
yDataPointsSet2 = []
for tuple in dataPointTupleList:
    # print tuple
    (gainAvg,costAvg) = tuple
    xDataPointsSet2.append(costAvg)
    yDataPointsSet2.append(gainAvg)
plt.plot(xDataPointsSet2, yDataPointsSet2)

# UPP-5 5%
numOfQueriesFallingThrough = 0
dataPointTupleList = []
gainBasedNumerator = 29333
gainDenominator = 49808
costTotal = 105602
numOfQueries = 4981
gainAvg = gainBasedNumerator / gainDenominator 
costAvg = costTotal/numOfQueries
dataPointTupleList.append((gainAvg,costAvg))
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_5%_gain_cost_ratio_20141015_sortedByGainCostRatio"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentGain = int(le[2]) - int(le[1])
    currentCost = float(le[4])
    if currentGain == 0:
        break
    numOfQueriesFallingThrough += 1
    gainBasedNumerator += currentGain
    costTotal += currentCost
    gainAvg = gainBasedNumerator / gainDenominator 
    costAvg = costTotal/numOfQueries
    dataPointTupleList.append((gainAvg,costAvg))
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(dataPointTupleList):",len(dataPointTupleList)
print "starting point:",dataPointTupleList[0]
print "ending point:",dataPointTupleList[-1]
xDataPointsSet2 = []
yDataPointsSet2 = []
for tuple in dataPointTupleList:
    # print tuple
    (gainAvg,costAvg) = tuple
    xDataPointsSet2.append(costAvg)
    yDataPointsSet2.append(gainAvg)
plt.plot(xDataPointsSet2, yDataPointsSet2)

# UPP-5 10%
numOfQueriesFallingThrough = 0
dataPointTupleList = []
gainBasedNumerator = 34501
gainDenominator = 49808
costTotal = 152814
numOfQueries = 4981
gainAvg = gainBasedNumerator / gainDenominator 
costAvg = costTotal/numOfQueries
dataPointTupleList.append((gainAvg,costAvg))
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_10%_gain_cost_ratio_20141015_sortedByGainCostRatio"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currentGain = int(le[2]) - int(le[1])
    currentCost = float(le[4])
    if currentGain == 0:
        break
    numOfQueriesFallingThrough += 1
    gainBasedNumerator += currentGain
    costTotal += currentCost
    gainAvg = gainBasedNumerator / gainDenominator 
    costAvg = costTotal/numOfQueries
    dataPointTupleList.append((gainAvg,costAvg))
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(dataPointTupleList):",len(dataPointTupleList)
print "starting point:",dataPointTupleList[0]
print "ending point:",dataPointTupleList[-1]
xDataPointsSet2 = []
yDataPointsSet2 = []
for tuple in dataPointTupleList:
    # print tuple
    (gainAvg,costAvg) = tuple
    xDataPointsSet2.append(costAvg)
    yDataPointsSet2.append(gainAvg)
plt.plot(xDataPointsSet2, yDataPointsSet2)
plt.show()
exit(1)

# For each target quality level, keep promoting queries (in predicted
# gain/cost order) from tier 1 to tier 2 until the cumulative quality
# crosses the target, then report quality/cost/fall-through statistics.
qidANDQPCTier2Dict = {}            # qid -> real tier-2 query processing cost
costN = 377755                     # cost numerator (base tier-1 total cost)
costD = 13928600                   # cost denominator
NUM_OF_QUERIS = 4761               # total number of queries (sic)
baseQualityNumerator = 47610 - 29583
baseQualityDenominator = 47610
firstTierPercentageStr = "1%"
secondTierPercentageStr = "100%"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qidWithRealCostInTier1AndTier2"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    QPCTier2 = float(le[1])
    qidANDQPCTier2Dict[qid] = QPCTier2 
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(qidANDQPCTier2Dict):",len(qidANDQPCTier2Dict)
print "qidANDQPCTier2Dict['1']:",qidANDQPCTier2Dict['1']
# exit(1)

# True division throughout: `from __future__ import division` at file top.
baseQuality = baseQualityNumerator / baseQualityDenominator
baseQPC = costN / costD
print "baseQuality:",baseQuality
print "baseQPC:",baseQPC

targetQuality = [0.4,0.5,0.6,0.7,0.8,0.9]
currQuality = baseQuality
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_gain2_cost2_extended_20140915_sortedByPredictedGainCost.txt"
ifh = open(ifn,"r")
l = ifh.readline()
# NOTE(review): starts at 1, not 0, so the count includes one extra query --
# possibly intentional (counting the query that crosses the threshold);
# confirm before reusing.
numOfQueriesFallingThrough = 1
for currTargetValue in targetQuality:
    if currQuality > currTargetValue:
        # already past this target (quality and counts carry over from the
        # previous target), nothing more to promote
        pass
    else:
        # The single readline cursor `l` is shared across targets on purpose:
        # reading resumes where the previous target stopped, so promotions
        # accumulate monotonically from one target level to the next.
        while l:
            le = l.strip().split(" ")
            qid = le[0]
            baseQualityNumerator += float(le[2])
            # print l.strip()
            if qid in qidANDQPCTier2Dict:
                costN += qidANDQPCTier2Dict[qid]
            else:
                print qid,"missing"
            l = ifh.readline()
            numOfQueriesFallingThrough += 1
            currQuality = baseQualityNumerator/baseQualityDenominator
            currCost = costN/costD
            if currQuality > currTargetValue:
                #print "currTargetValue:",currTargetValue
                #print "currQuality:",currQuality
                #print "currCost:",currCost
                #print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
                #print "%OfQueriesFallingThrough:",numOfQueriesFallingThrough/NUM_OF_QUERIS
                #print
                print currTargetValue,currQuality,currCost,numOfQueriesFallingThrough,numOfQueriesFallingThrough/NUM_OF_QUERIS,firstTierPercentageStr,"100%","MLLearned_UPP-5_1%",baseQuality,baseQPC,currQuality-baseQuality,currCost-baseQPC
                break
print
ifh.close()
exit(1)

qidCostTier1 ={}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_QID_QPC_testingQueries_20141014"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(" ")
    qidCostTier1[le[0]] = float(le[1])
    l = ifh0.readline()
ifh0.close()
print "Overall:"
print "len(qidCostTier1):",len(qidCostTier1)

qidCostTier2 = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_90%_QID_QPC_testingQueries_20141014"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    qidCostTier2[le[0]] = float(le[1])
    l = ifh1.readline()
ifh0.close()
print "Overall:"
print "len(qidCostTier2):",len(qidCostTier2)    
ifh1.close()

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_gain_cost_20141014"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_gain_20141014"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    ofh.write(le[0] + " " + le[1] + " " + le[2] + " " + str(qidCostTier1[le[0]]) + " " + str(qidCostTier2[le[0]]) + "\n")
    l = ifh.readline()
ifh.close()
ofh.close()
print "Overall:"
print "ifn0:",ifn0
print "ifn1:",ifn1
print "ifn:",ifn
print "ofn:",ofn
exit(1)

qidAndCostDict0 = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_QID_QPC_head13961_20141014"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    qpc = float(le[1])
    qidAndCostDict0[qid] = qpc
    l = ifh0.readline()
ifh0.close()
print "len(qidAndCostDict0):",len(qidAndCostDict0)

qidAndCostDict1 = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_90%_QID_QPC_head13961_20141014"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    qpc = float(le[1])
    qidAndCostDict1[qid] = qpc
    l = ifh1.readline()
ifh1.close()
print "len(qidAndCostDict1):",len(qidAndCostDict1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20141014_step8_extended.arff"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140915_step8_extended.arff"
ifh = open(ifn,"r")

l = ifh.readline()
while l:
    le = l.strip().split(",")
    # print len(le)
    if len(le) == 237:
        qid = le[0]
        realQueryProcessingCostOR_tier1 = float(le[234])
        realQueryProcessingCostOR_tier2 = float(le[236])
        outputLine = ""
        for i in range(0,234):
            outputLine += le[i] + ","
        outputLine += str(qidAndCostDict0[qid]) + ","
        outputLine += le[235] + ","
        outputLine += str(qidAndCostDict1[qid])
        outputLine += "\n"
        ofh.write(outputLine)
    else:
        ofh.write(l)
    l = ifh.readline()
ifh.close()
print "Overall:"
print "ifn0:",ifn0
print "ifn1:",ifn1
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

counter = 1
writeFlag = False
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_1%_UPP-5_rawResults_20141013"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_90%_UPP-5_rawResults_20141013"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_1%_UPP-5_rawResults_head13961_20141013"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/max-score_90%_UPP-5_rawResults_head13961_20141013"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    if l.strip().startswith("WWW2015 Showing"):
        if writeFlag:
            print counter,l.strip().split(" ")[-2][1:]
        counter += 1
        if counter == 13962:
            writeFlag = True
            counter = 1
    l = ifh.readline()
ifh.close()
exit(1)







# Dump the binary lexicon file (index.lex): a sequence of variable-length
# records, one per term.  Record layout (native byte order via struct):
#   int32  numOfLayers
#   int32  termLen
#   termLen bytes of term text
#   int32  numOfDocs, numOfChunks, numOfChunksLastBlock,
#          numOfBlocks, blockNum, chunkNum
#   float32 scoreThreshold
#   8 bytes externalIndexOffset (read but never unpacked)
basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/2%_docHitsIndex/combineIndex/"
ifn = "index.lex"

ifn = basePath + ifn
ifh = open(ifn,"r")

# File size bounds the read loop: stop when every record has been consumed.
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
termCounter = 0
while numOfBytesRead < fileSize:
    
    byteString_numOfLayers = ifh.read(4)
    byteString_termLen = ifh.read(4)
    
    (numOfLayers,) = unpack( "i", byteString_numOfLayers)
    (termLen,) = unpack( "i", byteString_termLen)
    
    byteString_term = ifh.read(termLen)
    
    byteString_numOfDocs = ifh.read(4)
    (numOfDocs,) = unpack( "i", byteString_numOfDocs)
    
    byteString_numOfChunks = ifh.read(4)
    (numOfChunks,) = unpack( "i", byteString_numOfChunks)
    
    byteString_numOfChunksLastBlock = ifh.read(4)
    (numOfChunksLastBlock,) = unpack( "i", byteString_numOfChunksLastBlock)
    
    byteString_numOfBlocks = ifh.read(4)
    (numOfBlocks,) = unpack( "i", byteString_numOfBlocks)
    
    byteString_blockNum = ifh.read(4)
    (blockNum,) = unpack( "i", byteString_blockNum)
    
    byteString_chunkNum = ifh.read(4)
    (chunkNum,) = unpack( "i", byteString_chunkNum)
    
    byteString_scoreThreshold = ifh.read(4)
    (scoreThreshold,) = unpack( "f", byteString_scoreThreshold)
    
    byteString_externalIndexOffset = ifh.read(8) # for the externalIndexOffset 
    
    # Byte accounting: 2 leading int32s + term bytes + 7 more 4-byte fields
    # + the 8-byte offset (4*9 == 7*4 + 8).
    numOfBytesRead += 4*2 + termLen + 4*9
    print termCounter,byteString_term,numOfDocs,scoreThreshold,numOfBytesRead
    termCounter += 1
exit(1)

# let's do the moving and combine them into the main index
# rename all the indexes

fileList = []
fileList.append("subIndex0")
fileList.append("subIndex1")
fileList.append("subIndex2")
fileList.append("subIndex3")
fileList.append("subIndex4")
fileList.append("subIndex5")
fileList.append("subIndex6")
fileList.append("subIndex7")
fileList.append("subIndex8")
fileList.append("subIndex9")
fileList.append("subIndex10")
fileList.append("subIndex11")
fileList.append("subIndex12")
fileList.append("subIndex13")
fileList.append("subIndex14")
fileList.append("subIndex15")
fileList.append("subIndex16")
fileList.append("subIndex17")
fileList.append("subIndex18")
fileList.append("subIndex19")
fileList.append("subIndex20")
fileList.append("subIndex21")
fileList.append("subIndex22")
fileList.append("subIndex23")
fileList.append("subIndex24")
fileList.append("subIndex25")

basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%_docHitsIndex/"
targetPath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%_docHitsIndex/combineIndex"
for fileName in fileList:
    compleFolderName = basePath + fileName
    print compleFolderName
    for filename in os.listdir(compleFolderName):
        # print filename
        if filename.startswith("index.ext"):
            print filename
            os.rename(compleFolderName + "/" + filename, targetPath + "/" + filename)
        if filename.startswith("index.idx"):
            print filename
            os.rename(compleFolderName + "/" + filename, targetPath + "/" + filename)
        if filename.startswith("index.lex"):
            print filename
            os.rename(compleFolderName + "/" + filename, targetPath + "/" + filename)
        if filename.startswith("index.meta"):
            print filename
            os.rename(compleFolderName + "/" + filename, targetPath + "/" + filename)
exit(1)

# rename all the indexes
basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%_docHitsIndex/"
fileList = []
fileList.append("subIndex0")
fileList.append("subIndex1")
fileList.append("subIndex2")
fileList.append("subIndex3")
fileList.append("subIndex4")
fileList.append("subIndex5")
fileList.append("subIndex6")
fileList.append("subIndex7")
fileList.append("subIndex8")
fileList.append("subIndex9")
fileList.append("subIndex10")
fileList.append("subIndex11")
fileList.append("subIndex12")
fileList.append("subIndex13")
fileList.append("subIndex14")
fileList.append("subIndex15")
fileList.append("subIndex16")
fileList.append("subIndex17")
fileList.append("subIndex18")
fileList.append("subIndex19")
fileList.append("subIndex20")
fileList.append("subIndex21")
fileList.append("subIndex22")
fileList.append("subIndex23")
fileList.append("subIndex24")
fileList.append("subIndex25")

for fileName in fileList:
    compleFolderName = basePath + fileName
    print compleFolderName
    thatNumberInStr = int(fileName[8:])
    print thatNumberInStr
    for filename in os.listdir(compleFolderName):
        # print filename
        if filename.endswith("index.ext.0.0"):
            print filename
            os.rename(compleFolderName + "/" + filename, compleFolderName + "/" + filename[:12] + str(thatNumberInStr))
        if filename.endswith("index.idx.0.0"):
            print filename
            os.rename(compleFolderName + "/" + filename, compleFolderName + "/" + filename[:12] + str(thatNumberInStr))
        if filename.endswith("index.lex.0.0"):
            print filename
            os.rename(compleFolderName + "/" + filename, compleFolderName + "/" + filename[:12] + str(thatNumberInStr))
        if filename.endswith("index.meta.0.0"):
            print filename
            os.rename(compleFolderName + "/" + filename, compleFolderName + "/" + filename[:13] + str(thatNumberInStr))
exit(1)

# let's do the moving and combine them into the main index
# rename all the indexes
basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%_docHitsIndex/"
fileList = []
fileList.append("subIndex0")
fileList.append("subIndex1")
fileList.append("subIndex2")
fileList.append("subIndex3")
fileList.append("subIndex4")
fileList.append("subIndex5")
fileList.append("subIndex6")
fileList.append("subIndex7")
fileList.append("subIndex8")
fileList.append("subIndex9")
fileList.append("subIndex10")
fileList.append("subIndex11")
fileList.append("subIndex12")
fileList.append("subIndex13")
fileList.append("subIndex14")
fileList.append("subIndex15")
fileList.append("subIndex16")
fileList.append("subIndex17")
fileList.append("subIndex18")
fileList.append("subIndex19")
fileList.append("subIndex20")
fileList.append("subIndex21")
fileList.append("subIndex22")
fileList.append("subIndex23")
fileList.append("subIndex24")
fileList.append("subIndex25")

lookAtFile = ""
tempCounter = 0
for fileName in fileList:
    compleFolderName = basePath + fileName
    # print compleFolderName
    thatNumberInStr = int(fileName[8:])
    lookAtFile = compleFolderName + "/" + "log_20141012_" + str(thatNumberInStr)
    ifh = open(lookAtFile,"r")
    l = ifh.readline()
    while l:
        if l.strip().startswith("docID:"):
            tempCounter += 1
        l = ifh.readline()
    print lookAtFile,tempCounter

print "Overall:"
print "tempCounter:",tempCounter
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_wholeLexiconTermID_Term_ListLength_sortedByAlphabeticalOrder"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 1
while l:
    le = l.strip().split(" ")
    termID = le[0]
    term = le[1]
    listLength = le[2]
    if term.strip() != "":
        pass
    else:
        print termID,term,listLength
        exit(1)
    l = ifh.readline()
    counter += 1
    
    if counter % 1000000 == 0:
        print "counter:",counter
exit(1)

selectedDocDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_1%_index/tier1_OR_1_0_1M"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_1%_whole_docIDs"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    docID = le[0]
    selectedDocDict[docID] = 1
    l = ifh.readline()
ifh.close()
print "len(selectedDocDict):",len(selectedDocDict)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
sum = 0
while l:
    le = l.strip().split(" ")
    docID = le[0]
    if int(docID) >= 1000000:
        break
    if docID in selectedDocDict:
        sum += int(le[2])
        print sum
    l = ifh.readline()
ifh.close()
print "sum:",sum

exit(1)

'''
# under construction !
# /home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_1_0_1M\;
# /home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M > log1 &

part1 = "./irtk --local --convert --config-options="
part2 = "document_posting_array_file_to_support_doc_hits="
part2 = "doc_hit_document_partition_file_name="
basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = ["1%_docHitsIndex","2%_docHitsIndex","5%_docHitsIndex","10%_docHitsIndex","20%_docHitsIndex","30%_docHitsIndex","40%_docHitsIndex","50%_docHitsIndex","60%_docHitsIndex","70%_docHitsIndex"]
completeIFNPath = ""
for fileName in fileList:
    completeIFNPath = basePath + fileName + "/subIndex"
    for i in range(0,26):
        mkdirStr = completeIFNPath + str(i)
        print "cd",mkdirStr
        # os.mkdir(mkdirStr)
exit(1)
'''

basePath = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
fileList = ["1%_docHitsIndex","2%_docHitsIndex","5%_docHitsIndex","10%_docHitsIndex","20%_docHitsIndex","30%_docHitsIndex","40%_docHitsIndex","50%_docHitsIndex","60%_docHitsIndex","70%_docHitsIndex"]
completeIFNPath = ""
for fileName in fileList:
    completeIFNPath = basePath + fileName + "/subIndex"
    for i in range(0,26):
        mkdirStr = '"' + completeIFNPath + str(i) + '/' + '"'
        print "cd",mkdirStr
        # os.mkdir(mkdirStr)
exit(1)

# Build the input path and the 26 per-million-docID shard output paths for
# the tier given on the command line (e.g. "tier1_OR_1%").
programName = sys.argv[0]
percentageIndicator = sys.argv[1]

tierBase = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/"

# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_1%_whole_docIDs"
ifn = tierBase + percentageIndicator + "_whole_docIDs"

# Shard name suffixes: 0_1M, 1M_2M, ..., 24M_25M, 25M_END.
shardSuffixes = ["0_1M"] + ["%dM_%dM" % (m, m + 1) for m in range(1, 25)] + ["25M_END"]
shardPrefix = tierBase + percentageIndicator + "_index/" + percentageIndicator + "_index_"
(ofn1, ofn2, ofn3, ofn4, ofn5, ofn6, ofn7, ofn8, ofn9, ofn10,
 ofn11, ofn12, ofn13, ofn14, ofn15, ofn16, ofn17, ofn18, ofn19, ofn20,
 ofn21, ofn22, ofn23, ofn24, ofn25, ofn26) = [shardPrefix + suffix for suffix in shardSuffixes]

# Open all 26 shard files for writing.
(ofh1, ofh2, ofh3, ofh4, ofh5, ofh6, ofh7, ofh8, ofh9, ofh10,
 ofh11, ofh12, ofh13, ofh14, ofh15, ofh16, ofh17, ofh18, ofh19, ofh20,
 ofh21, ofh22, ofh23, ofh24, ofh25, ofh26) = [
    open(shardName, "w") for shardName in (
        ofn1, ofn2, ofn3, ofn4, ofn5, ofn6, ofn7, ofn8, ofn9, ofn10,
        ofn11, ofn12, ofn13, ofn14, ofn15, ofn16, ofn17, ofn18, ofn19, ofn20,
        ofn21, ofn22, ofn23, ofn24, ofn25, ofn26)]

# Route every docID line to its per-million shard: docID // 1000000 selects
# the handle; docIDs outside [0, 26000000) are dropped, exactly like the
# original chain of 26 range tests.
shardHandles = [ofh1, ofh2, ofh3, ofh4, ofh5, ofh6, ofh7, ofh8, ofh9, ofh10,
                ofh11, ofh12, ofh13, ofh14, ofh15, ofh16, ofh17, ofh18, ofh19,
                ofh20, ofh21, ofh22, ofh23, ofh24, ofh25, ofh26]
ifh = open(ifn,"r")
for l in ifh:
    docID = int(l.strip().split(" ")[0])
    shardIndex = docID // 1000000
    if 0 <= shardIndex < len(shardHandles):
        shardHandles[shardIndex].write(l)
ifh.close()
ofh1.close()
ofh2.close()
ofh3.close()
ofh4.close()
ofh5.close()
ofh6.close()
ofh7.close()
ofh8.close()
ofh9.close()
ofh10.close()
ofh11.close()
ofh12.close()
ofh13.close()
ofh14.close()
ofh15.close()
ofh16.close()
ofh17.close()
ofh18.close()
ofh19.close()
ofh20.close()
ofh21.close()
ofh22.close()
ofh23.close()
ofh24.close()
ofh25.close()
ofh26.close()
print "Overall:"
print "ifn:",ifn
print "ofn1:",ofn1
print "ofn2:",ofn2
print "ofn3:",ofn3
print "ofn4:",ofn4
print "ofn5:",ofn5
print "ofn6:",ofn6
print "ofn7:",ofn7
print "ofn8:",ofn8
print "ofn9:",ofn9
print "ofn10:",ofn10
print "ofn11:",ofn11
print "ofn12:",ofn12
print "ofn13:",ofn13
print "ofn14:",ofn14
print "ofn15:",ofn15
print "ofn16:",ofn16
print "ofn17:",ofn17
print "ofn18:",ofn18
print "ofn19:",ofn19
print "ofn20:",ofn20
print "ofn21:",ofn21
print "ofn22:",ofn22
print "ofn23:",ofn23
print "ofn24:",ofn24
print "ofn25:",ofn25
print "ofn26:",ofn26
exit(1)

docIDNumOfPostingsDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    docID = le[0]
    numOfPostings = int(le[2])
    docIDNumOfPostingsDict[docID] = numOfPostings
    if lineCounter == 1100000:
        break
    l = ifh.readline()
    lineCounter += 1
ifh.close()
print "Overall:"
print "len(docIDNumOfPostingsDict):",len(docIDNumOfPostingsDict)

sum = 0
docCounter = 0
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_1_0_1M"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
    le = l.strip().split(" ")
    docID = le[0]
    if docID not in docIDNumOfPostingsDict:
        print "Problem"
        exit(1)
    else:
        sum += docIDNumOfPostingsDict[docID]
        docCounter += 1
    l = ifh2.readline()
ifh2.close()
print "Overall:"
print "sum: ",sum
print "docCounter: ",docCounter
exit(1)





termIDDict1 = {}
termIDDict2 = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docID_38_term_termID_RAW"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/docID_38_term_termID_RAW_sortedByAlphabeticalOrder"
ifh1 = open(ifn1,"r")
ifh2 = open(ifn2,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    termID = le[0]
    termIDDict1[termID] = 1 
    l = ifh1.readline()

l = ifh2.readline()
while l:
    le = l.strip().split(" ")
    termID = le[0]
    termIDDict2[termID] = 1 
    l = ifh2.readline()    
print "Overall:"
print "len(termIDDict1):", len(termIDDict1)
print "len(termIDDict2):", len(termIDDict2)
for termID in termIDDict2:
    if termID in termIDDict1:
        pass
    else:
        print termID
        exit(1)
ifh1.close()
ifh2.close()
exit(1)

inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/GOV2_DocumentPostingArray_WHOLE_20140627_pt_2DTableFromTOP100Postings.binary"
inputFileList.append(ifn)
numOfDocumentsProcessed = 0
totalNumOfPostings = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",currentInputFileName
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print docID,score1
        totalNumOfPostings += score1
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "totalNumOfPostings:",totalNumOfPostings
print "Ends."
exit(1)

labelAndNumOfPostingsDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/secondFactorProbability/gov2/termPieceInfoForQueryTerms_stepGap_2_OLD"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    listLengthClassLabel = le[2]
    numOfRanges = int(le[3])
    labelAndNumOfPostingsDict[le[2]] = int(le[3]) 
    l = ifh.readline()
print "Overall:"
print "len(labelAndNumOfPostingsDict):",len(labelAndNumOfPostingsDict)
sum = 0
for currentNumOfPosting in labelAndNumOfPostingsDict:
    sum += labelAndNumOfPostingsDict[currentNumOfPosting]
print sum * 14
ifh.close()
exit(1)



"./irtk --local --cat < commandValue --config-options=temp_index_path=/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_indexes\;temp_index_name=LEAVE_wei_uniform_pruning_2013-09-12-16-12-30_None_None\;3D_table_ifn=/home/vgc/wei/workspace/NYU_IRTK/data/secondFactorProbability/gov2/subTermPieceInfoForQueryTerms_stepGap_2/xab\;3D_table_ofn=/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xab_termDocHitsIntegration_20141008"

part1 = "./irtk --local --cat < commandValue --config-options=temp_index_path=/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_indexes\;temp_index_name=LEAVE_wei_uniform_pruning_2013-09-12-16-12-30_None_None\;"
part2 = ""
part3 = ""
part4 = ""
completeIFNPath = ""

for dirname, dirnames, filenames in os.walk('/home/vgc/wei/workspace/NYU_IRTK/data/secondFactorProbability/gov2/subTermPieceInfoForQueryTerms_stepGap_2'):
    for filename in filenames:
        part2 = "3D_table_ifn=" + os.path.join(dirname, filename) + "\;"
        part3 = "3D_table_ofn=" + "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/" + filename + "_termDocHitsIntegration_20141008"
        part4 = " > " + "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/" + filename + "_20141008"
        completeIFNPath = part1 + part2 + part3 + part4
        print completeIFNPath
        # print filename
        #print part1
        #print part2
        #print part3
        #print

exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xab_termDocHitsIntegration_20141008"
ifh = open(ifn,"r")
l = ifh.readline()
sum = 0
while l:
    le = l.strip().split(" ")
    for i in range(4,len(le)):
        sum += int(le[i])
    print sum
    sum = 0
    l = ifh.readline()
ifh.close()
print "Overall:"
print sum
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/termDocHitsIntegration_20141008"
ifh = open(ifn,"r")
l = ifh.readline()
sum = 0
while l:
    le = l.strip().split(" ")
    for i in range(4,len(le)):
        sum += int(le[i])
    print sum
    sum = 0
    l = ifh.readline()
ifh.close()
exit(1)

# step0: assign the bin value to the corresponding docIDs
# load the docID bins
# It is a dictionary
# key: docID
# value: the corresponding bin value of a specific doc
docIDAndDocHitFreqDict = {}
ofn = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/AND/docIDAndTrecIDAndBinValue_20141008"
ofh = open(ofn,"w")

ifn = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/AND/docIDAndTrecIDHitsCollection_notSorted_AND_fakeQueries_20140826"
ifh = open(ifn,"r")

l = ifh.readline()
lineCounter = 1
binValue = 0
while l:
    le = l.strip().split(" ")
    docID = le[0]
    trecID = le[1]
    docHit = int(le[2])
    # do a classification using the docHit
    #bin0: 0
    #bin1: 1
    #bin2: 2
    #bin3: 3-4
    #bin4: 5-7
    #bin5: 8-12
    #bin6: 12-20
    #bin7: 20-40
    #bin8: 40-80
    #bin9: 80-160
    #bin10: 160-200
    #bin11: 200-500
    #bin12: 500-1000
    #bin13: > 1000
    if docHit == 1:
        binValue = 1
    elif docHit == 2:
        binValue = 2
    elif docHit >= 3 and docHit < 4:
        binValue = 3
    elif docHit >= 5 and docHit < 7:
        binValue = 4
    elif docHit >= 8 and docHit < 12:
        binValue = 5
    elif docHit >= 12 and docHit < 20:
        binValue = 6
    elif docHit >= 20 and docHit < 40:
        binValue = 7
    elif docHit >= 40 and docHit < 80:
        binValue = 8
    elif docHit >= 80 and docHit < 160:
        binValue = 9
    elif docHit >= 160 and docHit < 200:
        binValue = 10
    elif docHit >= 200 and docHit < 500:
        binValue = 11
    elif docHit >= 500 and docHit < 1000:
        binValue = 12
    else:
        binValue = 13
    docIDAndDocHitFreqDict[docID] = binValue
    ofh.write(str(docID) + " " + str(trecID) + " " + str(docHit) + " " + str(binValue) + "\n")
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 100000 == 0:
        print lineCounter,"lines processed."

print "Overall:"
print "len(docIDAndDocHitFreqDict):",len(docIDAndDocHitFreqDict)
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)


trecIDANDDocIDDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    docID = le[0]
    trecID = le[1]
    trecIDANDDocIDDict[trecID] = docID
    l = ifh.readline()
print "Overall:"
print "len(trecIDANDDocIDDict):",len(trecIDANDDocIDDict)
ifh.close()

ofn = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/AND/docIDAndTrecIDHitsCollection_notSorted_AND_fakeQueries_20140826"
ofh = open(ofn,"w")

ifn0 = "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/AND/trecIDHitsCollection_notSorted_AND_fakeQueries_20140826"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(" ")
    trecID = le[0]
    ofh.write(trecIDANDDocIDDict[trecID] + " " + l.strip() + "\n")
    l = ifh0.readline()
ifh0.close()
ofh.close()
print "Overall:"
print "ifn0:",ifn0
print "ifn:",ifn
print "ofn:",ofn
exit(1)

qidWithInfoTupleDict = {}
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowTo0_step1"
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowToDot1_step1"
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowToDot3_step1"
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowToDot5_step1"
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowToDot7_step1"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowToDot9_step1"
#ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/ptPowTo1_step1"

ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    currQID = le[3]
    currQIDOriginalTOP10ResultsRetrieved = int(le[4])
    currQIDQPC = int(le[5])
    currTuple = (currQIDOriginalTOP10ResultsRetrieved,currQIDQPC)
    if currQID not in qidWithInfoTupleDict:
        qidWithInfoTupleDict[currQID] = []
        qidWithInfoTupleDict[currQID].append(currTuple)
    else:
        qidWithInfoTupleDict[currQID].append(currTuple)
    l = ifh.readline()
ifh.close()
print "Overall:"
print "qidWithInfoTupleDict['100000']:",qidWithInfoTupleDict['100000']
print "qidWithInfoTupleDict['99999']:",qidWithInfoTupleDict['99999']
tempSum1 = 0
tempSum2 = 0
for qid in qidWithInfoTupleDict:
    (results,QPC) = qidWithInfoTupleDict[qid][0]
    tempSum1 += results
    tempSum2 += QPC
print tempSum1
print tempSum2

ofnBase1 = ifn + "_"
ofn = ""
for i in range(0,len(qidWithInfoTupleDict['100000'])):
    ofn = ofnBase1 + str(i)
    ofh = open(ofn,"w")
    for j in range(95001,100001):
        if str(j) not in qidWithInfoTupleDict:
            pass
        else:
            (v1,v2) = qidWithInfoTupleDict[str(j)][i]
            ofh.write(str(j) + " " + str(v1) + " " + str(v2) + "\n")
    ofh.close()
    print "ofn:",ofn
exit(1)

#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_1%", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_2%", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_3%", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_4%", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_5%", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_6%", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_7%", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_8%", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_9%", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_10%", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_20%", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_30%", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_40%", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_50%", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_60%", "tonsOfIndexesMaking_20140920/60PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/60PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot9_70%", "tonsOfIndexesMaking_20140920/70PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/70PercentageIndex_UPP-5_ptPowToDot9_20140920/index.lex.0.0") )

# Build the single (upperBoundFile, originalLexicon, outputLexicon) work item
# from the command line.  NOTE(review): sys.argv[0] is the script path itself;
# the commented example triples above are three data paths, so argv[1..3] may
# have been intended -- kept as argv[0..2] to preserve current behavior.
# Renamed the triple variable so it no longer shadows the builtin `tuple`.
file0 = sys.argv[0]
file1 = sys.argv[1]
file2 = sys.argv[2]
workTriple = (file0,file1,file2)

needToDealWithTripleList = []
needToDealWithTripleList.append(workTriple)


#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_20%", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_30%", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_40%", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_50%", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_60%", "tonsOfIndexesMaking_20140920/60PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/60PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_70%", "tonsOfIndexesMaking_20140920/70PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/70PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )


#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_1%", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_2%", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_3%", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_4%", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_5%", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_6%", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_7%", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_8%", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_9%", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_10%", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_20%", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_30%", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_40%", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowToDot5_50%", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowToDot5_20140920/index.lex.0.0") )

#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_1%", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/1PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_2%", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/2PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_3%", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/3PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_4%", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/4PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_5%", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/5PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_6%", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/6PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_7%", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/7PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_8%", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/8PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_9%", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/9PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
#needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_10%", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/10PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
##needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_20%", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/20PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
##needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_30%", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/30PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
##needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_40%", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/40PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )
##needToDealWithTripleList.append( ("termUpperBoundsOfListLength_20141001_ptPowTo1_50%", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0_Original", "tonsOfIndexesMaking_20140920/50PercentageIndex_UPP-5_ptPowTo1_20140919/index.lex.0.0") )

basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"

# For each (upper-bound file, original lexicon, output lexicon) triple,
# copy the binary lexicon record-by-record, replacing each record's 4-byte
# scoreThreshold float with the per-term upper bound read from the text file.
# Record layout (fixed 4*2 + termLen + 4*9 bytes):
#   numOfLayers(i32) termLen(i32) term(termLen bytes) numOfDocs(i32)
#   numOfChunks(i32) numOfChunksLastBlock(i32) numOfBlocks(i32) blockNum(i32)
#   chunkNum(i32) scoreThreshold(f32) externalIndexOffset(8 bytes, opaque)
for tuple in needToDealWithTripleList:
    (ifn0,ifn,ofn) = tuple
    termUpperBoundDict = {}
    ifn0 = basePath + ifn0
    ifh0 = open(ifn0,"r")

    ifn = basePath + ifn
    ifh = open(ifn,"r")

    ofn = basePath + ofn
    ofh = open(ofn,"wb")

    print "Reading",ifn0
    print "Reading",ifn
    print "Writing",ofn

    #step1:
    #input format
    #0 0 128 1.42934
    #1 00 105 2.65071
    # Columns: <id> <term> <listLength> <upperBound>; keyed by the term text
    # so it can be matched against the raw term bytes from the lexicon.
    for l in ifh0.readlines():
        le = l.strip().split(" ")
        term = le[1]
        termUpperBound = float(le[3])
        termUpperBoundDict[term] = termUpperBound
    print "len(termUpperBoundDict):",len(termUpperBoundDict)

    #step2:
    # Sequentially decode every lexicon record and re-emit it with the
    # patched score field.  All fields except scoreThreshold are copied
    # through as their original raw bytes.
    statinfo = os.stat(ifn)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",ifn
    print "file size:",fileSize
    numOfBytesRead = 0
    termCounter = 0
    while numOfBytesRead < fileSize:

        byteString_numOfLayers = ifh.read(4)
        byteString_termLen = ifh.read(4)

        (numOfLayers,) = unpack( "i", byteString_numOfLayers)
        (termLen,) = unpack( "i", byteString_termLen)

        byteString_term = ifh.read(termLen)

        byteString_numOfDocs = ifh.read(4)
        (numOfDocs,) = unpack( "i", byteString_numOfDocs)

        byteString_numOfChunks = ifh.read(4)
        (numOfChunks,) = unpack( "i", byteString_numOfChunks)

        byteString_numOfChunksLastBlock = ifh.read(4)
        (numOfChunksLastBlock,) = unpack( "i", byteString_numOfChunksLastBlock)

        byteString_numOfBlocks = ifh.read(4)
        (numOfBlocks,) = unpack( "i", byteString_numOfBlocks)

        byteString_blockNum = ifh.read(4)
        (blockNum,) = unpack( "i", byteString_blockNum)

        byteString_chunkNum = ifh.read(4)
        (chunkNum,) = unpack( "i", byteString_chunkNum)

        byteString_scoreThreshold = ifh.read(4)
        (scoreThreshold,) = unpack( "f", byteString_scoreThreshold)

        byteString_externalIndexOffset = ifh.read(8) # for the externalIndexOffset 

        # 2 leading i32s + term bytes + 7 i32-sized fields + 8-byte offset.
        numOfBytesRead += 4*2 + termLen + 4*9
        # print termCounter,byteString_term,numOfDocs,scoreThreshold,termUpperBoundDict[byteString_term],numOfBytesRead
        termCounter += 1

        ofh.write(byteString_numOfLayers)
        ofh.write(byteString_termLen)
        ofh.write(byteString_term)
        ofh.write(byteString_numOfDocs)
        ofh.write(byteString_numOfChunks)
        ofh.write(byteString_numOfChunksLastBlock)
        ofh.write(byteString_numOfBlocks)
        ofh.write(byteString_blockNum)
        ofh.write(byteString_chunkNum)
        # The only mutated field: the per-term upper bound replaces the
        # original scoreThreshold.  KeyError here means the term is missing
        # from the upper-bound file.
        ofh.write(pack("1f", termUpperBoundDict[byteString_term]))
        ofh.write(byteString_externalIndexOffset)

    print "Overall:"
    print "ifn0:",ifn0
    print "ifn:",ifn
    print "ofn:",ofn
    print "# of terms in the lexicon:",termCounter
    print "DONE."
    print
    ifh0.close()
    ifh.close()
    ofh.close()

exit(1)

'''
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/index.lex.0.0"
ifh = open(ifn,"r")

statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
termCounter = 0
while numOfBytesRead < fileSize:
    
    byteString_numOfLayers = ifh.read(4)
    byteString_termLen = ifh.read(4)
    
    (numOfLayers,) = unpack( "i", byteString_numOfLayers)
    (termLen,) = unpack( "i", byteString_termLen)
    
    byteString_term = ifh.read(termLen)
    
    byteString_numOfDocs = ifh.read(4)
    (numOfDocs,) = unpack( "i", byteString_numOfDocs)
    
    byteString_numOfChunks = ifh.read(4)
    (numOfChunks,) = unpack( "i", byteString_numOfChunks)
    
    byteString_numOfChunksLastBlock = ifh.read(4)
    (numOfChunksLastBlock,) = unpack( "i", byteString_numOfChunksLastBlock)
    
    byteString_numOfBlocks = ifh.read(4)
    (numOfBlocks,) = unpack( "i", byteString_numOfBlocks)
    
    byteString_blockNum = ifh.read(4)
    (blockNum,) = unpack( "i", byteString_blockNum)
    
    byteString_chunkNum = ifh.read(4)
    (chunkNum,) = unpack( "i", byteString_chunkNum)
    
    byteString_scoreThreshold = ifh.read(4)
    (scoreThreshold,) = unpack( "f", byteString_scoreThreshold)
    
    byteString_externalIndexOffset = ifh.read(8) # for the externalIndexOffset 
    
    numOfBytesRead += 4*2 + termLen + 4*9
    print termCounter,byteString_term,numOfDocs,scoreThreshold
    termCounter += 1

print "Overall:"
print "ifn:",ifn
ifh.close()
exit(1)
'''

# ifn = "/home/vgc/juanr/bigrams-xx-10.static"
ifn = "/home/vgc/juanr/bigrams-xx-10.dyn"
inputFileHandler0 = open(ifn,"rb")

statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    
    # file handler 0
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "static bigram"
    print "docID:",docID,score1
    for i in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
        print "----->",i,termID,static,dynamic,combined,score1
        # print "----->",i,termIDANDTermDict[termID],termID,static,dynamic,combined,score1
    if docID == 1:
        exit(1)
    numOfDocumentsProcessed += 1
    numOfBytesRead += 8 + score1 * 4 * 5
        
inputFileHandler0.close()
print "Ends."
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/outputDirForIndexes/index_layered_debug_try_20140926.ext"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
numOfPostings = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (value1,value2,value3) = unpack( "3f", byteString)
    print value1,value2,value3
inputFileHandler0.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "Ends."
exit(1)

# Quick plot of result counts versus index percentage (x = percentage
# points, y = measured values).
# NOTE(review): `plt` only exists if the commented-out
# "import matplotlib.pyplot as plt" at the top of the file is re-enabled,
# and `ifh` is a stale handle from an earlier section -- confirm both.
x = [1,2,3,4,5,6,7,8,9,10,100]
y = [6.3064,10.1014,12.8673,14.2817,16.4307,17.7926,19.8339,22.5345,24.4905,24.6681,42.85]
plt.plot(x, y)
plt.show()
ifh.close()
print "Ends."
exit(1)

# Per-query candidate-evaluated statistics will be collected here from the
# daat-or run logs named below (the section continues past this chunk).
qidWithCandidateEvaluatedDict = {}
ifnList = []
# Earlier WAND TOP10 run logs, kept for reference:
'''
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_1%_TOP10_20140924"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_2%_TOP10_20140924"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_3%_TOP10_20140924"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_4%_TOP10_20140924"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_5%_TOP10_20140924"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_6%_TOP10_20140924"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_7%_TOP10_20140924"
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_8%_TOP10_20140924"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_9%_TOP10_20140924"
ifn10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_10%_TOP10_20140924"
ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_20%_TOP10_20140924"
ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_30%_TOP10_20140924"
ifn13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_40%_TOP10_20140924"
ifn14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_50%_TOP10_20140924"
ifn15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_wand_index_layered_UPP-5_ptPowTo1_60%_TOP10_20140924"
'''

# Active inputs: daat-or TOP100 run logs, one per index-size percentage.
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_1%_TOP100_20140925"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_2%_TOP100_20140925"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_3%_TOP100_20140925"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_4%_TOP100_20140925"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_5%_TOP100_20140925"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_6%_TOP100_20140925"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_7%_TOP100_20140925"
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_8%_TOP100_20140925"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_9%_TOP100_20140925"
ifn10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_10%_TOP100_20140925"
ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_20%_TOP100_20140925"
ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_30%_TOP100_20140925"
ifn13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_40%_TOP100_20140925"
ifn14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_50%_TOP100_20140925"
ifn15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_daat-or_index_layered_UPP-5_ptPowTo1_60%_TOP100_20140925"


ifnList.append(ifn1)
ifnList.append(ifn2)
ifnList.append(ifn3)
ifnList.append(ifn4)
ifnList.append(ifn5)
ifnList.append(ifn6)
ifnList.append(ifn7)
ifnList.append(ifn8)
ifnList.append(ifn9)
ifnList.append(ifn10)
ifnList.append(ifn11)
ifnList.append(ifn12)
ifnList.append(ifn13)
ifnList.append(ifn14)
ifnList.append(ifn15)

for ifn in ifnList:
	ifh = open(ifn,"r")
	l = ifh.readline()
	qid = ""
	while l:
		if l.strip().startswith("qid"):
			qid = l.strip().split(" ")[1]
			if qid not in qidWithCandidateEvaluatedDict:
				qidWithCandidateEvaluatedDict[qid] = []
		if l.strip().startswith("WWW2015 Showing"):
			le = l.strip().split(" ")
			# print le[6][:-1]
			numOfCandidateEvaluated = int(le[6][:-1])
			qidWithCandidateEvaluatedDict[qid].append(numOfCandidateEvaluated)
		l = ifh.readline()
	ifh.close()
print "Overall:"
print "len(qidWithCandidateEvaluatedDict):",len(qidWithCandidateEvaluatedDict)
print "qidWithCandidateEvaluatedDict['95001']:",qidWithCandidateEvaluatedDict['95001']
print "qidWithCandidateEvaluatedDict['95002']:",qidWithCandidateEvaluatedDict['95002']
print "qidWithCandidateEvaluatedDict['95003']:",qidWithCandidateEvaluatedDict['95003']
print "qidWithCandidateEvaluatedDict['95004']:",qidWithCandidateEvaluatedDict['95004']
print "qidWithCandidateEvaluatedDict['95005']:",qidWithCandidateEvaluatedDict['95005']
exit(1)

# a new beginning for my life on 2014/09/25
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_max-score_index_layered_UPP-5_ptPowToDot1_90%_TOP10_20140925"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_max-score_index_layered_UPP-5_ptPowToDot1_80%_TOP10_20140925"
ifh = open(ifn,"r")
line = ifh.readline()
while line:
	if line.strip().startswith("qid:"):
		print line.strip()
		
	if line.strip().startswith("WWW2015"):
		print line.strip()
	
	line = ifh.readline()
ifh.close()
exit(1)

# check all the ready to time indexes
allTheReadyToTimeIndexes = []
# check things done in the layered index side.
counter = 0
# Walk the san_data output dir; every *.meta file marks an index ready to time.
# NOTE(review): [14:] presumably strips a fixed 14-char prefix such as
# "index_layered_" from the basename -- confirm against the actual filenames.
for dirname, dirnames, filenames in os.walk('/san_data/research/wei/workspace/NYU_IRTK/data/outputDirForIndexes'):
	for filename in filenames:
	 	if filename.endswith(".meta"):
			# print "-->",filename,os.stat(os.path.join(dirname, filename)).st_size
			print "-->",filename,os.stat(os.path.join(dirname, filename)).st_size
			# print "-->",filename
			allTheReadyToTimeIndexes.append(filename.strip().split(".")[0][14:])
			counter += 1
			
print "Overall:"
print "counter:",counter
print "len(allTheReadyToTimeIndexes):",len(allTheReadyToTimeIndexes)
exit(1)

# check all the ready to time indexes
allTheReadyToTimeIndexes = []
# check things done in the layered index side.
# Same *.meta scan as above, but over three directories: local (unfinished),
# san_data unfinished, and san_data finished.  Counts are kept separately.
counterUnfinished = 0
counterFinished = 0
for dirname, dirnames, filenames in os.walk('/home/vgc/wei/workspace/NYU_IRTK/data/outputDirForIndexes'):
	for filename in filenames:
	 	if filename.endswith(".meta"):
			# print "-->",filename,os.stat(os.path.join(dirname, filename)).st_size
			print "-->",filename
			allTheReadyToTimeIndexes.append(filename.strip().split(".")[0][14:])
			counterUnfinished += 1
			
for dirname, dirnames, filenames in os.walk('/san_data/research/wei/workspace/NYU_IRTK/data/outputDirForIndexes/unfinishedTimingIndexes'):
	for filename in filenames:
	 	if filename.endswith(".meta"):
			print "-->",filename,os.stat(os.path.join(dirname, filename)).st_size
			allTheReadyToTimeIndexes.append(filename.strip().split(".")[0][14:])
			counterUnfinished += 1

for dirname, dirnames, filenames in os.walk('/san_data/research/wei/workspace/NYU_IRTK/data/outputDirForIndexes/finishedTimingIndexes'):
	for filename in filenames:
	 	if filename.endswith(".meta"):
			print "-->",filename,os.stat(os.path.join(dirname, filename)).st_size
			allTheReadyToTimeIndexes.append(filename.strip().split(".")[0][14:])
			counterFinished += 1

print "counterUnfinished:",counterUnfinished
print "counterFinished:",counterFinished
print "total:",counterUnfinished + counterFinished
print "len(allTheReadyToTimeIndexes):",len(allTheReadyToTimeIndexes)
print "Intersection1:",allTheReadyToTimeIndexes

# find all the ready to do the layerify indexes
# Classify each index-build subdirectory as OK (dmap_basic and dmap_extended
# both exist and are non-empty) or failed.
counter = 0
counter2 = 0
OKJobs = []
failJobs = []
# python check program.
for dirname, dirnames, filenames in os.walk('/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920'):
    # print path to all subdirectories first.
    for subdirname in dirnames:
    	completeIFNPath = os.path.join(dirname, subdirname)
        fileToCheck1 = completeIFNPath + "/" + "index.dmap_basic"
        fileToCheck2 = completeIFNPath + "/" + "index.dmap_extended"
        fileToCheck3 = completeIFNPath + "/" + "index.idx.0.1"
        flag1 = False
        flag2 = False
    	flag3 = False
    	
    	checkFile1Size = 0
    	checkFile2Size = 0
    	
    	checkFile1LastModificationDate = 0
    	checkFile2LastModificationDate = 0
    	
    	if os.path.isfile(fileToCheck1):
    		checkFile1LastModificationDate = time.ctime(os.stat(fileToCheck1).st_mtime)
    		checkFile1Size = os.stat(fileToCheck1).st_size
    		if checkFile1Size != 0:
    			flag1 = True
# NOTE(review): from here down the indentation switches to pure tabs; under
# Python 2's tabstop-8 rule these lines nest INSIDE the isfile(fileToCheck1)
# branch above.  Directories missing index.dmap_basic are therefore skipped
# entirely (counted neither as OK nor as failed) -- confirm this is intended.
		
		if os.path.isfile(fileToCheck2):
			checkFile2LastModificationDate = time.ctime(os.stat(fileToCheck2).st_mtime)
			checkFile2Size = os.stat(fileToCheck2).st_size
			if checkFile2Size != 0:
				flag2 = True
		
		if os.path.isfile(fileToCheck3):
			flag3 = True		
		
		'''
		if flag1 and flag2 and flag3:
			# print "checking",subdirname,"Need to merge"
			#outputCommand1 = 'cd "tonsOfIndexesMaking_20140920/' + subdirname + '/"'
			#print outputCommand1
			outputCommand1 = 'mv tonsOfIndexesMaking_20140920/' + subdirname + '/' + " " + "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920/" + " &"
			print outputCommand1			
		'''
		
		
		if flag1 and flag2:
			# print "checking",subdirname,"Y"#,checkFile1LastModificationDate,checkFile2LastModificationDate
			methodIdentifier = subdirname.strip().split("_")[-3] + "_" + subdirname.strip().split("_")[-2] + "_" + subdirname.strip().split("PercentageIndex")[0] + "%"
			OKJobs.append(methodIdentifier)
			# print "Y"
			counter += 1
		else:
			counter2 += 1
			failJobs.append(subdirname)
			# print "N"

# python check program.
# Duplicate of the scan above, but over the /san_share copy of the build tree;
# results accumulate into the same counter/OKJobs/failJobs.
# NOTE(review): same mixed space/tab indentation caveat as the previous scan.
for dirname, dirnames, filenames in os.walk('/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920'):
    # print path to all subdirectories first.
    for subdirname in dirnames:
    	completeIFNPath = os.path.join(dirname, subdirname)
        fileToCheck1 = completeIFNPath + "/" + "index.dmap_basic"
        fileToCheck2 = completeIFNPath + "/" + "index.dmap_extended"
        fileToCheck3 = completeIFNPath + "/" + "index.idx.0.1"
        flag1 = False
        flag2 = False
    	flag3 = False
    	
    	checkFile1Size = 0
    	checkFile2Size = 0
    	
    	checkFile1LastModificationDate = 0
    	checkFile2LastModificationDate = 0
    	
    	if os.path.isfile(fileToCheck1):
    		checkFile1LastModificationDate = time.ctime(os.stat(fileToCheck1).st_mtime)
    		checkFile1Size = os.stat(fileToCheck1).st_size
    		if checkFile1Size != 0:
    			flag1 = True
		
		if os.path.isfile(fileToCheck2):
			checkFile2LastModificationDate = time.ctime(os.stat(fileToCheck2).st_mtime)
			checkFile2Size = os.stat(fileToCheck2).st_size
			if checkFile2Size != 0:
				flag2 = True
		
		if os.path.isfile(fileToCheck3):
			flag3 = True		
		
		'''
		if flag1 and flag2 and flag3:
			# print "checking",subdirname,"Need to merge"
			#outputCommand1 = 'cd "tonsOfIndexesMaking_20140920/' + subdirname + '/"'
			#print outputCommand1
			outputCommand1 = 'mv tonsOfIndexesMaking_20140920/' + subdirname + '/' + " " + "/san_share/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920/" + " &"
			print outputCommand1			
		'''
		
		
		if flag1 and flag2:
			# print "checking",subdirname,"Y"#,checkFile1LastModificationDate,checkFile2LastModificationDate
			methodIdentifier = subdirname.strip().split("_")[-3] + "_" + subdirname.strip().split("_")[-2] + "_" + subdirname.strip().split("PercentageIndex")[0] + "%"
			OKJobs.append(methodIdentifier)
			# print "Y"
			counter += 1
		else:
			counter2 += 1
			failJobs.append(subdirname)
			# print "N"

print "Overall:"
intersectionSet = set(allTheReadyToTimeIndexes).intersection(set(OKJobs))
leftOverSet = set(OKJobs) - intersectionSet
print "len(allTheReadyToTimeIndexes):",len(allTheReadyToTimeIndexes)
print "len(OKJobs):",len(OKJobs)
print "len(leftOverSet):",len(leftOverSet)
print leftOverSet
print "**********"
base = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tonsOfIndexesMaking_20140920/"
for name in leftOverSet:
	# print name
	newName = base + name.strip().split("_")[-1][:-1] + "PercentageIndex" + "_" + name.strip().split("_")[0] + "_" + name.strip().split("_")[1] + "_" + "20140920/"
	newCommandLine1 = 'cd ' + '"' + newName + '"'
	newCommandLine2 = './irtk --local --layerify index:0.0 ' + 'index_layered_' + name + ' --config-options=overlapping_layers=true\;num_layers=2\;layering_strategy=equal_2 --layerify > ' + '/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/log_layerify_' + name + ' &'
	print newCommandLine1
	print newCommandLine2
	print
print "**********"
exit(1)

# Count how many postings in the promise-sorted dump belong to term 23990334,
# scanning at most the first 64519480 lines (the 1% budget -- hard-coded cap).
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_100%_postings_sortedByProimise_20140916"
ifh = open(ifn,"r")
l = ifh.readline()
lc = 1
counter = 0
while l:
	le = l.strip().split(" ")
	if le[0] == "23990334":
		counter += 1
	if lc == 64519480:
		break
	if lc % 1000000 == 0:
		print lc
	l = ifh.readline()
	lc += 1
ifh.close()
print "Overall:"
print "lc:",lc
print "counter:",counter
exit(1)

numOfPostings = 6451948010
print numOfPostings * 0.01
print numOfPostings * 0.02
print numOfPostings * 0.03
print numOfPostings * 0.04
print numOfPostings * 0.05
print numOfPostings * 0.06
print numOfPostings * 0.07
print numOfPostings * 0.08
print numOfPostings * 0.09
print numOfPostings * 0.1
print numOfPostings * 0.2
print numOfPostings * 0.3
print numOfPostings * 0.4
print numOfPostings * 0.5
print numOfPostings * 0.6
print numOfPostings * 0.7
print numOfPostings * 0.8
print numOfPostings * 0.9
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_100%_postings_sortedByProimise_20140916"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/unigramFromWei/gov2/allPostingPopped_GOV2_ourApproach_dynamicWeight_5_ptChangedFromJuan_ptPowTo1_originalPTopK_201407180"
inputFileHandler0 = open(ifn,"rb")

statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
numOfPostings = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,probablity,score1) = unpack( "2I2f", byteString)
    # print termID,docID,probablity,score1
    numOfPostings += 1
    ofh.write(str(termID) + " " + str(docID) + " " + str(probablity) + " " + str(score1) + "\n")
    if numOfPostings % 1000000 == 0:
    	print numOfPostings,"recorded."
inputFileHandler0.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
print "Ends."
exit(1)



ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_results_OR_TOP10_UPP-5_1%_training95K"
ifh1 = open(ifn,"r")
l = ifh1.readline()
qid = ""
numOfResults = ""
outputLine = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        # print l.strip()
        #print qid
        qid = le[1]
        outputLine += qid + " "
    if l.strip().startswith("WSDM2015 Showing"):
        #print l.strip()
        #print
        outputLine += le[6][:-1] + " " + le[-2][1:]
        print outputLine
        outputLine = ""   
    l = ifh1.readline()
exit(1)



# From the 23-field arff rows, sum the tier-1 (fields 1..10) and tier-2
# (fields 11..21) list lengths per qid, treating "?" as missing (0).
# NOTE(review): range(11,22) stops at index 21, so le[22] is never summed --
# presumably it is a label/class field, but confirm; otherwise this is an
# off-by-one given the comment says tier 2 has 11 values.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qidWithListLengthsAmongTwoTiers.arff"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
	le = l.strip().split(",")
	if len(le) == 23:
		qid = le[0]
		QPCCostTier1 = 0
		QPCCostTier2 = 0
		for i in range(1,11):
			if le[i] == "?":
				pass
			else:
				QPCCostTier1 += int(le[i])
		for i in range(11,22):
			if le[i] == "?":
				pass
			else:
				QPCCostTier2 += int(le[i])
		print qid,QPCCostTier1,QPCCostTier2
	l = ifh.readline()
ifh.close()
exit(1)

# Load two rankings of the same query set: one sorted by ACTUAL gain/cost
# (value in the last column) and one sorted by PREDICTED gain/cost (value in
# the second-to-last column).  Parallel lists keep queryIDs and thresholds.
queryIDFallThroughActualGainCostList = []
thresholdsFallThroughActualGainCostList = []
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_gain2_cost2_extended_20140915_sortedByActualGainCost.txt"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
	le = l.strip().split(" ")
	queryID = le[0]
	actualGainCost = float(le[-1])
	queryIDFallThroughActualGainCostList.append(queryID)
	thresholdsFallThroughActualGainCostList.append(actualGainCost)
	l = ifh1.readline()
ifh1.close()

queryIDFallThroughPredictedGainCostList = []
thresholdsFallThroughPredictedGainCostList = []
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_gain2_cost2_extended_20140915_sortedByPredictedGainCost.txt"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
	le = l.strip().split(" ")
	queryID = le[0]
	predictedGainCost = float(le[-2])
	queryIDFallThroughPredictedGainCostList.append(queryID)
	thresholdsFallThroughPredictedGainCostList.append(predictedGainCost)
	l = ifh2.readline()
ifh2.close()

print "Overall:"
print "len(queryIDFallThroughActualGainCostList):",len(queryIDFallThroughActualGainCostList)
print "len(queryIDFallThroughPredictedGainCostList):",len(queryIDFallThroughPredictedGainCostList)
print "len(thresholdsFallThroughActualGainCostList):",len(thresholdsFallThroughActualGainCostList)
print "len(thresholdsFallThroughPredictedGainCostList):",len(thresholdsFallThroughPredictedGainCostList)
print "ifn1:",ifn1
print "ifn2:",ifn2
# NOTE(review): ifh1/ifh2 were already closed above; re-closing is a no-op.
ifh1.close()
ifh2.close()

# For each fall-through percentage, compare the top-N prefixes of the two
# rankings loaded above and print the intersection/union ratio (Jaccard)
# plus the actual and predicted gain/cost thresholds at the cutoff.
# Note: "/" is true division here thanks to the __future__ import at file top.
percentageList = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
size = len(queryIDFallThroughActualGainCostList)
for percentage in percentageList:
	upperBound = int( size * percentage )
	intersectionSize = len( set(queryIDFallThroughActualGainCostList[:upperBound]).intersection(set(queryIDFallThroughPredictedGainCostList[:upperBound])) )
	unionSize = len( set(queryIDFallThroughActualGainCostList[:upperBound]).union(set(queryIDFallThroughPredictedGainCostList[:upperBound])))
	'''
	print "% of queries falling through:",percentage
	# print "intersectionSize:",intersectionSize
	# print "unionSize:",unionSize
	print "symmetric difference:",intersectionSize / unionSize
	print "actual gainCost threshold:",thresholdsFallThroughActualGainCostList[upperBound-1]
	print "predicted gainCost threshold:",thresholdsFallThroughPredictedGainCostList[upperBound-1]
	print
	'''
	# NOTE(review): if upperBound is 0 this divides by zero / indexes [-1];
	# only safe while the input lists are non-empty and percentages >= 0.1.
	print percentage,intersectionSize / unionSize,thresholdsFallThroughActualGainCostList[upperBound-1],thresholdsFallThroughPredictedGainCostList[upperBound-1]
exit(1)

# Dead stub: opens and immediately closes the predicted-vs-actual gain file.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/output_predictedGain_actualGain_20140915.txt"
ifh = open(ifn,"r")
ifh.close()
exit(1)

print "Begins..."
# Fix the pruned-index sampling file: rows are expected to have 11 fields;
# shorter rows are right-padded with "0.0" values.  The unpruned file is only
# verified (any short row aborts the run).
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_queryTermsSamplingValues_20140912"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/Unpruned_100%_queryTermsSamplingValues_20140912"

ofn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_queryTermsSamplingValues_20140915_fixed"
ofn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/Unpruned_100%_queryTermsSamplingValues_20140915_fixed"

ofh1 = open(ofn1,"w")
ofh2 = open(ofn2,"w")

ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
	le = l.strip().split(" ")
	if len(le) == 11:
		ofh1.write(l)
	else:
		# print l.strip()
		# pad with zeros up to 11 fields; note the padded line keeps a
		# trailing space before the newline
		numOfValuesFixed = 11 - len(le)
		l = l.strip() + " "
		for i in range(0,numOfValuesFixed):
			l += "0.0" + " "
		l += "\n"
		ofh1.write(l)
		# print l.strip()
		# exit(1)
	l = ifh1.readline()

ifh2 = open(ifn2,"r")
l = ifh2.readline()
while l:
	le = l.strip().split(" ")
	print "len(le):",len(le)
	if len(le) == 11:
		ofh2.write(l)
	else:
		print l.strip()
		exit(1)
	l = ifh2.readline()

print "Overall:"
print "ifn1:",ifn1
print "ifn2:",ifn2
print "ofn1:",ofn1
print "ofn2:",ofn2
ifh1.close()
ifh2.close()
ofh1.close()
ofh2.close()
exit(1)

# Sanity-check the training arff: once a 137-field row has been seen, any row
# with a different field count is printed and the script aborts.
flag = False
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140915_step8_extended.arff"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
	le = l.strip().split(",")
	print "len(le):",len(le)
	if len(le) == 137:
		flag = True
	if len(le) != 137 and flag == True:
		print len(l.strip().split(","))
		print l.strip()
		exit(1)
	l = ifh.readline()
ifh.close()
exit(1)





# For every "qid:" line in the raw-results log, echo it together with the
# next two lines, separated by a blank line per query.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_results_OR_TOP10_Unpruned_100%_training95K_head_14K"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
	if l.strip().startswith("qid:"):
		print l.strip()
		l = ifh.readline()
		print l.strip()
		l = ifh.readline()
		print l.strip()
		print
	l = ifh.readline()
ifh.close()
exit(1)

termDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/Unpruned_100%_queryTermsSamplingValues_20140912"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    term = le[0]
    termDict[term] = 1
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(termDict):",len(termDict)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_lexiconTermsSamplingValues_20140912"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    term = le[0]
    if term in termDict:
        print l.strip()
    l = ifh.readline()
ifh.close()
exit(1)

rankRecordList = [1,5,10,20,50,100,500,1000,5000,10000]
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_termScoreDistribution_20140912"
ifh = open(ifn,"r")
l = ifh.readline()
currentRank = 0
outputLine = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("Search:"):
        print outputLine
        outputLine = ""
        outputLine += le[1] + " "
        currentRank = 0
    if l.strip().startswith("Score:"):
        # print l.strip()
        currentRank += 1
        if currentRank in rankRecordList:
            outputLine += le[1].strip().split("\t")[0] + " "
            # print currentRank,
    l = ifh.readline()
exit(1)

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/debug_term_related"
ifh = open(ifn,"r")
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UPP-5_1%_term_polyIRToolkitCompatible"
ofh = open(ofn,"w")
l = ifh.readline()
termCounter = 0
while l:
    le = l.strip().split(" ")
    currentTerm = le[0]
    ofh.write( str(termCounter) + ":" + str(currentTerm) + "\n")
    l = ifh.readline()
    termCounter += 1
ofh.close()
ifh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# Load per-qid tier-1 intersection sizes (second-to-last column), then append
# that size (or "N/A") as a new ":"-separated field to every training row.
qidAndIntersectionSizeDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/intersectionSizeInTier1_trainingQueries_exhaustiveAND_20140912"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    qid = le[0]
    intersectionSize = int(le[-2])
    qidAndIntersectionSizeDict[qid] = intersectionSize
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(qidAndIntersectionSizeDict):",len(qidAndIntersectionSizeDict)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140912_step7"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140912_step6"
ifh = open(ifn,"r")
ofh = open(ofn,"w")
for line in ifh.readlines():
    # qid is the first ":"-separated field of each training row
    lineElements = line.strip().split(":")
    qid = lineElements[0]
    if qid in qidAndIntersectionSizeDict:
        completeOutputLine = line.strip() + ":" + str(qidAndIntersectionSizeDict[qid])
    else:
        completeOutputLine = line.strip() + ":" + "N/A"
    ofh.write(completeOutputLine.strip() + "\n")
ifh.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)

histogramFreqHitDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitRanks_20140902"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    hitFreq = int(le[1])
    if hitFreq not in histogramFreqHitDict:
        histogramFreqHitDict[hitFreq] = 1
    else:
        histogramFreqHitDict[hitFreq] += 1
    l = ifh.readline()
ifh.close()
print "Overall:"
for hitFreq in histogramFreqHitDict:
    print hitFreq,histogramFreqHitDict[hitFreq]
exit(1)

print "Begins..."
# Plot rank (x) vs score (y) for one query's raw result list.
# NOTE(review): plt is undefined -- the matplotlib import at the top of the
# file is commented out, so this section would NameError if it ever ran.
x = []
y = []
# 1%
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_us_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_post_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_office_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_locations_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_bronx_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/rawResults_ny_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/1%/combined_curve_95001_20140910"
# full
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_us_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_post_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_office_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_locations_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_bronx_20140910"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_ny_20140910"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/full/rawResults_90001_20140910"


ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    # result rows have 25 fields and a GX... TREC doc name in field -2;
    # rank is field 0, score is field -4
    if len(le) == 25 and le[-2].startswith("GX"):
        theRank = int(le[0])
        score = float(le[-4])
        x.append(theRank)
        y.append(score)

    l = ifh.readline()
print "len(x):",len(x)
print "len(y):",len(y)
plt.plot(x, y)
plt.legend()
plt.show()

ifh.close()
print "Ends."
exit(1)

# Load trecID -> number-of-postings (column 2) for the whole GOV2 collection,
# then trecID -> hit count for the fake-query TOP10 results.
trecIDAndNumOfPostingsDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    trecID = le[1]
    numOfPostings = int(le[2])
    trecIDAndNumOfPostingsDict[trecID] = numOfPostings 
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print lineCounter,"lines processed."
print "len(trecIDAndNumOfPostingsDict):",len(trecIDAndNumOfPostingsDict)
ifh.close()

# The 5K query case please.
trecIDWithBinValueDict = {}
trecIDHitDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitRanks_20140902"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    trecID = le[0]
    # hit count = number of rank entries after the trecID on the line
    currentHit = len(le) - 1
    trecIDHitDict[trecID] = currentHit
    l = ifh.readline()
ifh.close()
print "Overall:"
print "len(trecIDHitDict):",len(trecIDHitDict)

# Per-bin accumulators for the score-binning pass below (11 bins each).
# score added per bin
counter00 = counter01 = counter02 = 0
counter1 = counter2 = counter3 = counter4 = 0
counter5 = counter6 = counter7 = counter8 = 0

# # of docs added per bin
counteraa = counterab = counterac = 0
counterb = counterc = counterd = countere = 0
counterf = counterg = counterh = counteri = 0

# posting added per bin
countera1 = countera2 = countera3 = 0
counterb1 = counterc1 = counterd1 = 0
countere1 = counterf1 = counterg1 = counterh1 = counteri1 = 0

# num of TOP10 doc hits added per bin
counterHitsA1 = counterHitsA2 = counterHitsA3 = 0
counterHitsB1 = counterHitsC1 = counterHitsD1 = 0
counterHitsE1 = counterHitsF1 = counterHitsG1 = counterHitsH1 = counterHitsI1 = 0


# Bin each document by its 1/lg(rank) hit score into 11 ranges
# ([0,0.2), [0.2,0.5), ..., [500,inf)) and accumulate per-bin totals:
# summed score, document count, document size (postings), and TOP10 hits.
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP1000_docHitScores_sortedByScore1_docSizeAdded_20140903"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
# NOTE(review): binValue is only assigned inside the range tests below; if the
# very first score were negative or NaN the dict store would NameError, and
# any later out-of-range score silently reuses the previous line's binValue.
while l:
    le = l.strip().split(" ")
    trecID = le[0]
    # score1 = float(le[1])   # 1/rank
    score1 = float(le[2])   # 1/lg(rank)
    size = int(le[-1])
    lineCounter += 1
    currentHitFromTOP10 = 0
    if trecID in trecIDHitDict:
        currentHitFromTOP10 = trecIDHitDict[trecID]
    else:
        currentHitFromTOP10 = 0
    if score1 >= 0 and score1 < 0.2:
        binValue = 0
        counter00 += score1
        counteraa += 1
        countera1 += size
        counterHitsA1 += currentHitFromTOP10 
    if score1 >= 0.2 and score1 < 0.5:
        binValue = 1
        counter01 += score1
        counterab += 1
        countera2 += size
        counterHitsA2 += currentHitFromTOP10 
    if score1 >= 0.5 and score1 < 1:
        binValue = 2
        counter02 += score1
        counterac += 1
        countera3 += size
        counterHitsA3 += currentHitFromTOP10 
    if score1 >= 1 and score1 < 5:
        binValue = 3
        counter1 += score1
        counterb += 1
        counterb1 += size
        counterHitsB1 += currentHitFromTOP10
    if score1 >= 5 and score1 < 10:
        binValue = 4
        counter2 += score1
        counterc += 1
        counterc1 += size
        counterHitsC1 += currentHitFromTOP10
    if score1 >= 10 and score1 < 20:
        binValue = 5
        counter3 += score1
        counterd += 1
        counterd1 += size
        counterHitsD1 += currentHitFromTOP10
    if score1 >= 20 and score1 < 50:
        binValue = 6
        counter4 += score1
        countere += 1
        countere1 += size
        counterHitsE1 += currentHitFromTOP10
    if score1 >= 50 and score1 < 100:
        binValue = 7
        counter5 += score1
        counterf += 1
        counterf1 += size
        counterHitsF1 += currentHitFromTOP10
    if score1 >= 100 and score1 < 200:
        binValue = 8
        counter6 += score1
        counterg += 1
        counterg1 += size
        counterHitsG1 += currentHitFromTOP10
    if score1 >= 200 and score1 < 500:
        binValue = 9
        counter7 += score1
        counterh += 1
        counterh1 += size
        counterHitsH1 += currentHitFromTOP10
    if score1 >= 500:
        binValue = 10
        counter8 += score1
        counteri += 1
        counteri1 += size
        counterHitsI1 += currentHitFromTOP10
    trecIDWithBinValueDict[trecID] = binValue
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."

    l = ifh.readline()
ifh.close()

print "len(trecIDWithBinValueDict):",len(trecIDWithBinValueDict)
print

# Testing-query pass: per-bin accumulators for document count, hit count and
# posting count (bins 0..10; letters a..k mirror bins 0..10).
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/AND/trecIDHitsCollection_notSorted_AND_testingQueries_20140829_TOP10"
numOfDocumentsNotSeen = 0

# num of docs
tempCounter0 = tempCounter1 = tempCounter2 = tempCounter3 = 0
tempCounter4 = tempCounter5 = tempCounter6 = tempCounter7 = 0
tempCounter8 = tempCounter9 = tempCounter10 = 0

# num of hits
tempCountera = tempCounterb = tempCounterc = tempCounterd = 0
tempCountere = tempCounterf = tempCounterg = tempCounterh = 0
tempCounteri = tempCounterj = tempCounterk = 0

# num of postings
totalNumOfPostings0 = totalNumOfPostings1 = totalNumOfPostings2 = 0
totalNumOfPostings3 = totalNumOfPostings4 = totalNumOfPostings5 = 0
totalNumOfPostings6 = totalNumOfPostings7 = totalNumOfPostings8 = 0
totalNumOfPostings9 = totalNumOfPostings10 = 0

# For every testing-query hit record, look up the document's bin (assigned by
# the score-binning pass above) and accumulate doc/hit/posting totals per bin.
# NOTE(review): trecIDAndNumOfPostingsDict[currTrecID] raises KeyError if a
# binned trecID is missing from the postings dict -- assumed to be complete
# for GOV2; confirm.
currIFH = open(ifn,"r")
l = currIFH.readline()
while l:
    le = l.strip().split(" ")
    currTrecID = le[0]
    currTrecIDHitFreq = int(le[1])
    if currTrecID in trecIDWithBinValueDict:
        currBinValue = trecIDWithBinValueDict[currTrecID]
        if currBinValue == 0:
            tempCounter0 += 1
            tempCountera += currTrecIDHitFreq
            totalNumOfPostings0 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 1:
            tempCounter1 += 1
            tempCounterb += currTrecIDHitFreq
            totalNumOfPostings1 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 2:
            tempCounter2 += 1
            tempCounterc += currTrecIDHitFreq
            totalNumOfPostings2 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 3:
            tempCounter3 += 1
            tempCounterd += currTrecIDHitFreq
            totalNumOfPostings3 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 4:
            tempCounter4 += 1
            tempCountere += currTrecIDHitFreq
            totalNumOfPostings4 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 5:
            tempCounter5 += 1
            tempCounterf += currTrecIDHitFreq
            totalNumOfPostings5 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 6:
            tempCounter6 += 1
            tempCounterg += currTrecIDHitFreq
            totalNumOfPostings6 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 7:
            tempCounter7 += 1
            tempCounterh += currTrecIDHitFreq
            totalNumOfPostings7 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 8:
            tempCounter8 += 1
            tempCounteri += currTrecIDHitFreq
            totalNumOfPostings8 += trecIDAndNumOfPostingsDict[currTrecID]                            
        if currBinValue == 9:
            tempCounter9 += 1
            tempCounterj += currTrecIDHitFreq
            totalNumOfPostings9 += trecIDAndNumOfPostingsDict[currTrecID]
        if currBinValue == 10:
            tempCounter10 += 1
            tempCounterk += currTrecIDHitFreq
            totalNumOfPostings10 += trecIDAndNumOfPostingsDict[currTrecID]
    else:
        numOfDocumentsNotSeen += 1
    l = currIFH.readline()

print "Overall:"
print "[a):",tempCounter0,tempCountera,totalNumOfPostings0
print "[b):",tempCounter1,tempCounterb,totalNumOfPostings1
print "[c):",tempCounter2,tempCounterc,totalNumOfPostings2
print "[d):",tempCounter3,tempCounterd,totalNumOfPostings3
print "[e):",tempCounter4,tempCountere,totalNumOfPostings4
print "[f):",tempCounter5,tempCounterf,totalNumOfPostings5
print "[g):",tempCounter6,tempCounterg,totalNumOfPostings6
print "[h):",tempCounter7,tempCounterh,totalNumOfPostings7
print "[i):",tempCounter8,tempCounteri,totalNumOfPostings8
print "[j):",tempCounter9,tempCounterj,totalNumOfPostings9
print "[k):",tempCounter10,tempCounterk,totalNumOfPostings10

sum1 = tempCounter0 + tempCounter1 + tempCounter2 + tempCounter3 + tempCounter4 + tempCounter5 + tempCounter6 + tempCounter7 + tempCounter8 + tempCounter9 + tempCounter10
sum2 = tempCountera + tempCounterb + tempCounterc + tempCounterd + tempCountere + tempCounterf + tempCounterg + tempCounterh + tempCounteri + tempCounterj + tempCounterk
sum3 = totalNumOfPostings0 + totalNumOfPostings1 + totalNumOfPostings2 + totalNumOfPostings3 + totalNumOfPostings4 + totalNumOfPostings5 + totalNumOfPostings6 + totalNumOfPostings7 + totalNumOfPostings8 + totalNumOfPostings9 + totalNumOfPostings10
print "ALL:",sum1,sum2,sum3
print "numOfDocumentsNotSeen:",numOfDocumentsNotSeen
print
ifh.close()

# Dump of score-bin statistics accumulated in an earlier section of the file.
# NOTE(review): counter00..counter8, counteraa..counteri, countera1..counteri1
# and counterHits* are all defined before this view -- presumably one counter
# per score bin (three "debug" sub-bins 00/01/02 followed by bins 1..8);
# verify against the section that populates them.
print "Overall:"
print "*****score binned"
print counter00
print counter01
print counter02
print counter1
print counter2
print counter3
print counter4
print counter5
print counter6
print counter7
print counter8
print "*****# of docs"
print counteraa
print counterab
print counterac
print counterb
print counterc
print counterd
print countere
print counterf
print counterg
print counterh
print counteri
print "*****postings"
print countera1
print countera2
print countera3
print counterb1
print counterc1
print counterd1
print countere1
print counterf1
print counterg1
print counterh1
print counteri1
print "*****# of TOP10 doc hits"
print counterHitsA1
print counterHitsA2
print counterHitsA3
print counterHitsB1
print counterHitsC1
print counterHitsD1
print counterHitsE1
print counterHitsF1
print counterHitsG1
print counterHitsH1
print counterHitsI1
# End of this analysis section; the code below is unreachable scratch work.
exit(1)

trecIDAndNumOfPostingsDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    trecID = le[1]
    numOfPostings = int(le[2])
    trecIDAndNumOfPostingsDict[trecID] = numOfPostings 
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print lineCounter,"lines processed."
print "len(trecIDAndNumOfPostingsDict):",len(trecIDAndNumOfPostingsDict)
ifh.close()

# First-pass accumulators, one triple per hit-frequency bin 0..8:
#   tempCounterN      - number of documents in bin N
#   tempCounter[a-i]  - summed hit frequency of the documents in the bin
#   totalNumOfPostingsN - summed posting counts of the documents in the bin
tempCounter0 = tempCounter1 = tempCounter2 = 0
tempCounter3 = tempCounter4 = tempCounter5 = 0
tempCounter6 = tempCounter7 = tempCounter8 = 0

tempCountera = tempCounterb = tempCounterc = 0
tempCounterd = tempCountere = tempCounterf = 0
tempCounterg = tempCounterh = tempCounteri = 0

totalNumOfPostings0 = totalNumOfPostings1 = totalNumOfPostings2 = 0
totalNumOfPostings3 = totalNumOfPostings4 = totalNumOfPostings5 = 0
totalNumOfPostings6 = totalNumOfPostings7 = totalNumOfPostings8 = 0

# trecID -> bin index, assigned during the first pass below.
trecIDWithBinValueDict = {}

# Active configuration: TOP1000 AND (only three input files for this run).
_hitsDir = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/AND/"
ifn1 = _hitsDir + "trecIDHitsCollection_notSorted_AND_fakeQueries_20140826"
ifn2 = _hitsDir + "trecIDHitsCollection_notSorted_AND_95KTrainingQueries_20140826"
ifn3 = _hitsDir + "trecIDHitsCollection_notSorted_TOP1000_AND_testingQueries_20140828"

# Alternative configurations, kept for reference:
# TOP1000 OR
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/OR/trecIDHitsCollection_notSorted_OR_fakeQueries_20140828"
#ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/OR/trecIDHitsCollection_notSorted_OR_xau_heldOutFakeQueries_20140828"
#ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/OR/trecIDHitsCollection_notSorted_OR_95KTrainingQueries_20140829"
#ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP1000/OR/trecIDHitsCollection_notSorted_OR_testingQueries_20140826"

# TOP100 AND
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/AND/trecIDHitsCollection_notSorted_AND_9.9MFakeQueries_20140829_TOP100"
#ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/AND/trecIDHitsCollection_notSorted_AND_0.1MFakeQueries_20140829_TOP100"
#ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/AND/trecIDHitsCollection_notSorted_AND_95KTrainingQueries_20140829_TOP100"
#ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/AND/trecIDHitsCollection_notSorted_AND_testingQueries_20140829_TOP100"

# TOP100 OR
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/OR/trecIDHitsCollection_notSorted_OR_9.9MFakeQueries_20140829_TOP100"
#ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/OR/trecIDHitsCollection_notSorted_OR_0.1MFakeQueries_20140829_TOP100"
#fn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/OR/trecIDHitsCollection_notSorted_OR_95KTrainingQueries_20140829_TOP100"
#ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP100/OR/trecIDHitsCollection_notSorted_OR_testingQueries_20140829_TOP100"

# TOP10 AND
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/AND/trecIDHitsCollection_notSorted_AND_9.9MFakeQueries_20140829_TOP10"
#ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/AND/trecIDHitsCollection_notSorted_AND_0.1MFakeQueries_20140829_TOP10"
#ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/AND/trecIDHitsCollection_notSorted_AND_95KTrainingQueries_20140829_TOP10"
#ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/AND/trecIDHitsCollection_notSorted_AND_testingQueries_20140829_TOP10"

# TOP10 OR
#ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/OR/trecIDHitsCollection_notSorted_OR_9.9MFakeQueries_20140829_TOP10"
#ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/OR/trecIDHitsCollection_notSorted_OR_0.1MFakeQueries_20140829_TOP10"
#ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/OR/trecIDHitsCollection_notSorted_OR_95KTrainingQueries_20140829_TOP10"
#ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/TOP10/OR/trecIDHitsCollection_notSorted_OR_testingQueries_20140829_TOP10"

# First pass: assign every document (trecID) to a hit-frequency bin, remember
# the assignment in trecIDWithBinValueDict, and tally per-bin document counts
# (tempCounterN), summed hit frequencies (tempCounter[a-i]) and posting totals
# (totalNumOfPostingsN).
# Bin edges: [1,10) [10,50) [50,100) [100,500) [500,1000) [1000,5000)
# [5000,10000) [10000,50000) [50000,inf).
ifh1 = open(ifn1,"r")
l = ifh1.readline()
while l:
    le = l.strip().split(" ")
    currTrecID = le[0]
    currTrecIDHitFreq = int(le[1])
    # BUG FIX: the original used nine independent `if`s; a hit frequency < 1
    # matched none of them, leaving binValue stale from the previous line (or
    # unbound on the first one).  Such lines are now skipped explicitly, and
    # the mutually-exclusive bins use an elif chain.
    if currTrecIDHitFreq < 1:
        l = ifh1.readline()
        continue
    if currTrecIDHitFreq < 10:
        binValue = 0
        tempCounter0 += 1
        tempCountera += currTrecIDHitFreq
        totalNumOfPostings0 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 50:
        binValue = 1
        tempCounter1 += 1
        tempCounterb += currTrecIDHitFreq
        totalNumOfPostings1 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 100:
        binValue = 2
        tempCounter2 += 1
        tempCounterc += currTrecIDHitFreq
        totalNumOfPostings2 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 500:
        binValue = 3
        tempCounter3 += 1
        tempCounterd += currTrecIDHitFreq
        totalNumOfPostings3 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 1000:
        binValue = 4
        tempCounter4 += 1
        tempCountere += currTrecIDHitFreq
        totalNumOfPostings4 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 5000:
        binValue = 5
        tempCounter5 += 1
        tempCounterf += currTrecIDHitFreq
        totalNumOfPostings5 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 10000:
        binValue = 6
        tempCounter6 += 1
        tempCounterg += currTrecIDHitFreq
        totalNumOfPostings6 += trecIDAndNumOfPostingsDict[currTrecID]
    elif currTrecIDHitFreq < 50000:
        binValue = 7
        tempCounter7 += 1
        tempCounterh += currTrecIDHitFreq
        totalNumOfPostings7 += trecIDAndNumOfPostingsDict[currTrecID]
    else:
        binValue = 8
        tempCounter8 += 1
        tempCounteri += currTrecIDHitFreq
        totalNumOfPostings8 += trecIDAndNumOfPostingsDict[currTrecID]
    trecIDWithBinValueDict[currTrecID] = binValue
    l = ifh1.readline()
ifh1.close()

# Report per bin: (#docs, total hit frequency, total postings), then the
# column sums as a cross-check.
print "Overall:"
print "[1,10):",tempCounter0,tempCountera,totalNumOfPostings0
print "[10,50):",tempCounter1,tempCounterb,totalNumOfPostings1
print "[50,100):",tempCounter2,tempCounterc,totalNumOfPostings2
print "[100,500):",tempCounter3,tempCounterd,totalNumOfPostings3
print "[500,1000):",tempCounter4,tempCountere,totalNumOfPostings4
print "[1000,5000):",tempCounter5,tempCounterf,totalNumOfPostings5
print "[5000,10000):",tempCounter6,tempCounterg,totalNumOfPostings6
print "[10000,50000):",tempCounter7,tempCounterh,totalNumOfPostings7
print "[50000):",tempCounter8,tempCounteri,totalNumOfPostings8
sum1 = tempCounter0 + tempCounter1 + tempCounter2 + tempCounter3 + tempCounter4 + tempCounter5 + tempCounter6 + tempCounter7 + tempCounter8
sum2 = tempCountera + tempCounterb + tempCounterc + tempCounterd + tempCountere + tempCounterf + tempCounterg + tempCounterh + tempCounteri
sum3 = totalNumOfPostings0 + totalNumOfPostings1 + totalNumOfPostings2 + totalNumOfPostings3 + totalNumOfPostings4 + totalNumOfPostings5 + totalNumOfPostings6 + totalNumOfPostings7 + totalNumOfPostings8
print "ALL:",sum1,sum2,sum3

print "len(trecIDWithBinValueDict):",len(trecIDWithBinValueDict)
print

# Second pass: for each remaining hits file, re-tally its documents using the
# bin assignment fixed by the first pass over ifn1.  All counters are reset
# per file, so each file's report stands alone.
ifnList = []
ifnList.append(ifn2)
ifnList.append(ifn3)
# ifnList.append(ifn4)
for currIFN in ifnList:
    # Documents in this file that were never binned during the first pass.
    numOfDocumentsNotSeen = 0
    # tempCounterN: number of documents in bin N for this file.
    tempCounter0 = 0
    tempCounter1 = 0
    tempCounter2 = 0
    tempCounter3 = 0
    tempCounter4 = 0
    tempCounter5 = 0
    tempCounter6 = 0
    tempCounter7 = 0
    tempCounter8 = 0
    
    # tempCounter[a-i]: summed hit frequency per bin for this file.
    tempCountera = 0
    tempCounterb = 0
    tempCounterc = 0
    tempCounterd = 0
    tempCountere = 0
    tempCounterf = 0
    tempCounterg = 0
    tempCounterh = 0
    tempCounteri = 0
    
    # totalNumOfPostingsN: summed posting counts per bin for this file.
    totalNumOfPostings0 = 0
    totalNumOfPostings1 = 0
    totalNumOfPostings2 = 0
    totalNumOfPostings3 = 0
    totalNumOfPostings4 = 0
    totalNumOfPostings5 = 0
    totalNumOfPostings6 = 0
    totalNumOfPostings7 = 0
    totalNumOfPostings8 = 0
    
    currIFH = open(currIFN,"r")
    l = currIFH.readline()
    while l:
        # Each line: "<trecID> <hitFrequency>".
        le = l.strip().split(" ")
        currTrecID = le[0]
        currTrecIDHitFreq = int(le[1])
        if currTrecID in trecIDWithBinValueDict:
            # Tally into the bin assigned by the first pass (bins are
            # mutually exclusive, so exactly one branch fires).
            currBinValue = trecIDWithBinValueDict[currTrecID]
            if currBinValue == 0:
                tempCounter0 += 1
                tempCountera += currTrecIDHitFreq
                totalNumOfPostings0 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 1:
                tempCounter1 += 1
                tempCounterb += currTrecIDHitFreq
                totalNumOfPostings1 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 2:
                tempCounter2 += 1
                tempCounterc += currTrecIDHitFreq
                totalNumOfPostings2 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 3:
                tempCounter3 += 1
                tempCounterd += currTrecIDHitFreq
                totalNumOfPostings3 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 4:
                tempCounter4 += 1
                tempCountere += currTrecIDHitFreq
                totalNumOfPostings4 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 5:
                tempCounter5 += 1
                tempCounterf += currTrecIDHitFreq
                totalNumOfPostings5 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 6:
                tempCounter6 += 1
                tempCounterg += currTrecIDHitFreq
                totalNumOfPostings6 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 7:
                tempCounter7 += 1
                tempCounterh += currTrecIDHitFreq
                totalNumOfPostings7 += trecIDAndNumOfPostingsDict[currTrecID]
            if currBinValue == 8:
                tempCounter8 += 1
                tempCounteri += currTrecIDHitFreq
                totalNumOfPostings8 += trecIDAndNumOfPostingsDict[currTrecID]                            
        else:
            numOfDocumentsNotSeen += 1
        l = currIFH.readline()

    # Per-file report: (#docs, total hit frequency, total postings) per bin.
    print "Overall:"
    print "[1,10):",tempCounter0,tempCountera,totalNumOfPostings0
    print "[10,50):",tempCounter1,tempCounterb,totalNumOfPostings1
    print "[50,100):",tempCounter2,tempCounterc,totalNumOfPostings2
    print "[100,500):",tempCounter3,tempCounterd,totalNumOfPostings3
    print "[500,1000):",tempCounter4,tempCountere,totalNumOfPostings4
    print "[1000,5000):",tempCounter5,tempCounterf,totalNumOfPostings5
    print "[5000,10000):",tempCounter6,tempCounterg,totalNumOfPostings6
    print "[10000,50000):",tempCounter7,tempCounterh,totalNumOfPostings7
    print "[50000):",tempCounter8,tempCounteri,totalNumOfPostings8
    sum1 = tempCounter0 + tempCounter1 + tempCounter2 + tempCounter3 + tempCounter4 + tempCounter5 + tempCounter6 + tempCounter7 + tempCounter8
    sum2 = tempCountera + tempCounterb + tempCounterc + tempCounterd + tempCountere + tempCounterf + tempCounterg + tempCounterh + tempCounteri
    sum3 = totalNumOfPostings0 + totalNumOfPostings1 + totalNumOfPostings2 + totalNumOfPostings3 + totalNumOfPostings4 + totalNumOfPostings5 + totalNumOfPostings6 + totalNumOfPostings7 + totalNumOfPostings8
    print "ALL:",sum1,sum2,sum3
    print "numOfDocumentsNotSeen:",numOfDocumentsNotSeen
    print
# End of this analysis section; the code below is unreachable scratch work.
# NOTE(review): currIFH is never closed here -- the handles leak, one per file.
exit(1)



ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP10_documentResults_OR_UPP-5_1%_20140908"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_results_OR_TOP10_UPP-5_1%_training95K"
ifh = open(ifn,"r")

print "ifn:",ifn
print "ofn:",ofn

ifh = open(ifn,"r")
l = ifh.readline()
qid = ""
docID = ""
trecID = ""
theRank = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid:"):
        qid = le[1]
    if len(le) == 25 and le[-2].startswith("GX") and int(le[0]) <= 10:
        docID = str(le[-3])
        trecID = str(le[-2])
        theRank = str(le[0])
        print qid,theRank,docID,trecID
        ofh.write(qid + " " + str(theRank) + " " + docID + " " + trecID + "\n")
    l = ifh.readline()

ifh.close()
ofh.close()
print "Overall:"
print "ofn:",ofn
exit(1)

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP10_documentResults_OR_unpruned_20140908"
ofh = open(ofn,"w")

ifnNameList = []
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xaa_head95K_OR_rawResult_20140828"
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xab_head95K_OR_rawResult_20140828"
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xac_head95K_OR_rawResult_20140828"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xad_head95K_OR_rawResult_20140828"
ifn4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xae_head95K_OR_rawResult_20140828"
ifn5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xaf_head95K_OR_rawResult_20140828"
ifn6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xag_head95K_OR_rawResult_20140828"
ifn7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xah_head95K_OR_rawResult_20140828"
ifn8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xai_head95K_OR_rawResult_20140828"
ifn9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xaj_head95K_OR_rawResult_20140828"
ifn10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xak_head95K_OR_rawResult_20140828"
ifn11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xal_head95K_OR_rawResult_20140828"
ifn12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xam_head95K_OR_rawResult_20140828"
ifn13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xan_head95K_OR_rawResult_20140828"
ifn14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xao_head95K_OR_rawResult_20140828"
ifn15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xap_head95K_OR_rawResult_20140828"
ifn16 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xaq_head95K_OR_rawResult_20140828"
ifn17 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xar_head95K_OR_rawResult_20140828"
ifn18 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/95KTrainingQueries/xas_head95K_OR_rawResult_20140828"
ifnNameList.append(ifn0)
ifnNameList.append(ifn1)
ifnNameList.append(ifn2)
ifnNameList.append(ifn3)
ifnNameList.append(ifn4)
ifnNameList.append(ifn5)
ifnNameList.append(ifn6)
ifnNameList.append(ifn7)
ifnNameList.append(ifn8)
ifnNameList.append(ifn9)
ifnNameList.append(ifn10)
ifnNameList.append(ifn11)
ifnNameList.append(ifn12)
ifnNameList.append(ifn13)
ifnNameList.append(ifn14)
ifnNameList.append(ifn15)
ifnNameList.append(ifn16)
ifnNameList.append(ifn17)
ifnNameList.append(ifn18)

for ifn in ifnNameList:
    print "ifn:",ifn
    ifh = open(ifn,"r")
    l = ifh.readline()
    qid = ""
    docID = ""
    trecID = ""
    theRank = ""
    while l:
        le = l.strip().split(" ")
        if l.strip().startswith("qid:"):
            qid = le[1]
        if len(le) == 25 and le[-2].startswith("GX") and int(le[0]) <= 10:
            docID = str(le[-3])
            trecID = str(le[-2])
            theRank = str(le[0])
            ofh.write(qid + " " + str(theRank) + " " + docID + " " + trecID + "\n")
        l = ifh.readline()
    ifh.close()

ifh.close()
ofh.close()
print "Overall:"
print "ofn:",ofn
exit(1)

ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/actualRunning_UPP-5_1%_20140908_godBlessWei"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
qid = ""
documentResultKey = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        print qid
        qid = le[1]
    if len(le) == 25 and le[-2].startswith("GX") and int(le[0]) <= 10:
        currentDocID = le[-3]
        documentResultKey = qid + "_" + currentDocID
        if currentDocID not in top10DocIDDictFromUPP5Method:
            top10DocIDDictFromUPP5Method[documentResultKey] = 1
        else:
            top10DocIDDictFromUPP5Method[documentResultKey] += 1
        # print l.strip()
    l = ifh2.readline()

print "Overall:"
intersectionSize = len( set(top10DocIDDictFromGoldStandard).intersection(set(top10DocIDDictFromUPP5Method)) )
print "intersectionSize:",intersectionSize
print "rate:",intersectionSize / len(top10DocIDDictFromGoldStandard)
ifh1.close()
ifh2.close()
exit(1)

top10DocIDDictFromGoldStandard = {}
top10DocIDDictFromUPP5Method = {}

ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryProcessingCostAnalysis/gov2/rawResults_50%_TOP1000_OR_20140126Night"
ifh1 = open(ifn1,"r")
l = ifh1.readline()
qid = ""
documentResultKey = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        print qid
        qid = le[1]
    if len(le) == 25 and le[-2].startswith("GX") and int(le[0]) <= 10:
        currentDocID = le[-3]
        documentResultKey = qid + "_" + currentDocID
        if documentResultKey not in top10DocIDDictFromGoldStandard:
            top10DocIDDictFromGoldStandard[documentResultKey] = 1
        else:
            top10DocIDDictFromGoldStandard[documentResultKey] += 1
        # print l.strip()
    l = ifh1.readline()
print "len(top10DocIDDictFromGoldStandard):",len(top10DocIDDictFromGoldStandard)

ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/actualRunning_UPP-5_1%_20140908_godBlessWei"
ifh2 = open(ifn2,"r")
l = ifh2.readline()
qid = ""
documentResultKey = ""
while l:
    le = l.strip().split(" ")
    if l.strip().startswith("qid"):
        print qid
        qid = le[1]
    if len(le) == 25 and le[-2].startswith("GX") and int(le[0]) <= 10:
        currentDocID = le[-3]
        documentResultKey = qid + "_" + currentDocID
        if currentDocID not in top10DocIDDictFromUPP5Method:
            top10DocIDDictFromUPP5Method[documentResultKey] = 1
        else:
            top10DocIDDictFromUPP5Method[documentResultKey] += 1
        # print l.strip()
    l = ifh2.readline()

print "Overall:"
intersectionSize = len( set(top10DocIDDictFromGoldStandard).intersection(set(top10DocIDDictFromUPP5Method)) )
print "intersectionSize:",intersectionSize
print "rate:",intersectionSize / len(top10DocIDDictFromGoldStandard)
ifh1.close()
ifh2.close()
exit(1)


termDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/debug_term_related"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(" ")
    term = le[0]
    termDict[term] = 1
    l = ifh0.readline()
print "len(termDict):",len(termDict)
ifh0.close()

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/gov2_UPP-5_1%_LexiconTermID_Term_ListLength_sortedByAlphabeticalOrder"
ofh = open(ofn,"w")
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_wholeLexiconTermID_Term_ListLength_sortedByAlphabeticalOrder"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    term = le[1]
    if term in termDict:
        print l.strip()
        ofh.write(l)
    l = ifh.readline()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)






# AND, result kept vs. % of size
#x = np.array([0.3035,0.4255,0.5584,0.6886,0.8131,0.9127])
#y = np.array([0.3,0.4,0.5,0.6,0.7,0.8])

# AND, result kept vs. QPC
#x = np.array([0.3035,0.4255,0.5584,0.6886,0.8131,0.9127])
#y = np.array([0.1460,0.2390,0.3479,0.4724,0.6035,0.7503])

# OR, result kept vs. % of size
# x = np.array([0.3816,0.5369,0.6978,0.8437,0.9283])
# y = np.array([0.5,0.6,0.7,0.8,0.9])

# OR, result kept vs. QPC
x = np.array([0.3816,0.5369,0.6978,0.8437,0.9283])
y = np.array([0.3479,0.4724,0.6035,0.7503,0.9058])

A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, y)[0]
print m, c

x = 0.4
print x,m*x + c
x = 0.5
print x,m*x + c
x = 0.6
print x,m*x + c
x = 0.7
print x,m*x + c
x = 0.8
print x,m*x + c
x = 0.9
print x,m*x + c

'''
# import matplotlib.pyplot as plt
plt.plot(x, y, 'o', label='Original data', markersize=10)
plt.plot(x, m*x + c, 'r', label='Fitted line')
plt.legend()
plt.show()
'''
exit(1)

'''
1%
3%
5%
10%
15%
20%
30%
40%
50%
60%
70%
80%
90%
100%
'''
# Query-processing cost of the first tier at each candidate index size
# (the "100%" entry is the cost of the full, untiered index).
percentageCostDict = {
    '1%':   171048091,
    '3%':   565262583,
    '5%':   1017065458,
    '10%':  2390796702,
    '15%':  4076504567,
    '20%':  6246996029,
    '30%':  10234347287,
    '40%':  16757492535,
    '50%':  24390650531,
    '60%':  33117581898,
    '70%':  42312652906,
    '80%':  52599555035,
    '90%':  63502098839,
    '100%': 70108204014,
}


# QPM = ["AND","OR"]
QPM = ["OR"]
# QPM = ["AND"]
# Candidate first-tier sizes; the second tier is always the full (100%) index.
fileNameList = ["1%","3%","5%","10%","15%","20%","30%","40%","50%","60%","70%","80%","90%","100%"]
ifnBasePart1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/xDoc_documentAllocationRevisited_since20140904/final/raw_xDoc_firstTierGain_"

ifn = ""
# Cost of answering every query on the full index (= percentageCostDict['100%']).
costD = 70108204014
for part2 in QPM:
    for part1 in fileNameList:
        ifn = ifnBasePart1 + part1 + "_" + part2 + "_extended_sorted"
        print "ifn:",ifn
        firstTierPercentageStr = part1
        costN = percentageCostDict[firstTierPercentageStr]
        
        print "1st tier size:",firstTierPercentageStr
        print "2ed tier size:","100%"
        # 1st pass: overall quality/cost if every query stops at the first tier.
        # Per line: le[1] = docs retrieved from first tier, le[3] = docs in total.
        ifh = open(ifn,"r")
        l = ifh.readline()
        qualityN = 0
        qualityD = 0
        beginQuality = 0
        beginCost = 0
        while l:
            le = l.strip().split(" ")
            qualityN += int(le[1])
            qualityD += int(le[3])
            l = ifh.readline()
        beginQuality = qualityN/qualityD
        beginCost = costN/costD
        #print "Overall:"
        #print "beginQuality:",beginQuality
        #print "beginCost:",beginCost
        #print
        #print "qualityN:",qualityN
        #print "qualityD:",qualityD
        #print "costN:",costN
        #print "costD:",costD
        #print "N/A",str(beginQuality),str(beginCost),"0","0.0"
        ifh.close()
        
        # 2ed pass: let queries "fall through" to the second tier, in the file's
        # (gain-sorted) order, until each quality target is reached.
        # NOTE(review): qualityN and costN deliberately keep accumulating across
        # successive targets -- fall-through is cumulative; confirm intent.
        NUM_OF_QUERIS = 4981
        targetQuality = [0.4,0.5,0.6,0.7,0.8,0.9]
        currQuality = beginQuality
        currCost = beginCost
        ifh = open(ifn,"r")
        l = ifh.readline()
        numOfQueriesFallingThrough = 1
        for currTargetValue in targetQuality:
            if currQuality > currTargetValue:
                pass
            else:
                
                while l:
                    # le[3]-le[1]: extra docs gained by falling through;
                    # le[4]: full-index cost of this query.
                    le = l.strip().split(" ")
                    qualityN += int(le[3]) - int(le[1])
                    costN += int(le[4])
                    l = ifh.readline()
                    numOfQueriesFallingThrough += 1
                    currQuality = qualityN/qualityD
                    currCost = costN/costD
                    if currQuality > currTargetValue:
                        #print "currTargetValue:",currTargetValue
                        #print "currQuality:",currQuality
                        #print "currCost:",currCost
                        #print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
                        #print "%OfQueriesFallingThrough:",numOfQueriesFallingThrough/NUM_OF_QUERIS
                        #print
                        print currTargetValue,currQuality,currCost,numOfQueriesFallingThrough,numOfQueriesFallingThrough/NUM_OF_QUERIS,firstTierPercentageStr,"100%",part1,beginQuality,beginCost,currQuality-beginQuality,currCost-beginCost
                        break
        print
        ifh.close()
exit(1)

mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/xDoc_documentAllocationRevisited_since20140904/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
print len(f)
for ifn in f:
    if ifn.endswith("_extended"):
        path = mypath + ifn
        outputPath = path + "_sorted"
        print "sort --key=6 -g -r",path,">",outputPath,"&"
exit(1)


qidWithFullCostDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/final/ptPowTo0_step3_final_19"
ifh = open(ifn,"r")
for l in ifh.readlines():
    le = l.strip().split(" ")
    qid = le[0]
    currentCost = int(le[4])
    qidWithFullCostDict[qid] = currentCost
ifh.close()
print "Overall:"
print "len(qidWithFullCostDict):",len(qidWithFullCostDict)

mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/xDoc_documentAllocationRevisited_since20140904/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
print len(f)
for ifn in f:
    path = mypath + ifn
    print path
    outputPath = path + "_" + "extended"
    ifh = open(ifn,"r")
    ofh = open(outputPath,"w")
    
    for l in ifh.readlines():
        le = l.strip().split(" ")
        qid = le[0]
        docRetrievedFromFirstTier = le[1]
        docInTotal = le[2]
        gainCostRate = 0.0
        
        if qid in qidWithFullCostDict:
            gainCostRate = (float(docInTotal) - float(docRetrievedFromFirstTier)) / float(qidWithFullCostDict[qid])
            ofh.write(qid + " " + docRetrievedFromFirstTier + " " + "N/A" + " " + docInTotal + " " + str(qidWithFullCostDict[qid]) + " " + str(gainCostRate) + "\n")
        else:
            ofh.write(qid + " " + docRetrievedFromFirstTier + " " + "N/A" + " " + "0" + " " + "0" + " " + str(gainCostRate) + "\n")
    ifh.close()
    ofh.close()
    print "Overall:"
    print "path:",path
    print "outputPath:",outputPath    
exit(1)



# 1st pass setup: map tier index 0..19 to its human-readable first-tier size
# (index 0 is a debug configuration; 1-10 are 1%..10%; 11-19 are 15%..90%).
_percentLabels = (["debug%"]
                  + ["%d%%" % n for n in range(1, 11)]
                  + ["15%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%"])
indexNumAndPercentageMappingDict = dict(enumerate(_percentLabels))

# One result-file family per pruning-power setting.
fileNameList = ["ptPowTo0","ptPowToDot1","ptPowToDot3","ptPowToDot5","ptPowToDot7","ptPowToDot9","ptPowTo1"]

ifnBasePart1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tiering/final/"
ifnBasePart2 = "_step3_final_"
ifn = ""
for part1 in fileNameList:
    ifnBase = ifnBasePart1 + part1 + ifnBasePart2
    for i in range(0,20):
        ifn = ifnBase + str(i)
        print "ifn:",ifn
        print "1st tier size:",indexNumAndPercentageMappingDict[i]
        print "2ed tier size:","100%"
        ifh = open(ifn,"r")
        l = ifh.readline()
        costN = 0
        costD = 0
        qualityN = 0
        qualityD = 0
        beginQuality = 0
        beginCost = 0
        while l:
            le = l.strip().split(" ")
            qualityN += int(le[1])
            qualityD += int(le[3])
            costN += int(le[2])
            costD += int(le[4])
            l = ifh.readline()
        beginQuality = qualityN/qualityD
        beginCost = costN/costD
        #print "Overall:"
        #print "beginQuality:",beginQuality
        #print "beginCost:",beginCost
        #print
        print "N/A",str(beginQuality),str(beginCost),"0","0.0"
        ifh.close()
        
        # 2ed pass
        NUM_OF_QUERIS = 4981
        targetQuality = [0.4,0.5,0.6,0.7,0.8,0.9]
        currQuality = beginQuality
        currCost = beginCost
        ifh = open(ifn,"r")
        l = ifh.readline()
        numOfQueriesFallingThrough = 1
        for currTargetValue in targetQuality:
            if currQuality > currTargetValue:
                pass
            else:
                
                while l:
                    le = l.strip().split(" ")
                    qualityN += int(le[3]) - int(le[1])
                    costN += int(le[4])
                    l = ifh.readline()
                    numOfQueriesFallingThrough += 1
                    currQuality = qualityN/qualityD
                    currCost = costN/costD
                    if currQuality > currTargetValue:
                        #print "currTargetValue:",currTargetValue
                        #print "currQuality:",currQuality
                        #print "currCost:",currCost
                        #print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
                        #print "%OfQueriesFallingThrough:",numOfQueriesFallingThrough/NUM_OF_QUERIS
                        #print
                        print currTargetValue,currQuality,currCost,numOfQueriesFallingThrough,numOfQueriesFallingThrough/NUM_OF_QUERIS,indexNumAndPercentageMappingDict[i],"100%",part1,beginQuality,beginCost,currQuality-beginQuality,currCost-beginCost
                        break
        print
        ifh.close()
exit(1)

# --- Annotate TOP-K score lines with per-document posting counts ---
# Load a trecID -> number-of-postings table (columns: docID trecID count),
# then append that count to each line of a score file keyed by trecID.
trecIDAndNumOfPostingsDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_docID_trecID_numOfPostings"
ifh0 = open(ifn0,"r")
l = ifh0.readline()
while l:
    le = l.strip().split(" ")
    trecID = le[1]
    numOfPostings = int(le[2])
    trecIDAndNumOfPostingsDict[trecID] = numOfPostings 
    l = ifh0.readline()

print "len(trecIDAndNumOfPostingsDict):",len(trecIDAndNumOfPostingsDict)

# TOP1000
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP1000_docHitScores_sortedByScore1_docSizeAdded_20140903"
# TOP100
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP100_docHitScores_sortedByScore1_docSizeAdded_20140903"
# TOP10
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitScores_sortedByScore1_docSizeAdded_20140903"
ofh = open(ofn,"w")

# TOP1000
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP1000_docHitScores_sortedByScore1_20140903"
# TOP100
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP100_docHitScores_sortedByScore1_20140903"
# TOP10
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitScores_sortedByScore1_20140903"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    trecID = le[0]
    # KeyError here would mean a trecID missing from the postings table.
    ofh.write(l.strip() + " " + str(trecIDAndNumOfPostingsDict[trecID]) + "\n")
    l = ifh.readline()

ifh.close()
ofh.close()
print "Overall:"
print "ifn0:",ifn0
print "ifn:",ifn
print "ofn:",ofn
exit(1)


# --- Convert per-document hit ranks into two aggregate scores ---
# Input lines: trecID numOfRanks rank1 rank2 ...
# TOP1000, 9.9M, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP1000_docHitScores_20140902"
# TOP100, 9.9M, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP100_docHitScores_20140902"
# TOP10, 9.9M, AND
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitScores_20140902"

# TOP1000, 0.1M, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP1000_docHitScores_20140902"
# TOP100, 0.1M, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP100_docHitScores_20140902"
# TOP10, 0.1M, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP10_docHitScores_20140902"

# TOP1000, 95K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP1000_docHitScores_20140903"
# TOP100, 95K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP100_docHitScores_20140903"
# TOP10, 95K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP10_docHitScores_20140903"

# TOP1000, 5K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP1000_docHitScores_20140902"
# TOP100, 5K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP100_docHitScores_20140902"
# TOP10, 5K, AND
# ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP10_docHitScores_20140902"

ofh = open(ofn,"w")

# TOP1000, 9.9M, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP1000_docHitRanks_20140902"
# TOP100, 9.9M, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP100_docHitRanks_20140902"
# TOP10, 9.9M, AND
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/9.9MFakeQueries_AND_TOP10_docHitRanks_20140902"

# TOP1000, 0.1M, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP1000_docHitRanks_20140902"
# TOP100, 0.1M, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP100_docHitRanks_20140902"
# TOP10, 0.1M, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/0.1MFakeQueries_AND_TOP10_docHitRanks_20140902"

# TOP1000, 95K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP1000_docHitRanks_20140902"
# TOP100, 95K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP100_docHitRanks_20140902"
# TOP10, 95K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTraining_AND_TOP10_docHitRanks_20140902"

# TOP1000, 5K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP1000_docHitRanks_20140902"
# TOP100, 5K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP100_docHitRanks_20140902"
# TOP10, 5K, AND
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTesting_AND_TOP10_docHitRanks_20140902"

ifh = open(ifn,"r")

l = ifh.readline()
while l:
    le = l.strip().split(" ")
    trecID = le[0]
    numOfRanks = int(le[1])
    # Score 1: sum of reciprocal (rank+1); score 2: sum of 1/ln(rank+1).
    # NOTE(review): score 2 divides by math.log(rank+1), which is 0 for
    # rank 0 -- presumably ranks start at 1; confirm against the producer.
    theTotalScore1 = 0.0
    theTotalScore2 = 0.0
    for i in le[2:]:
        theTotalScore1 += 1.0/(float(i) + 1)
        theTotalScore2 += 1.0/math.log(float(i)+1)
    # print trecID,str(theTotalScore1),str(theTotalScore2)
    ofh.write(trecID + " " + str(theTotalScore1) + " " + str(theTotalScore2) + "\n")
    l = ifh.readline()

ifh.close()
ofh.close()

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# --- Bucket posting hits against a gold frequency file ---
# counter00..counter8 accumulate the observed hit counts per gold-frequency
# bucket; counteraa..counteri count the number of postings per bucket.
counter00 = 0
counter1 = 0
counter2 = 0
counter3 = 0
counter4 = 0
counter5 = 0
counter6 = 0
counter7 = 0
counter8 = 0

counteraa = 0
counterb = 0
counterc = 0
counterd = 0
countere = 0
counterf = 0
counterg = 0
counterh = 0
counteri = 0

totalPostingSeen = 0
totalHit = 0

# Posting key -> observed hit count from the training/testing histogram.
postingKeyDict = {}
# 95K
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTrainingQueries_TOP1000_postingHitFreq_ALL_20140901histogram"
# 5K
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTestingQueries_TOP1000_postingHitFreq_ALL_20140901histogram"
print "ifn:",ifn
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    postingKey = le[0]
    postingHit = int(le[1])
    if postingKey not in postingKeyDict:
        postingKeyDict[postingKey] = postingHit
        #if len(postingKeyDict) == 1000:
        #    break
    else:
        # Duplicate keys keep the first value seen.
        pass
    l = ifh.readline()
print "len(postingKeyDict):",len(postingKeyDict)
ifh.close()

# Walk the gold file; for each posting that also appears in postingKeyDict,
# bucket it by its gold frequency (score1) and accumulate the observed hits.
ifnGold = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_ALL_20140831"
print "ifnGold:",ifnGold
ifnGoldHandler = open(ifnGold,"r")
l = ifnGoldHandler.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    postingKey = le[0]
    score1 = int(le[1])
    if postingKey in postingKeyDict:
        totalPostingSeen += 1
        totalHit += score1
        # Buckets: [1,10), [10,50), [50,100), [100,500), [500,1000),
        # [1000,5000), [5000,10000), [10000,50000), [50000,inf).
        if score1 >= 1 and score1 < 10:
            counter00 += postingKeyDict[postingKey]
            counteraa += 1
        if score1 >= 10 and score1 < 50:
            counter1 += postingKeyDict[postingKey]
            counterb += 1
        if score1 >= 50 and score1 < 100:
            counter2 += postingKeyDict[postingKey]
            counterc += 1
        if score1 >= 100 and score1 < 500:
            counter3 += postingKeyDict[postingKey]
            counterd += 1
        if score1 >= 500 and score1 < 1000:
            counter4 += postingKeyDict[postingKey]
            countere += 1
        if score1 >= 1000 and score1 < 5000:
            counter5 += postingKeyDict[postingKey]
            counterf += 1
        if score1 >= 5000 and score1 < 10000:
            counter6 += postingKeyDict[postingKey]
            counterg += 1
        if score1 >= 10000 and score1 < 50000:
            counter7 += postingKeyDict[postingKey]
            counterh += 1
        if score1 >= 50000:
            counter8 += postingKeyDict[postingKey]
            counteri += 1
        #print postingKey,score1,postingKeyDict[postingKey]
        #if totalPostingSeen == 2:
        #    break
    l = ifnGoldHandler.readline()
    lineCounter += 1
    if lineCounter % 100000 == 0:
        print lineCounter,"line processed."
ifnGoldHandler.close()

print "Overall:"
print "0",counter00,counteraa
print "1",counter1,counterb
print "2",counter2,counterc
print "3",counter3,counterd
print "4",counter4,countere
print "5",counter5,counterf
print "6",counter6,counterg
print "7",counter7,counterh
print "8",counter8,counteri
print "totalPostingSeen:",totalPostingSeen
print "totalHit:",totalHit
exit(1)

# --- Build a posting-key hit-frequency histogram from raw hit lines ---
# Key is "le[1]_le[2]" (docID/term pair -- TODO confirm the column meaning);
# value is how many times that key appears in the input.
postingKeyHitFreqHistogramDict = {}
# ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_ALL_20140831"
# ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/95KTrainingQueries_TOP1000_postingHitFreq_ALL_20140901"
ifn3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTestingQueries_TOP1000_postingHitFreq_ALL_20140901"
ifh = open(ifn3,"r")

ofn = ifn3 + "histogram"
ofh = open(ofn,"w")

l = ifh.readline()
while l:
    le = l.strip().split(" ")
    postingKey = le[1] + "_" + le[2]
    if postingKey not in postingKeyHitFreqHistogramDict:
        postingKeyHitFreqHistogramDict[postingKey] = 1
        if len(postingKeyHitFreqHistogramDict) % 10000 == 0:
            print "len(postingKeyHitFreqHistogramDict):",len(postingKeyHitFreqHistogramDict),"recorded."
    else:
        postingKeyHitFreqHistogramDict[postingKey] += 1

    l = ifh.readline()
ifh.close()
print "Overall:"
for postingKey in postingKeyHitFreqHistogramDict:
    ofh.write(str(postingKey) + " " + str(postingKeyHitFreqHistogramDict[postingKey]) + "\n")
print "ofn:",ofn
exit(1)


# --- Filter raw posting-hit lines to those whose last field is a TREC
# document id (GOV2 trecIDs start with "GX") ---
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/5KTestingQueries_postingHitFreq_ALL_20140901"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_5KTestingQueries_postingHitFreq_ALL_20140901"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    if le[-1].startswith("GX"):
        # l still carries its trailing newline, so write it unchanged.
        ofh.write(l)
    l = ifh.readline()

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

# --- Count distinct query strings, reporting how many new (unseen) queries
# appear in each one-million-line window ---
queryContentDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/10M.100KQ.5gram.ModKN.txt_toolkitCompatible"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
numOfQueriesNotSeen = 0
while l:
    # Lines are "queryID:queryContent".
    le = l.strip().split(":")
    queryContent = le[1]
    if queryContent not in queryContentDict:
        queryContentDict[queryContent] = 1
        numOfQueriesNotSeen += 1
    else:
        queryContentDict[queryContent] += 1
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        # print "lineCounter:",lineCounter,"processed."
        print lineCounter,len(queryContentDict),numOfQueriesNotSeen
        numOfQueriesNotSeen = 0
        # print

print "Overall:"
print "ifn:",ifn
ifh.close()
exit(1)

# --- Emit "generationFrequency queryContent" for every distinct query ---
queryContentDict = {}

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/10M.100KQ.5gram.ModKN.txt_contentAndGenerateFreq_20140901"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/10M.100KQ.5gram.ModKN.txt_toolkitCompatible"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(":")
    queryID = le[0]
    queryContent = le[1]
    
    #if queryContent.strip() == "":
    #    print queryID,queryContent
    #    exit(1)
    
    if queryContent not in queryContentDict:
        queryContentDict[queryContent] = 1
    else:
        queryContentDict[queryContent] += 1
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."

print "Overall:"
for queryContent in queryContentDict:
    ofh.write(str(queryContentDict[queryContent]) + " " + queryContent + "\n")
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)



print "Today, 20140901, let's start the game."
# --- Pass A: bucket postings by their own hit count ---
# current hit for each bucket
counter00 = 0
counter1 = 0
counter2 = 0
counter3 = 0
counter4 = 0
counter5 = 0
counter6 = 0
counter7 = 0
counter8 = 0

# current # of posting for each bucket
counteraa = 0
counterb = 0
counterc = 0
counterd = 0
countere = 0
counterf = 0
counterg = 0
counterh = 0
counteri = 0

totalHit = 0
totalNumOfPostings = 0

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_ALL_20140831"
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
while l:
    # Line format: postingKey hitCount
    le = l.strip().split(" ")
    postingKey = le[0]
    score1 = int(le[1])
    totalHit += score1
    totalNumOfPostings += 1
    # Buckets: [1,10), [10,50), [50,100), [100,500), [500,1000),
    # [1000,5000), [5000,10000), [10000,50000), [50000,inf).
    if score1 >= 1 and score1 < 10:
        counter00 += score1
        counteraa += 1
    if score1 >= 10 and score1 < 50:
        counter1 += score1
        counterb += 1
    if score1 >= 50 and score1 < 100:
        counter2 += score1
        counterc += 1
    if score1 >= 100 and score1 < 500:
        counter3 += score1
        counterd += 1
    if score1 >= 500 and score1 < 1000:
        counter4 += score1
        countere += 1
    if score1 >= 1000 and score1 < 5000:
        counter5 += score1
        counterf += 1
    if score1 >= 5000 and score1 < 10000:
        counter6 += score1
        counterg += 1
    if score1 >= 10000 and score1 < 50000:
        counter7 += score1
        counterh += 1
    if score1 >= 50000:
        counter8 += score1
        counteri += 1
    
    l = ifh.readline()
    lineCounter += 1
    
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."
    
ifh.close()
print "Overall:"
print "current hit for each bucket"
print "counter00:",counter00
print "counter1:",counter1
print "counter2:",counter2
print "counter3:",counter3
print "counter4:",counter4
print "counter5:",counter5
print "counter6:",counter6
print "counter7:",counter7
print "counter8:",counter8
print "current # of postings for each bucket"
print "counteraa:",counteraa
print "counterb:",counterb
print "counterc:",counterc
print "counterd:",counterd
print "countere:",countere
print "counterf:",counterf
print "counterg:",counterg
print "counterh:",counterh
print "counteri:",counteri
print "totalNumOfPostings:",totalNumOfPostings
print "totalHit:",totalHit
exit(1)

# --- Pass B: same buckets, but here column 0 is the hit-count bucket key
# and column 1 is how many postings had that hit count (a histogram) ---
counter00 = 0
counter1 = 0
counter2 = 0
counter3 = 0
counter4 = 0
counter5 = 0
counter6 = 0
counter7 = 0
counter8 = 0

counteraa = 0
counterb = 0
counterc = 0
counterd = 0
countere = 0
counterf = 0
counterg = 0
counterh = 0
counteri = 0

totalHit = 0
totalNumOfPostings = 0

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_ALL_20140831"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    postingKey = int(le[0])
    score1 = int(le[1])
    # Total hits = hitCount * numberOfPostingsWithThatCount.
    totalHit += postingKey * score1
    totalNumOfPostings += score1
    if postingKey >= 1 and postingKey < 10:
        counter00 += score1
    if postingKey >= 10 and postingKey < 50:
        counter1 += score1
    if postingKey >= 50 and postingKey < 100:
        counter2 += score1
    if postingKey >= 100 and postingKey < 500:
        counter3 += score1
    if postingKey >= 500 and postingKey < 1000:
        counter4 += score1
    if postingKey >= 1000 and postingKey < 5000:
        counter5 += score1
    if postingKey >= 5000 and postingKey < 10000:
        counter6 += score1
    if postingKey >= 10000 and postingKey < 50000:
        counter7 += score1
    if postingKey >= 50000:
        counter8 += score1
    l = ifh.readline()
ifh.close()
print "Overall:"
print "counter00:",counter00
print "counter1:",counter1
print "counter2:",counter2
print "counter3:",counter3
print "counter4:",counter4
print "counter5:",counter5
print "counter6:",counter6
print "counter7:",counter7
print "counter8:",counter8
print "totalHit:",totalHit
print "totalNumOfPostings:",totalNumOfPostings
exit(1)

# --- Generate shell commands to run collectThePostings.py over every fake
# query result file (commands are printed, not executed) ---
basePath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/10MPostingHitCollection/"
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/9Dot9MFakeQueries/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
print len(f)
fileListToProcess = []
for ifn in f:
    path = mypath + ifn
    fileListToProcess.append(path)
fileListToProcess.append("/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/or_semantics_results/dot1MFakeQueries/xau_heldout_OR_rawResult")
print "len(fileListToProcess):",len(fileListToProcess)
for ifn in fileListToProcess:
    print "python /home/vgc/wei/workspace/NYU_IRTK/scripts/src/pythonScripts/pruning/collectThePostings.py " + ifn + " > " + basePath + ifn.strip().split("/")[-1] + "_postingHitCollection" + " &"
exit(1)

# --- Merge per-file histogram files into one overall hit-frequency table ---
postingKeyFreqHitDict = {}
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/and_semantics_results/10MPostingHitCollection/"
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_ALL_20140831"
ofh = open(ofn,"w")
print "ofn:",ofn
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    # Only the top-level directory is wanted.
    break
print len(f)

fileCounter = 0
totalHitCounter = 0
for ifn in f:
    if ifn.endswith("_histogram"):
        path = mypath + ifn
        print fileCounter,"processing ",path
        ifh = open(path,"r")
        l = ifh.readline()
        while l:
            le = l.strip().split(" ")
            postingKey = le[0]
            # NOTE(review): le[0] is immediately overwritten; the table is
            # keyed by the integer count in le[1] -- confirm this is the
            # intended aggregation.
            postingKey = int(le[1])
            totalHitCounter += postingKey
            if postingKey not in postingKeyFreqHitDict:
                postingKeyFreqHitDict[postingKey] = postingKey
                #if len(postingKeyFreqHitDict) % 100 == 0:
                #    break 
            else:
                postingKeyFreqHitDict[postingKey] += postingKey
            
            l = ifh.readline()     
        ifh.close()
        print "totalHitCounter:",totalHitCounter
        print "len(postingKeyFreqHitDict):",len(postingKeyFreqHitDict)
        print
        fileCounter += 1

print "Overall:"
for postingKey in postingKeyFreqHitDict:
    ofh.write(str(postingKey) + " " + str(postingKeyFreqHitDict[postingKey]) + "\n")
print "ofn:",ofn
ofh.close()
exit(1)

# --- Bucket a sorted histogram file (hitCount numberOfPostings per line)
# into the standard hit-count ranges ---
counter00 = 0
counter1 = 0
counter2 = 0
counter3 = 0
counter4 = 0
counter5 = 0
counter6 = 0
counter7 = 0
counter8 = 0
totalHit = 0
totalNumOfPostings = 0

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/histogram_ALL_sorted_20140831"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    postingKey = int(le[0])
    score1 = int(le[1])
    totalHit += postingKey * score1
    totalNumOfPostings += score1
    # Buckets: [1,10), [10,50), [50,100), [100,500), [500,1000),
    # [1000,5000), [5000,10000), [10000,50000), [50000,inf).
    if postingKey >= 1 and postingKey < 10:
        counter00 += score1
    if postingKey >= 10 and postingKey < 50:
        counter1 += score1
    if postingKey >= 50 and postingKey < 100:
        counter2 += score1
    if postingKey >= 100 and postingKey < 500:
        counter3 += score1
    if postingKey >= 500 and postingKey < 1000:
        counter4 += score1
    if postingKey >= 1000 and postingKey < 5000:
        counter5 += score1
    if postingKey >= 5000 and postingKey < 10000:
        counter6 += score1
    if postingKey >= 10000 and postingKey < 50000:
        counter7 += score1
    if postingKey >= 50000:
        counter8 += score1
    l = ifh.readline()
ifh.close()
print "Overall:"
print "counter00:",counter00
print "counter1:",counter1
print "counter2:",counter2
print "counter3:",counter3
print "counter4:",counter4
print "counter5:",counter5
print "counter6:",counter6
print "counter7:",counter7
print "counter8:",counter8
print "totalHit:",totalHit
print "totalNumOfPostings:",totalNumOfPostings
exit(1)




# --- Build a posting hit-frequency histogram from the file named on the
# command line, keeping only lines ending in a trecID ("GX...") ---
postingHitFreqDict = {}
# counter = 0
ifn = sys.argv[1]
ofn = ifn + "_histogram"

print "ifn:",ifn
print "ofn:",ofn

ifh = open(ifn,"r")
ofh = open(ofn,"w")

l = ifh.readline()
while l:
    le = l.strip().split(" ")
    if le[-1].startswith("GX"):
        # Posting key is "le[1]_le[2]" -- TODO confirm the column meaning.
        postingKey = le[1] + "_" + le[2]
        if postingKey not in postingHitFreqDict:
            postingHitFreqDict[postingKey] = 1
        else:
            postingHitFreqDict[postingKey] += 1
        # counter += 1
        # print "l:",l
        #if len(postingHitFreqDict) == 100:
        #    break
    l = ifh.readline()

print "Overall:"
for postingKey in postingHitFreqDict:
    ofh.write(str(postingKey) + " " + str(postingHitFreqDict[postingKey]) + "\n")
print "ifn:",ifn
print "ofn:",ofn
ifh.close()
ofh.close()
exit(1)

# --- Generate shell commands to verify every posting-hit collection file ---
postingHitFreqOverallDict = {}
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/and_semantics_results/postingHitCollection/"

f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    # Only the top-level directory is wanted.
    break
print len(f)
for ifn in f:
    print "python /home/vgc/wei/workspace/NYU_IRTK/scripts/src/pythonScripts/pruning/simplePythonPlayGround.py " + mypath + ifn + " > " + mypath + ifn + "_verify &"
exit(1)

# --- Merge per-file histograms (key count per line) into one overall
# histogram keyed by the string hit-frequency value ---
postingHitFreqOverallDict = {}
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_20140831/histogram/"

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/histogram_ALL_20140831"
ofh = open(ofn,"w")
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print len(f)

for ifn in f:
    path = mypath + ifn
    print "processing ",path
    ifh = open(path,"r")
    l = ifh.readline()
    while l:
        le = l.strip().split(" ")
        postingKey = le[0]
        numOfPostingBelongingTo = int(le[1])
        if postingKey not in postingHitFreqOverallDict:
            postingHitFreqOverallDict[postingKey] = numOfPostingBelongingTo
        else:
            postingHitFreqOverallDict[postingKey] += numOfPostingBelongingTo
        l = ifh.readline()
    ifh.close()

print "Overall:"
for postingHitFreq in postingHitFreqOverallDict:
    ofh.write(str(postingHitFreq) + " " + str(postingHitFreqOverallDict[postingHitFreq]) + "\n")
ofh.close()
exit(1)



'''
# generate the commands:
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_20140831/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print len(f)
for ifn in f:
    print "# python /home/vgc/wei/workspace/NYU_IRTK/scripts/src/pythonScripts/pruning/simplePythonPlayGround.py " + mypath + ifn + " &"
exit(1)
'''

# --- Build a histogram of posting hit counts from the file named on the
# command line, logging progress to a side file so the job can run
# detached without a console ---
ifn = sys.argv[1]
ofn_log = ifn + "_LOG_20140831"
ofn = ifn + "_histogram_20140831"

postingHitFreqDict = {}
#print "ifn:",ifn
#print "ofn_log:",ofn_log
#print "ofn:",ofn
ifh = open(ifn,"r")
l = ifh.readline()
lineCounter = 1
ofh = open(ofn,"w")
ofh2 = open(ofn_log,"w")

while l:
    le = l.strip().split(" ")
    # Keyed by the integer count in column 1.
    postingKey = int(le[1])
    if postingKey not in postingHitFreqDict:
        postingHitFreqDict[postingKey] = 1
        # Fix: the two lines below were tab-indented while the rest of the
        # file uses spaces; mixed tabs/spaces is fragile in Python 2 and a
        # SyntaxError in Python 3.  Behavior is unchanged.
        if len(postingHitFreqDict) % 10000 == 0:
            ofh2.write(str( len(postingHitFreqDict) ) + " recorded." + "\n")
            # print "len(postingHitFreqDict):",len(postingHitFreqDict)
    else:
        postingHitFreqDict[postingKey] += 1
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        # print "lineCounter:",lineCounter,"processed."
        ofh2.write(str( lineCounter ) + " lines processed." + "\n")
# print "Overall:"
# print "ifn:",ifn
# print "ofn:",ofn
# print "ofn_log:",ofn_log
for postingKey in postingHitFreqDict:
    ofh.write(str(postingKey) + " " + str(postingHitFreqDict[postingKey]) + "\n") 
# print "DONE."
ofh2.write(str("DONE processing.") + "\n")
ifh.close()
ofh.close()
ofh2.close()
exit(1)














exit(1)

lowerBound = int(sys.argv[1])
upperBound = int(sys.argv[2])
print "lowerBound:",lowerBound
print "upperBound:",upperBound

postingHitFreqDict = {}
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_TOP100_20140830_" + str(lowerBound) + "_" + str(upperBound)
print "ofn:",ofn
# mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLog/and_semantics_results/"
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/and_semantics_results/postingHitCollection/subPostingMerging/sorted/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print len(f)
# print f

for partOfFileName in f[lowerBound:upperBound]:
    ifn = mypath + partOfFileName
    print "ifn:",ifn
    ifh = open(ifn,"r")
    l = ifh.readline()
    while l:
        le = l.strip().split(" ")
        postingKey = le[0]
        postingKey = int(le[1])
        if postingKey not in postingHitFreqDict:
            postingHitFreqDict[postingKey] = postingKey
        else:
            postingHitFreqDict[postingKey] += postingKey
        l = ifh.readline()
        if len(postingHitFreqDict) % 1000000 == 0:
            print "len(postingHitFreqDict):",len(postingHitFreqDict)
    ifh.close()
    
print "Overall:"
ofh = open(ofn,"w")
for postingKey in postingHitFreqDict:
    ofh.write(postingKey + " " + str(postingHitFreqDict[postingKey]) + "\n")
print "ofn:",ofn
exit(1)

# --- Turn a merged hit-count file into a histogram (hitCount -> number of
# distinct counts seen) ---
postingHitFreqDict = {}
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_TOP1000_20140829_histogram_0_100"
ofh = open(ofn,"w")

ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_TOP1000_20140829_0_100"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    postingKey = le[0]
    # NOTE(review): le[0] is immediately overwritten; the histogram is keyed
    # by the integer count in le[1] -- confirm this is intentional.
    postingKey = int(le[1])
    if postingKey not in postingHitFreqDict:
        postingHitFreqDict[postingKey] = 1
        if len(postingHitFreqDict) % 10000 == 0:
            print "len(postingHitFreqDict):",len(postingHitFreqDict)
    else:
        postingHitFreqDict[postingKey] += 1

    l = ifh.readline()

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
for postingKey in postingHitFreqDict:
    ofh.write(str(postingKey) + " " + str(postingHitFreqDict[postingKey]) + "\n") 
ifh.close()
ofh.close()
exit(1)



# --- Count TOP-K hits per trecID from a raw result file ---
# Only well-formed result lines (25 fields, trecID in the second-to-last
# column) within the top TOPK ranks are counted.
TOPK = 1000
trecIDHitFreqDict = {}
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trecIDHitsCollection_notSorted_TOP1000_AND_testingQueries_20140829_sanityCheck"
ofh = open(ofn,"w")

# option1:
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_TOP1000_AND_testingQueries_20140827"
# option2:
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryProcessingCostAnalysis/gov2/rawResults_50%_TOP1000_OR_20140126Night"
ifh = open(ifn,"r")
l = ifh.readline()
while l:
    le = l.strip().split(" ")
    if len(le) == 25 and le[-2].startswith("GX"):
        currTrecID = le[-2]
        theRank = int(le[0])
        if theRank <= TOPK:
            if currTrecID not in trecIDHitFreqDict: 
                trecIDHitFreqDict[currTrecID] = 1
            else:
                trecIDHitFreqDict[currTrecID] += 1
    else:
        pass
    l = ifh.readline()

print "Overall:"
print "ofn:",ofn
for trecID in trecIDHitFreqDict:
    ofh.write(trecID + " " + str(trecIDHitFreqDict[trecID]) + "\n")
ifh.close()
ofh.close()
exit(1)





# --- Histogram of hit frequencies (freq -> how many postings have it) ---
hitFreqDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_TOP1000_20140828_30_40"
ifh = open(ifn,"r")

ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/postingHitFreq_TOP1000_20140829_histogram_30_40"
ofh = open(ofn,"w")

l = ifh.readline()
lineCounter = 1
while l:
    le = l.strip().split(" ")
    postingKey = le[0]
    freq = int(le[1])
    
    if freq in hitFreqDict:
        hitFreqDict[freq] += 1
    else:
        hitFreqDict[freq] = 1
    
    l = ifh.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter

print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
for currHitFreq in hitFreqDict:
    ofh.write(str(currHitFreq) + " " + str(hitFreqDict[currHitFreq]) + "\n")
ifh.close()
ofh.close()
exit(1)

trecIDAndHitsDict = {}
# mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLog/and_semantics_results/"
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/and_semantics_results/postingHitCollection/subPostingMerging/sorted/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
print len(f)
# print f

if len(f) % 2 == 0:
    numOfJoins = len(f) / 2
    baseIndex = 0
    for i in range(0,numOfJoins):
        f1 = mypath + f[baseIndex]
        f2 = mypath + f[baseIndex + 1]
        #print f1
        #print f2
        #print
        print "join -a1 -a2 " + f1 + " " + f2 + " > " + str(baseIndex) + "_" + str(baseIndex+1)+ "_sorted_round1 &"
        baseIndex += 2
exit(1)

# --- Emit shell "sort" commands for every sub-posting file ---
trecIDAndHitsDict = {}
# mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLog/and_semantics_results/"
mypath = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/fakeQueryLogRelated/and_semantics_results/postingHitCollection/subPostingMerging/"
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    # Only the top-level directory is wanted.
    break
print len(f)
for ifn in f:
    print "# sort --key=1 " + mypath + ifn + " > " + mypath + ifn + "_sorted &"
exit(1)

# --- Prefix every query line with "lineNumber:" to make the query log
# toolkit-compatible ---
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/10M.100KQ.5gram.ModKN.txt_toolkitCompatible"
ofh = open(ofn,"w")
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/10M.100KQ.5gram.ModKN.txt"
ifh = open(ifn,"r")
l = ifh.readline()
counter = 1
while l:
    # l retains its newline, so no "\n" is appended here.
    ol = str(counter) + ":" + l
    ofh.write(ol)
    l = ifh.readline()
    counter += 1
ifh.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)

# --- Find which 95K training queries also appear in the HJ topic set ---
HJQueriesDict = {}
ifn0 = "/home/vgc/wei/workspace/NYU_IRTK/data/gov2_04-06.topics.polyIRTKCompatibleMode"
ifh = open(ifn0,"r")
for line in ifh.readlines():
    queryContent = line.strip().split(":")[1]
    HJQueriesDict[queryContent] = 1
print HJQueriesDict
ifh.close()

tempCounter = 0
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2_100KQueries_head95K"
ifh = open(ifn1,"r")
for line in ifh.readlines():
    lineElements = line.strip().split(":")
    queryID = lineElements[0]
    queryContent = lineElements[1]
    # print queryID,queryContent
    if queryContent in HJQueriesDict:
        # Print the ID of every training query that matches an HJ topic.
        print queryID
        tempCounter += 1
ifh.close()
exit(1)

postingDict1 = {}
ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/dynamicWeight_5_ptPowTo1_unigram_20140722"
ifh1 = open(ifn1,"r")
cl = ifh1.readline()
while cl:
    cle = cl.strip().split(" ")
    postingDict1[cle[0]] = 1
    cl = ifh1.readline()
ifh1.close()
print "len(postingDict1):",len(postingDict1)

postingDict2 = {}
ifn2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/dynamicWeight_1_bigrams-0.78-qb.dyn_bigram_20140722"
ifh2 = open(ifn2,"r")
cl = ifh2.readline()
while cl:
    cle = cl.strip().split(" ")
    postingDict2[cle[0]] = 1
    cl = ifh2.readline()
ifh1.close()
print "len(postingDict2):",len(postingDict2)

print "OVERALL:"
intersectionSize = len( set(postingDict1).intersection(set(postingDict2)) )
unionSize = len( set(postingDict1).union(set(postingDict2)) )
print "intersectionSize:",intersectionSize
print "unionSize:",unionSize
print "rate:",intersectionSize / unionSize
exit(1)


# Scan an evaluation log and print every "---TOP10PostingRetrieved-->" record
# as "<field1>_<field2> <field3> <field4>", stopping when the third "OVERALL:"
# section marker is reached.
tempCounter = 0      # number of "OVERALL:" section markers seen so far
numTOPPosting = 0    # number of TOP10-posting lines seen (only used by the commented prints)
numTOPDR = 0         # only used by the commented-out DR counting below
# unigram
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/evaluate_ALL_IN_ONE_GOV2_cutoff100_queryLength_ALL_dynamicWeight_5_ptChangedFromJuan_ptPowTo1_originalPTopK_20140722_TOP10PostingRecorded"
# bigram
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/evaluate_ALL_IN_ONE_GOV2_cutoff100_queryLength_ALL_dynamicWeight_1_ptChangedFromJuan_bigrams-0.78-qb.dyn_20140722_TOP10PostingRecorded"
ifh = open(ifn,"r")
cl = ifh.readline()
while cl:
    if cl.strip().startswith("OVERALL:"):
        tempCounter += 1

    if cl.startswith("---TOP10PostingRetrieved-->"):
        numTOPPosting += 1
        cle = cl.strip().split(" ")
        # cle[1]/cle[2] jointly identify the posting; cle[3]/cle[4] are its stats.
        print cle[1]+"_"+cle[2],cle[3],cle[4]

    #if tempCounter == 1 and cl.strip().endswith("NYU_IRTK"):
    #    numTOPDR += 1    
    #    print cl.strip()

    if tempCounter == 3:
        break

    cl = ifh.readline()

#print "DONE"
#print "numTOPPosting:",numTOPPosting
# print "numTOPDR:",numTOPDR
exit(1)

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2_100KQueries_tail_5K"
outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/GOV2_100KQueries_tail_5K_formatted"

inputFileHandler = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

queryTermDictWithRealFreqInQueries = {}
queryTermList = []
outputLine = ""

for line in inputFileHandler.readlines():
    outputLine = ""
    queryID = line.strip().split(":")[0]
    queryTermList = line.strip().split(":")[1].strip().split(" ")
    # print "queryTermList:",queryTermList
    
    data = ""
    for element in queryTermList:
        data += element + " "
    
    # print "data(old):",data
    # print "original data:",data
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    # print "data(new):",data
    
    currentNewQueryTermList = data.strip().split(" ")
    currentNewQueryTermDict = {}
    
    for queryTerm in currentNewQueryTermList:
        if queryTerm.strip() != "":
            queryTermLower = queryTerm.lower()
            if queryTermLower not in currentNewQueryTermDict:
                currentNewQueryTermDict[queryTermLower] = 1
    
    outputLine = queryID + ":"
    for queryTerm in currentNewQueryTermDict:
        outputLine += queryTerm + " "
    outputLine = outputLine.strip()
    outputFileHandler.write(outputLine + "\n")

print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHandler.close()
exit(1)



#14 12899 137.866633548
# Sanity check of the cost formula f * log2(N/f); true division applies here
# (from __future__ import division at the top of the file).
print 14 * math.log(12899/14,2)
exit(1)

#133578,2279369,2711008,16011015,378956.558574,2.83696835237
#print 133578 * math.log(2279369/133578)
#print math.log(2279369/133578)
#exit(1)

#120,709,5501,476055,1176095,4133243,4265171,10531524,16011015,16624954,1452.62021926,2.04882964634,117
#print 709 * math.log(5501/709)
#print math.log(5501/709)
#exit(1)

QPCInTotal = 0
queryLengthAndTotalResultsDict = {}
queryLengthAndTotalCostDict = {}
queryLengthAndNumOfQueriesBelongingToDict = {}

qidAndQPCForMaxScoreDict = {}
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/results/usefulInfoForMaxScore_20140715_attempt2"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    QID = lineElements[0]
    totalQPC = lineElements[2]
    qidAndQPCForMaxScoreDict[QID] = totalQPC
print "len(qidAndQPCForMaxScoreDict):",len(qidAndQPCForMaxScoreDict)

termANDUnprunedListLengthDict = {}
# for gov2
# for vidaserver1:
ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryProcessingCostAnalysis/gov2/queryTermsWithTheirLengthsOfInvertedList"
# for clueweb09B
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/partOfLexiconTermsWithTermIDForTestingQueries_clueweb09B"
inputFileHandler = open(ifn,"r")
currentLine = inputFileHandler.readline()
currentLineNum = 0
while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    currentTermListLength = int(currentLineElements[1])
    termANDUnprunedListLengthDict[currentTerm] = currentTermListLength 
    currentLine = inputFileHandler.readline()
    currentLineNum += 1
print "len(termANDUnprunedListLengthDict): ",len(termANDUnprunedListLengthDict)
inputFileHandler.close()

numOfQueriesHavingQID = 0
numOfQueriesHavingSearchContent = 0
numOfResultsForAllTheQueries = 0

outputFileHandlers = []
for i in range(0,11):
    outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/results/gov2_head5KQueries_OR_20140716_regressionTrainingFile_queryLength_" + str(i)
    outputFileHandler = open(outputFileName,"w")
    outputFileHandlers.append(outputFileHandler)

inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/results/raw_results_OR_gov2_head5KQueries_20140715_from_vidaserver1"
inputFileHandler1 = open(inputFileName1,"r")
currentLine = inputFileHandler1.readline()
nextLine = ""
currentQID = ""

processFlag = True
outputLine = ""
while currentLine:
    # sampling parsing line:
    # qid: 701
    if currentLine.startswith("qid:"):
        currentQID = currentLine.strip().split(":")[1].strip()
        print
        print "QID:",currentQID
        print "QPC:",qidAndQPCForMaxScoreDict[currentQID]
        # qidAndQPCForMaxScoreDict[currentQID],"ms"
        outputLine = ""
        outputLine = str(currentQID) + ","
        # print "numOfQueriesHavingQID:",numOfQueriesHavingQID
        numOfQueriesHavingQID += 1
        # debug
        #if numOfQueriesHavingQID == 500:
        #    break

    # sample parsing line:
    # Search: u s  oil industry history
    if currentLine.startswith("Search:") and processFlag:
        elementList = currentLine.strip().split(" ")
        
        currentSearchContent = ""
        
        queryTermList = []
        for element in elementList[1:]:
            if element.strip() != "":
                queryTermList.append( element.strip() )
        
        for element in queryTermList:
            currentSearchContent += element + " "
        
        print "QueryContent:",currentSearchContent
        numOfQueriesHavingSearchContent += 1
        
        currentQueryTermIndexDict = {}
        nextLine = inputFileHandler1.readline()
        # print nextLine.strip()
        parsedFlag = True
        
        if nextLine.strip().endswith("is NOT in the lexicon."):
            parsedFlag = False
        
        if nextLine.strip() != "" and parsedFlag:
            nextLineElements = nextLine.strip().split(" ")
            # print "nextLineElements:",nextLineElements
            
            # sample parsing line: 
            # oil:0 industry:1 history:2 u:3 s:4
            for element in nextLineElements:
                if element.split(":")[0] not in currentQueryTermIndexDict:
                    currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
            
            # print "currentQueryTermIndexDict:",currentQueryTermIndexDict
            # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
            currentQueryLength = len(currentQueryTermIndexDict)
            print "QueryLength:",currentQueryLength
            if currentQueryLength not in queryLengthAndTotalCostDict:
                queryLengthAndTotalCostDict[currentQueryLength] = 0
            if currentQueryLength not in queryLengthAndNumOfQueriesBelongingToDict:
                queryLengthAndNumOfQueriesBelongingToDict[currentQueryLength] = 0
            
            queryLengthAndTotalCostDict[currentQueryLength] += int(qidAndQPCForMaxScoreDict[currentQID])
            queryLengthAndNumOfQueriesBelongingToDict[currentQueryLength] += 1
            QPCInTotal += int(qidAndQPCForMaxScoreDict[currentQID])
            
            
            for i in range(0,len(currentQueryTermIndexDict)):
                currentTerm = currentQueryTermIndexDict[i]
                print i,currentTerm,termANDUnprunedListLengthDict[currentTerm]
                outputLine += str(termANDUnprunedListLengthDict[currentTerm]) + ","
                if i == 0:
                    shortestList = int(termANDUnprunedListLengthDict[currentTerm])
                if i == 1:
                    longerList = int(termANDUnprunedListLengthDict[currentTerm])
            costValue1 = shortestList * math.log(longerList/shortestList)
            costValue2 = math.log(longerList/shortestList)
            outputLine += str(costValue1) + ","
            outputLine += str(costValue2) + ","
            outputLine += qidAndQPCForMaxScoreDict[currentQID]
            
            # for queryLength = ALL file
            outputFileHandlers[0].write(outputLine + "\n")
            if currentQueryLength <= 10:
                outputFileHandlers[currentQueryLength].write(outputLine + "\n")
            
            
            # Now, it is time to read the actual training example line
            currentLine = inputFileHandler1.readline()
            numOfResultsForTheCurrentQuery = 0
        
    currentLine = inputFileHandler1.readline()

# print "numOfQueriesHavingQID:",numOfQueriesHavingQID
# print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
# print "numOfResultsForAllTheQueries:",numOfResultsForAllTheQueries
# print

print
print "OVERALL:"
tempCounter = 0
for currentQueryLength in range(1,11):
    avgQPCForThisQueryLength = queryLengthAndTotalCostDict[currentQueryLength] / queryLengthAndNumOfQueriesBelongingToDict[currentQueryLength]
    print currentQueryLength,queryLengthAndNumOfQueriesBelongingToDict[currentQueryLength],round(avgQPCForThisQueryLength,2),"ms"
    tempCounter += queryLengthAndNumOfQueriesBelongingToDict[currentQueryLength]

print "QPCInTotal:",QPCInTotal
print "tempCounter:",tempCounter
print "avg:",QPCInTotal/tempCounter
inputFileHandler1.close()
outputFileHandler.close()
print "Overall:"
print "output a set of files"
exit(1)

inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/results/raw_results_OR_gov2_head1KQueries_20140714_from_pangolin"
inputFileHandler = open()
exit(1)

print "2.e+03"
print int("2.e+03")
exit(1)

'''
# evenly sampled time at 200ms intervals
t = np.arange(0., 5., 0.2)
# red dashes, blue squares and green triangles
plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
plt.show()
exit(1)
'''

# Parse the quality-control report into one (x, y) series per "dataset=..."
# header line, print each series, and plot all of them together.
# NOTE(review): matplotlib (plt) is commented out in this file's imports, so
# this section would raise NameError at plt.title if it were ever reached.
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/quality_control_gov2_tail5K_20140711.txt"
inputFileHandler = open(inputFileName,"r")

allXValueList = []   # one list of x values (fraction of index kept) per section
allYValueList = []   # one list of y values (last column, as float) per section

currentXValues = []
currentYValues = []
currentHeaderLine = ""
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # dataset=GOV2 queryLength=ALL date=20140708 dynamicWeight=0 numOfQueries=5000 note=ptPowTo1_ptOriginalF|dataset=GOV2 queryLength=ALL date=20140708 dynamicWeight=0 numOfQueries=5000 note=ptPowTo1_ptOriginalFromWei
    if lineElements[0].startswith("dataset"):
        # print line.strip()

        # Flush the previous section (if any) before starting a new one.
        if currentHeaderLine != "":
            print currentHeaderLine.strip()
            for index,value in enumerate(currentXValues):
                print currentXValues[index],currentYValues[index]
            print

            # blue squares: 'bs'
            # green triangle: 'g^'
            # plt.plot(currentXValues, currentYValues, 'bs')
            # plt.axis([0, 1, 0, 1])
            # plt.show()
            allXValueList.append(currentXValues)
            allYValueList.append(currentYValues)

        currentHeaderLine = line.strip()

        currentXValues = []
        currentYValues = []

        #dataset = lineElements[0].split("=")[0]
        #queryLength = lineElements[1].split("=")[0]
        #date = lineElements[2].split("=")[0]
        #dynamicWeight = lineElements[3].split("=")[0]
        #numOfQueries = lineElements[4].split("=")[0]
        note = lineElements[5].split("=")[0]
    if len(lineElements) == 12 and lineElements[0].endswith("%"): # data line
        # NOTE(review): "d%" appears to be a sentinel mapped to 0.08% -- confirm.
        if lineElements[0][:-1] == "d":
            percentageIndexKept = 0.0008
        else:
            percentageIndexKept = float(lineElements[0][:-1]) / 100
        #percentageTOP10DocumentResultPreservedAt10 = float(lineElements[-4])
        #percentageTOP10PostingPreservedAt10 = float(lineElements[-3])
        #percentageTOP10PostingPreservedInPrunedIndex = float(lineElements[-2])
        percentageQueryProcessingCost = float(lineElements[-1])
        currentXValues.append(percentageIndexKept)
        currentYValues.append(percentageQueryProcessingCost)

# final print: flush the last section, which has no following header line
print currentHeaderLine.strip()
for index,value in enumerate(currentXValues):
    print currentXValues[index],currentYValues[index]
print
# blue squares: 'bs'
# green triangle: 'g^'
# plt.plot(currentXValues, currentYValues, 'bs')
# plt.axis([0, 1, 0, 1])
# plt.show()
allXValueList.append(currentXValues)
allYValueList.append(currentYValues)
inputFileHandler.close()

print "Overall big plot"
print len(allXValueList)
print len(allYValueList)

xLabelStr = "% index kept"
yLabelStr = "% of TOP10 document results preserved"
tStrP1 = xLabelStr + " " + "versus" + " " + yLabelStr
tStrP2 = "dataset=GOV2"
tStrP3 = "queryLength=ALL"
tStrP4 = "dynamicWeight=0"
tStrP5 = "numOfQueries=5000"
tStrComplete = tStrP1 + ". " + tStrP2 + " " + tStrP3 + " " + tStrP4 + " " + tStrP5
plt.title(tStrComplete)
plt.xlabel(xLabelStr)
plt.ylabel(yLabelStr)
# One dashed series per pt-exponent setting, in the order the headers appeared.
plt.plot(allXValueList[0], allYValueList[0], \
         color='b', linestyle='dashed', marker='v', markerfacecolor='b', markersize=6, label='POW(pt,1)')
plt.plot(allXValueList[1], allYValueList[1], \
         color='g', linestyle='dashed', marker='^', markerfacecolor='g', markersize=6, label='POW(pt,0.9)')
plt.plot(allXValueList[2], allYValueList[2], \
         color='r', linestyle='dashed', marker='<', markerfacecolor='r', markersize=6, label='POW(pt,0.7)')
plt.plot(allXValueList[3], allYValueList[3], \
         color='c', linestyle='dashed', marker='>', markerfacecolor='c', markersize=6, label='POW(pt,0.5)')
plt.plot(allXValueList[4], allYValueList[4], \
         color='m', linestyle='dashed', marker='D', markerfacecolor='m', markersize=6, label='POW(pt,0.3)')
plt.plot(allXValueList[5], allYValueList[5], \
         color='y', linestyle='dashed', marker='*', markerfacecolor='y', markersize=6, label='POW(pt,0.1)')
plt.plot(allXValueList[6], allYValueList[6], \
         color='k', linestyle='dashed', marker='s', markerfacecolor='k', markersize=6, label='POW(pt,0)')

'''
plt.plot(allXValueList[0], allYValueList[0], 'bv', label='POW(pt,1)')
plt.plot(allXValueList[1], allYValueList[1], 'g^', label='POW(pt,0.9)')
plt.plot(allXValueList[2], allYValueList[2], 'r<', label='POW(pt,0.7)')
plt.plot(allXValueList[3], allYValueList[3], 'c>', label='POW(pt,0.5)')
plt.plot(allXValueList[4], allYValueList[4], 'mD', label='POW(pt,0.3)')
plt.plot(allXValueList[5], allYValueList[5], 'y*', label='POW(pt,0.1)')
plt.plot(allXValueList[6], allYValueList[6], 'ks', label='POW(pt,0)')
'''

plt.axis([0, 1, 0, 1])
plt.legend(loc="upper left")
plt.show()
exit(1)



inputFileList = []
outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140709"
outputFileHandler = open(outputFileName,"w")

# ifn = "/home/vgc/juanr/datasets/prune/unigrams.idx"
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140707.binary"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4)
    (termID,probability) = unpack( "1I1f", byteString)
    # print currentTermID,termID,probability
    outputFileHandler.write(str(termID) + " " + str(probability) + "\n")
    numOfBytesRead += 4 + 4
inputFileHandler0.close()
print "Overall:"
print "ifn:",ifn
print "outputFileName:",outputFileName
outputFileHandler.close()
exit(1)



inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/documentPostingArrays/gov2/staticBigrams/allPostingPopped_bigrams-0.66-1.static_PART_OF_20140706_GOV2"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfPostingPopped = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,currentProbability,impactScore
    numOfPostingPopped += 1
    if numOfPostingPopped >= 10:
        exit(1)
    numOfBytesRead += 16
inputFileHandler0.close()
print "Overall:"
print "ifn:",ifn
exit(1)

# simple read / write
# Convert a raw 4-byte-per-record file into (termID, value) records by
# prepending a sequential 32-bit termID to each 4-byte input value.
ofn = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_unigramProbablity_fromJuan_20140707.binary"
ofh = open(ofn,"wb")

inputFileList = []
ifn = "/home/vgc/juanr/datasets/prune/unigrams.idx"
inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
currentTermID = 0
while numOfBytesRead < fileSize:
    # The 4 input bytes are copied through verbatim, untouched.
    byteString = inputFileHandler0.read(4)
    ofh.write(pack("1I",currentTermID))
    ofh.write(byteString)
    currentTermID += 1
    if currentTermID % 10000 == 0:
        # In-place progress line (carriage return, no newline).
        sys.stdout.write("terms processed: %d   \r" % (currentTermID) )
        sys.stdout.flush()
    numOfBytesRead += 4
inputFileHandler0.close()
print "Overall:"
print "currentTermID:",currentTermID
print "ifn:",ifn
print "ofn:",ofn
ofh.close()
exit(1)

# Load the termID -> term mapping for the whole GOV2 lexicon.
termIDANDTermDict = {}
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/wholeLexiconTermsWithTermID_GOV2"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
numOfTerms = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTerm = currentLineElements[1]
    # print currentTermID,currentTerm
    if currentTermID not in termIDANDTermDict:
        termIDANDTermDict[currentTermID] = currentTerm

    if numOfTerms % 100000 == 0:
        print numOfTerms,"terms processed."

    currentLine = inputFileHanlder.readline()
    numOfTerms += 1

inputFileHanlder.close()
print "len(termIDANDTermDict):",len(termIDANDTermDict)

# Build "termID_docID" posting-key sets from two binary popped-posting files
# (16-byte records: 2 unsigned ints + 2 floats) and report their overlap.
postingDictFromWeiStaticUnigram = {}
postingDictFromJuanStaticBigram = {}

inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/documentPostingArrays/gov2/staticBigrams/gov2AllPostingPopped_bigrams-0.66-1.static_PART_OF_201407060"
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/gov2/allPostingPopped_GOV2_ourApproach_originalPt_originalPTopK_weight_0_since20140321"

inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    print termIDANDTermDict[termID],termID,docID,currentProbability,impactScore
    # print termID,docID,currentProbability,impactScore
    postingKey = str(termID) + "_" + str(docID)
    postingDictFromWeiStaticUnigram[postingKey] = 0
    # NOTE(review): 64519480 looks like a pruned-index posting budget -- confirm.
    if len(postingDictFromWeiStaticUnigram) >= 64519480:
        break
    numOfBytesRead += 16
inputFileHandler0.close()

inputFileHandler1 = open(inputFileName1,"rb")
statinfo = os.stat(inputFileName1)
fileSize = statinfo.st_size
print "inputFileName1: ",inputFileName1
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler1.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    # print termIDANDTermDict[termID],termID,docID,currentProbability,impactScore
    postingKey = str(termID) + "_" + str(docID)
    postingDictFromJuanStaticBigram[postingKey] = 0
    if len(postingDictFromJuanStaticBigram) >= 64519480:
        break
    numOfBytesRead += 16
inputFileHandler1.close()

print "Overall:"
# Jaccard-style overlap of the two posting-key sets.
intersectionSet = set(postingDictFromJuanStaticBigram).intersection(postingDictFromWeiStaticUnigram)
unionSet = set(postingDictFromJuanStaticBigram).union(postingDictFromWeiStaticUnigram)
symmetricDifferenceRate = len(intersectionSet)/len(unionSet)
print "len(postingDictFromWeiStaticUnigram):",len(postingDictFromWeiStaticUnigram)
print "len(postingDictFromJuanStaticBigram):",len(postingDictFromJuanStaticBigram)
print "len(intersectionSet):",len(intersectionSet)
print "len(unionSet):",len(unionSet)
print "symmetricDifferenceRate:",symmetricDifferenceRate
print "Ends."
exit(1)

'''
termIDANDTermDict = {}
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/wholeLexiconTermsWithTermID_GOV2"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
numOfTerms = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTerm = currentLineElements[1]
    # print currentTermID,currentTerm
    if currentTermID not in termIDANDTermDict:
        termIDANDTermDict[currentTermID] = currentTerm
    
    if numOfTerms % 100000 == 0:
        print numOfTerms,"terms processed."
    
    currentLine = inputFileHanlder.readline()
    numOfTerms += 1
    
inputFileHanlder.close()
print "len(termIDANDTermDict):",len(termIDANDTermDict)
'''

# /home/vgc/juanr/datasets/prune/bigrams-0.66-1.static
# /home/vgc/juanr/datasets/prune/bigrams-0.66-1.dynamic

ifn = "/home/vgc/juanr/datasets/prune/bigrams-0.66-1.static"
inputFileHandler0 = open(ifn,"rb")

statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "currentInputFileName: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
numOfDocumentsProcessed = 0
while numOfBytesRead < fileSize:
    
    # file handler 0
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "static bigram"
    print "docID:",docID,score1
    for i in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
        # print "----->",i,termID,static,dynamic,combined,score1
        print "----->",i,termID,static,dynamic,combined,score1
        # print "----->",i,termIDANDTermDict[termID],termID,static,dynamic,combined,score1
    if docID == 1:
        exit(1)
    numOfDocumentsProcessed += 1
    numOfBytesRead += 8 + score1 * 4 * 5
        
inputFileHandler0.close()
print "Ends."
exit(1)

# Same overlap comparison as above, with the unigram file as input 0 and a
# different static-bigram popped-posting file as input 1 (16-byte records:
# 2 unsigned ints + 2 floats).
postingDictFromWeiStaticUnigram = {}

postingDictFromJuanStaticBigram = {}

inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/gov2/allPostingPopped_GOV2_ourApproach_originalPt_originalPTopK_weight_0_since20140321"
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/documentPostingArrays/gov2/staticBigrams/allPostingPopped_WHOLE_bigrams.static_20140703_0_PART_OF"

inputFileHandler0 = open(ifn,"rb")
statinfo = os.stat(ifn)
fileSize = statinfo.st_size
print "ifn: ",ifn
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    # print termIDANDTermDict[termID],termID,docID,currentProbability,impactScore
    postingKey = str(termID) + "_" + str(docID)
    postingDictFromWeiStaticUnigram[postingKey] = 0
    # NOTE(review): 64519480 looks like a pruned-index posting budget -- confirm.
    if len(postingDictFromWeiStaticUnigram) >= 64519480:
        break
    numOfBytesRead += 16
inputFileHandler0.close()

inputFileHandler1 = open(inputFileName1,"rb")
statinfo = os.stat(inputFileName1)
fileSize = statinfo.st_size
print "inputFileName1: ",inputFileName1
print "file size:",fileSize
numOfBytesRead = 0
while numOfBytesRead < fileSize:
    byteString = inputFileHandler1.read(4 + 4 + 4 + 4)
    (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
    # print termIDANDTermDict[termID],termID,docID,currentProbability,impactScore
    postingKey = str(termID) + "_" + str(docID)
    postingDictFromJuanStaticBigram[postingKey] = 0
    if len(postingDictFromJuanStaticBigram) >= 64519480:
        break
    numOfBytesRead += 16
inputFileHandler1.close()

print "Overall:"
# Jaccard-style overlap of the two posting-key sets.
intersectionSet = set(postingDictFromJuanStaticBigram).intersection(postingDictFromWeiStaticUnigram)
unionSet = set(postingDictFromJuanStaticBigram).union(postingDictFromWeiStaticUnigram)
symmetricDifferenceRate = len(intersectionSet)/len(unionSet)
print "len(postingDictFromWeiStaticUnigram):",len(postingDictFromWeiStaticUnigram)
print "len(postingDictFromJuanStaticBigram):",len(postingDictFromJuanStaticBigram)
print "len(intersectionSet):",len(intersectionSet)
print "len(unionSet):",len(unionSet)
print "symmetricDifferenceRate:",symmetricDifferenceRate
print "Ends."
exit(1)

qidWithThresholdDict = {}
ifn = "/local_scratch/wei/workspace/NYU_IRTK/data/clueweb09B_09_12_termIDs_scores_added_20140630_top1000_OR"
inputFileHanlder = open(ifn,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    # print currentLineElements
    theRank = int(currentLineElements[3])
    theScore = float(currentLineElements[4])
    currentQID = currentLineElements[0]
    if theRank == 10:
        qidWithThresholdDict[currentQID] = theScore
        print currentQID,theScore
    currentLine = inputFileHanlder.readline()
print "qidWithThresholdDict['1']:",qidWithThresholdDict["1"]
print "qidWithThresholdDict['2']:",qidWithThresholdDict["2"]
inputFileHanlder.close()

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/trecCompatibleResults/ourApproach_static_withStopWords/trecCompatible_clueweb09B_static_withoutStopWords_20140626__0.08%_strictThreshold"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/trecCompatibleResults/ourApproach_static_withStopWords/trecCompatible_clueweb09B_static_withoutStopWords_20140626__0.08%"
inputFileHandler = open(inputFileName1,"r")
# 66 Q0 clueweb09-en0005-84-26267 1 10.274900198 NYU_IRTK
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentScore = float(lineElements[4])
    if currentScore >= qidWithThresholdDict[currentQID]:
        outputFileHandler.write(line)

print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHandler.close()
exit(1)



# import matplotlib
# import matplotlib.pyplot
# Load the termID -> term mapping, then print every popped posting
# (term, termID, docID, probability, impact) from one or more binary files
# of 16-byte records (2 unsigned ints + 2 floats).
termIDANDTermDict = {}
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/wholeLexiconTermsWithTermID_GOV2"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
numOfTerms = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTerm = currentLineElements[1]
    # print currentTermID,currentTerm
    if currentTermID not in termIDANDTermDict:
        termIDANDTermDict[currentTermID] = currentTerm

    if numOfTerms % 100000 == 0:
        print numOfTerms,"terms processed."

    currentLine = inputFileHanlder.readline()
    numOfTerms += 1

inputFileHanlder.close()
print "len(termIDANDTermDict):",len(termIDANDTermDict)

inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/allPostingsBeingPopped20140321Morning_weight_0_WHOLE"
# ifn = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/documentPostingArrays/gov2/staticBigrams/allPostingPopped_WHOLE_bigrams.static_20140703_0"
inputFileList.append(ifn)
numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",currentInputFileName
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
        print termIDANDTermDict[termID],termID,docID,currentProbability,impactScore
        numOfBytesRead += 16
    inputFileHandler0.close()
print "Ends."
exit(1)



documentLowerBound = 0
outputFileBaseNamePart1 = "nohup ./irtk --local --cat < commandValue --config-options="
outputFileBaseNamePart2_1 = "document_posting_values_output_file_name_binary=/home/vgc/wei/workspace/NYU_IRTK/data/dynamicBigramFromJuan/documentPostingArrays/gov2/bigrams.static_withImpactScoreAdded_20140703_"
outputFileBaseNamePart2_2 = "\;"
outputFileBaseNamePart3_1 = "all_postings_being_popped_file_prefix_name_binary=/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/documentPostingArrays/gov2/subPostingPopped_"
outputFileBaseNamePart3_2 = "_20140703\;"
outputFileBaseNamePart4_1 = "both_collection_lower_bound="
outputFileBaseNamePart4_2 = "\;"
outputFileBaseNamePart5_1 = "both_collection_upper_bound="
outputFileBaseNamePart5_2 = "\;"
outputFileBaseNamePart6_1 = " > onlineSubPostingPopping_"
outputFileBaseNamePart6_2 = "_20140703_LOG"
outputFileBaseNamePart7 = "&" 
for i in range(0,25):
    theActualLowerBound = str(documentLowerBound * 1000000)
    theActualUpperBound = str( (documentLowerBound+1) * 1000000 )
    theCommonPattern = str(documentLowerBound) + "M" + "_" + str(documentLowerBound+1) + "M"
    outputFileCompletePath = ""
    outputFileCompletePath += outputFileBaseNamePart1
    
    outputFileCompletePath += outputFileBaseNamePart2_1 
    outputFileCompletePath += theCommonPattern
    outputFileCompletePath += outputFileBaseNamePart2_2
    
    outputFileCompletePath += outputFileBaseNamePart3_1 
    outputFileCompletePath += theCommonPattern
    outputFileCompletePath += outputFileBaseNamePart3_2
    
    outputFileCompletePath += outputFileBaseNamePart4_1 
    outputFileCompletePath += theActualLowerBound
    outputFileCompletePath += outputFileBaseNamePart4_2
    
    outputFileCompletePath += outputFileBaseNamePart5_1 
    outputFileCompletePath += theActualUpperBound
    outputFileCompletePath += outputFileBaseNamePart5_2
    
    outputFileCompletePath += outputFileBaseNamePart6_1 
    outputFileCompletePath += theCommonPattern
    outputFileCompletePath += outputFileBaseNamePart6_2
    
    outputFileCompletePath += outputFileBaseNamePart7
    documentLowerBound += 1
    print outputFileCompletePath
    print
exit(1)

# (stub) path to the real-frequency file for the 200 HJ queries; never read below.
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/realFreqOfTermsFor200HJQueries_Clueweb09B"

exit(1)

# /home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot1/GOV2_documentPostingArray_0M_2M_ptPowToDot1_2DTableFromTOP100Postings_20140627.binary
# step1: Load the gov2 document posting arrays
# Sum the posting counts of every document: read each 8-byte (docID, count)
# header and seek past the count * 20-byte posting payload.
inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/GOV2_DocumentPostingArray_WHOLE_20140627_pt_2DTableFromTOP100Postings.binary"
inputFileList.append(ifn)
numOfDocumentsProcessed = 0
totalNumOfPostings = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",currentInputFileName
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print docID,score1
        totalNumOfPostings += score1
        '''
        if docID == 4:
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                # print "----->",i,termID,static,dynamic,combined,score1
                print "----->",i,termID,static,score1
            print
            exit(1)
        '''
        # Skip the posting payload: score1 records of 5 * 4 bytes each.
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "totalNumOfPostings:",totalNumOfPostings
print "Ends."
exit(1)
# /home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot1/GOV2_documentPostingArray_0M_2M_ptPowToDot1_2DTableFromTOP100Postings_20140627.binary
# step1: Load the gov2 document posting arrays
# Print each document header (docID, posting count) of a dynamic-bigram file,
# seeking past the count * 16-byte posting payload (note the 4*4 stride here
# versus 4*5 in the file format above).
inputFileList = []
ifn = "/home/vgc/juanr/datasets/prune/bigrams.dyn"
inputFileList.append(ifn)
numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",currentInputFileName
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        print docID,score1
        '''
        if docID == 4:
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                # print "----->",i,termID,static,dynamic,combined,score1
                print "----->",i,termID,static,score1
            print
            exit(1)
        '''
        # Skip the posting payload: score1 records of 4 * 4 bytes each.
        numOfBytesRead += 8 + score1 * 4 * 4
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)

docIDAndTrecIDDict = {}
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trecID_docID_MappingTableForGov2Dataset"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
numOfDocs = 0
while currentLine:
    numOfDocs += 1
    if numOfDocs % 10000 == 0:
        sys.stdout.write("Docs processed: %d   \r" % (numOfDocs) )
        sys.stdout.flush()
    currentLineElements = currentLine.strip().split(" ")
    # Note: change for different format
    currentDocID = currentLineElements[1]
    currentTrecID = currentLineElements[0]
    docIDAndTrecIDDict[currentDocID] = currentTrecID
    currentLine = inputFileHanlder.readline()
print "len(docIDAndTrecIDDict):",len(docIDAndTrecIDDict)
# print "docIDAndTrecIDDict['0']:",docIDAndTrecIDDict['0']
inputFileHanlder.close()

ofn = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/important_debug_trecCompatible_weight_0_our_approach_20140702"
ofh = open(ofn,"w")

ifn = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/important_debug_trecCompatible_weight_0_our_approach_20140702_RAW"
ifh = open(ifn,"r")
for line in ifh.readlines():
    lineElements = line.strip().split(" ")
    currentDocID = lineElements[2]
    currentTrecID = docIDAndTrecIDDict[currentDocID]
    ol = lineElements[0] + " " + lineElements[1] + " " + currentTrecID + " " + lineElements[3] + " " + lineElements[4] + " " + lineElements[5] + "\n"
    ofh.write(ol)
ifh.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)


trecIDANDDocIDDict = {}
ifn = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentDocID =  currentLineElements[1]
    if currentTrecID not in trecIDANDDocIDDict:
        trecIDANDDocIDDict[currentTrecID] = currentDocID  
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(trecIDANDDocIDDict):",len(trecIDANDDocIDDict)

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top1000_100%_tb04-06_final_terms_added_docID_added"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top1000_100%_tb04-06_final_terms_added"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTrecID = lineElements[2]
    currentDocID = trecIDANDDocIDDict[currentTrecID]
    outputLine = line.strip()
    outputFileHandler.write(outputLine + " " + currentDocID + "\n")
inputFileHandler0.close()
outputFileHandler.close()

print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top1000_100%_tb04-06_final"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top10000_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    theRank = int(lineElements[3])
    if theRank < 1000:
        outputFileHandler.write(line)
inputFileHandler0.close()
outputFileHandler.close()
print "overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

qidWithQueryTermsDict = {}
ifn = "/local_scratch/wei/workspace/NYU_IRTK/data/OR_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    if currentQID not in qidWithQueryTermsDict:
        qidWithQueryTermsDict[currentQID] = []
        postingInfoList = lineElements[6:-1]
        numOfPostingInfos = int(len(postingInfoList) / 2)
        base = 0
        for i in range(0,numOfPostingInfos):
            currentTermID = postingInfoList[base]
            currentTermScore = float(postingInfoList[base+1])
            if currentTermID not in qidWithQueryTermsDict[currentQID]:
                qidWithQueryTermsDict[currentQID].append(currentTermID)
            base += 2
            
    else:
        pass

# print "Overall:"
# print "len(qidWithQueryTermsDict):",len(qidWithQueryTermsDict)
# print "qidWithQueryTermsDict['701']:",qidWithQueryTermsDict['701']
# print "qidWithQueryTermsDict['702']:",qidWithQueryTermsDict['702']
for currentQID in qidWithQueryTermsDict:
    # print currentQID
    outputLine = currentQID + " "
    for term in qidWithQueryTermsDict[currentQID]:
        outputLine += term + " " + "0.0" + " "
    outputLine = outputLine.strip()
    print outputLine
inputFileHandler0.close()
exit(1)

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top1000_100%_tb04-06_final_with_termID_and_score_added"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/OR_top1000_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    theRank = int(lineElements[3])
    if theRank < 100:
        outputFileHandler.write(line)
inputFileHandler0.close()
outputFileHandler.close()
print "overall:"
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)







termAndTermIDDict = {}
ifn2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BQueryTermsFromHJQueries_09_12_20140624"
ifh2 = open(ifn2,"r")
currentLine = ifh2.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    
    if currentTerm not in termAndTermIDDict:
        termAndTermIDDict[currentTerm] = currentTermID
    
    currentLine = ifh2.readline()
ifh2.close()
print "len(termAndTermIDDict):",len(termAndTermIDDict)

ofn = "/local_scratch/wei/workspace/NYU_IRTK/data/clueweb09B_09_12_termIDs_scores_added_20140630_top10000_OR"
ofh = open(ofn,"w")

ifn = "/local_scratch/wei/workspace/NYU_IRTK/data/clueweb09B_09_12_terms_scores_added_20140630_top10000_OR"
ifh = open(ifn,"r")

for line in ifh.readlines():
    lineElements = line.strip().split(" ")
    postingInfoList = lineElements[6:-1]
    numOfPostingInfos = int(len(postingInfoList) / 2)
    base = 0
    ol = lineElements[0] + " " + lineElements[1] + " " + lineElements[2] + " " + lineElements[3] + " " + lineElements[4] + " " + lineElements[5] + " "
    for i in range(0,numOfPostingInfos):
        currentTerm = postingInfoList[base]
        # currentTermScore = float(postingInfoList[base+1]) # The score1 is currently NOT used at all
        currentTermID = termAndTermIDDict[currentTerm]
        ol += currentTermID + " " + "0.0" + " "
        base += 2
    ol += lineElements[-1]
    ol = ol.strip()
    ofh.write(ol + "\n")
print "Overall:"
print "ifn:",ifn
print "ifn2:",ifn2
print "ofn:",ofn
exit(1)

# Parse the raw ranked-results dump into one flat line per result:
# "<qid> Q0 <trecID> <rank> <score> NYU_IRTK <term> 0.0 ... <docID>".
# The input interleaves "qid:" header groups (3 lines: header, ignored line,
# term:index pairs) with "Score:\tDocID:\tTrecID:" result lines.
ofn = "/local_scratch/wei/workspace/NYU_IRTK/data/clueweb09B_09_12_terms_scores_added_20140630_top10000_OR"
ofh = open(ofn,"w")

inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_09_12_rawResults_20140611_TOP2M_OR"
inputFileHanlder2 = open(inputFileName2,"r")
currentQueryID = ""
currentQueryTermList = []
currentLine = inputFileHanlder2.readline()
while currentLine:
    if currentLine.strip().startswith("qid:"):
        currentLineElements = currentLine.strip().split(" ")
        print "processing qid",currentLineElements[1]
        currentQueryID = currentLineElements[1]
        inputFileHanlder2.readline() # ignore this line
        currentQueryTermList = []
        # The next line holds "term:index" pairs; keep only the term part.
        termIndexPairList = inputFileHanlder2.readline().strip().split(" ")
        for termIndexPair in termIndexPairList:
            currentQueryTermList.append(termIndexPair.split(":")[0])
        # debug
        print currentQueryTermList
        currentRank = 0
    
    # NOTE(review): assumes every "Score:" line is preceded by a "qid:"
    # header; otherwise currentRank is referenced before assignment -- TODO
    # confirm the input format guarantees this.
    if currentLine.strip().startswith("Score:"):
        currentRank += 1
        if currentRank <= 10000:
            currentLineElements = currentLine.strip().split("\t")
            currentScoreStringPair = currentLineElements[0]
            currentDocIDStringPair = currentLineElements[1]
            currentTrecIDStringPair = currentLineElements[2]
            # Each field looks like "Label: value"; keep the value part.
            currentScore = currentScoreStringPair.strip().split(":")[1].strip()
            currentDocID = currentDocIDStringPair.strip().split(":")[1].strip()
            currentTrecID = currentTrecIDStringPair.strip().split(":")[1].strip()
            ol = str(currentQueryID) + " " + "Q0" + " " + str(currentTrecID) + " " + str(currentRank) + " " + str(currentScore) + " " + "NYU_IRTK" + " "
            for currentQueryTerm in currentQueryTermList:
                ol += currentQueryTerm + " " + "0.0" + " "
            ol += str(currentDocID)
            ol = ol.strip()
            print ol
            ofh.write(ol + "\n")
        else:
            pass
    
    currentLine = inputFileHanlder2.readline()
inputFileHanlder2.close()
ofh.close()
print "Overall:"
print "inputFileName2:",inputFileName2
print "ofn:",ofn
exit(1)

docIDAndTrecIDDict = {}
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BDocumentOverallStatistics_sortedByDocID_20140606"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
numOfDocs = 0
while currentLine:
    numOfDocs += 1
    if numOfDocs % 10000 == 0:
        sys.stdout.write("Docs processed: %d   \r" % (numOfDocs) )
        sys.stdout.flush()
    currentLineElements = currentLine.strip().split(" ")
    # Note: change for different format
    currentDocID = currentLineElements[0]
    currentTrecID = currentLineElements[2]
    docIDAndTrecIDDict[currentDocID] = currentTrecID
    currentLine = inputFileHanlder.readline()
print "len(docIDAndTrecIDDict):",len(docIDAndTrecIDDict)
# print "docIDAndTrecIDDict['0']:",docIDAndTrecIDDict['0']
inputFileHanlder.close()

ofn = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/debug_for_quality_20140630"
ofh = open(ofn,"w")

ifn = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/debug"
ifh = open(ifn,"r")
for line in ifh.readlines():
    lineElements = line.strip().split(" ")
    currentDocID = lineElements[2]
    currentTrecID = docIDAndTrecIDDict[currentDocID]
    ol = lineElements[0] + " " + lineElements[1] + " " + currentTrecID + " " + lineElements[3] + " " + lineElements[4] + " " + lineElements[5] + "\n"
ifh.close()
ofh.close()
print "Overall:"
print "ifn:",ifn
print "ofn:",ofn
exit(1)









termIDANDTermDict = {}
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/debugForPtVariations_20140629"
inputFileHanlder = open(ifn,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentTermID = currentLine.strip()
    termIDANDTermDict[currentTermID] = 1
    currentLine = inputFileHanlder.readline()
print "len(termIDANDTermDict):",len(termIDANDTermDict)
print termIDANDTermDict
inputFileHanlder.close()

inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/wholeLexiconTermsWithTermID_GOV2"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    # print currentTermID,currentTerm
    if currentTermID in termIDANDTermDict:
        print currentTermID,currentTerm
    currentLine = inputFileHanlder.readline()
inputFileHanlder.close()
exit(1)


# /home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot1/GOV2_documentPostingArray_0M_2M_ptPowToDot1_2DTableFromTOP100Postings_20140627.binary
# step1: Load the gov2 document posting arrays
#
# Debug pass: walk a posting-array file ("2I" header -> docID, count; then
# count records of "1I4f"), printing every header, and dump the full posting
# list of docID 4 before exiting.  Only inputFileName6 is actually scanned;
# the other candidates are kept commented out for quick switching.
inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowTo0/GOV2_documentPostingArray_0M_4M_ptPowTo0_2DTableFromTOP100Postings_20140627.binary"
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot1/GOV2_documentPostingArray_0M_4M_ptPowToDot1_2DTableFromTOP100Postings_20140627.binary"
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot3/GOV2_documentPostingArray_0M_4M_ptPowToDot3_2DTableFromTOP100Postings_20140627.binary"
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot5/GOV2_documentPostingArray_0M_4M_ptPowToDot5_2DTableFromTOP100Postings_20140627.binary"
inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot7/GOV2_documentPostingArray_0M_4M_ptPowToDot7_2DTableFromTOP100Postings_20140627.binary"
inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/ptPowToDot9/GOV2_documentPostingArray_0M_4M_ptPowToDot9_2DTableFromTOP100Postings_20140627.binary"
inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/LEAVE_selectedDocumentPostingValuesInfo_0_6M_DOC_20140518_TOP100Postings.binary"
#inputFileList.append(ifn)
#inputFileList.append(inputFileName1)
#inputFileList.append(inputFileName2)
#inputFileList.append(inputFileName3)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
inputFileList.append(inputFileName6)
numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "currentInputFileName: ",currentInputFileName
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)  # score1 = posting count
        print docID,score1
        if docID == 4:
            # Dump docID 4's postings, then stop the whole script.
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                # print "----->",i,termID,static,dynamic,combined,score1
                print "----->",i,termID,static,score1
            print
            exit(1)
        # Skip this document's 20-byte posting records.
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)

print "Begins..."
# simple write
outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_goodTuring_unigramProbablity_20140627.binary"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/LMs/GOV2/wholeLexicon_GOV2_goodTuring_unigramProbablity_20140602"
inputFileHandler0 = open(inputFileName,"r")

currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
while currentLine:
    if numOfQueriesFallingThrough % 1000000 == 0:
        print numOfQueriesFallingThrough,"processed."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTermProbablity = float(currentLineElements[1]) 
    outputFileHandler.write(pack("1I1f", currentTermID, currentTermProbablity))
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
print "Ends."
exit(1)

# step1: Load the gov2 document posting arrays
#
# Print every per-document header (docID, posting count) of the selected
# posting-array file.  Header is "2I"; the 20-byte posting records themselves
# are skipped via seek() (the commented block shows how to decode them).
inputFileList = []
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/LEAVE_selectedDocumentPostingValuesInfo_0_6M_DOC_20140518_TOP100Postings.binary"
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/LEAVE_selectedDocumentPostingValuesInfo_6M_12M_DOC_20140518_TOP100Postings.binary"
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/LEAVE_selectedDocumentPostingValuesInfo_12M_18Dot5M_DOC_20140518_TOP100Postings.binary"
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/documentPostingArrays/gov2/LEAVE_selectedDocumentPostingValuesInfo_18Dot5M_25M_DOC_20140518_TOP100Postings.binary"
inputFileList.append(ifn)
#inputFileList.append(inputFileName1)
#inputFileList.append(inputFileName2)
#inputFileList.append(inputFileName3)

numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)  # score1 = posting count
        print docID,score1
        '''
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
            (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
            print "----->",i,termID,static,dynamic,combined,score1
        '''
        # Skip this document's posting records: 8-byte header + 20 bytes each.
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)


print "Begins..."
termList = []
termDict = {}
ifn = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/stopwords"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip()
    termList.append(currentTerm)
    termDict[currentTerm] = 1
print termList
print "len(termList)",len(termList)
print "len(termDict)",len(termDict)
inputFileHandler0.close()

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatistics_20140606"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
numOfTerms = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    currentTermLength = currentLineElements[2]
    
    if currentTerm in termDict:
        print currentTermID,currentTerm,currentTermLength
    
    currentLine = inputFileHandler0.readline()
    numOfTerms += 1
inputFileHandler0.close()

print "Ends."
exit(1)




"Q0 clueweb09-en0005-99-19832 1 1.768687 NYU_IRTK"
ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qualityEvaluationTools/debug"
ofh = open(ofn,"w")
for i in range(1,201):
    print i
    ol = str(i) + " " + "Q0 clueweb09-en0005-99-19832 1 1.768687 NYU_IRTK" + "\n"
    ofh.write(ol)
print "Overall:"
print "ofn:",ofn
ofh.close()
exit(1)

inputFileList = []
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/allPostingPopped_10%_clueweb09B"
inputFileList.append(inputFileName)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,docID,finalProbability,impactScore) = unpack( "2I2f", byteString)
        print termID,docID,finalProbability,impactScore
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)



# Append a docID column to each qrels line, looked up by the TREC id in
# column 3; "-1" marks ids with no mapping.
# NOTE(review): this dead section relies on docIDAndTrecIDDict and
# inputFileName1 assigned by earlier (also dead) sections, and that dict is
# built keyed by docID yet probed here with TREC ids -- the lookup direction
# looks inverted; confirm which mapping was intended before reviving this.
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/qrels.web09_12catA_withDocIDAdded.txt"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/qrels.web09_12catA.txt"
inputFileHanlder = open(inputFileName2,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[2]
    if currentTrecID not in docIDAndTrecIDDict:
        outputFileHandler.write(currentLine.strip() + " " + "-1" + "\n")
    else:
        outputFileHandler.write(currentLine.strip() + " " + docIDAndTrecIDDict[currentTrecID] + "\n")
    currentLine = inputFileHanlder.readline()

print "Overall:"
print "len(docIDAndTrecIDDict):",len(docIDAndTrecIDDict)
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
inputFileHanlder.close()
outputFileHandler.close()
exit(1)

inputFileList = []
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/subPostingPopped_0_2M_20140624"
inputFileList.append(inputFileName)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,docID,finalProbability,impactScore) = unpack( "2I2f", byteString)
        print termID,docID,finalProbability,impactScore
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)

# Walk a clueweb09B posting-array file ("2I" header -> docID, count; then
# count records of "1I4f"), printing every header, and dump the postings of
# docID 12853742 before exiting.
inputFileList = []
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/LEAVE_documentPostingArray_clueweb09B_12M_14M_gray10_20140624.binary"
inputFileList.append(inputFileName)

numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)  # score1 = posting count
        print docID,score1
        if docID == 12853742:
            # NOTE: the loop below clobbers score1 (the posting count) with
            # each record's float field -- harmless only because exit(1)
            # follows immediately.
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                print "----->",i,termID,static,dynamic,combined,score1
            exit(1)
        # Skip this document's 20-byte posting records.
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)

'''
# debug
machineNumber = 4
documentLowerBound = 0
fileSize = 2    # in M documents
outputFileBaseNamePart1 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/"
outputFileBaseNamePart2 = "LEAVE_documentPostingArray_clueweb09B_"
outputFileBaseNamePart3 = "_20140623.binary"
for i in range(0,12):
    outputFileCompletePath = outputFileBaseNamePart1 + outputFileBaseNamePart2 + str(documentLowerBound) + "M" + "_" + str(documentLowerBound+2) + "M" + "_gray" + str(machineNumber).zfill(2) + outputFileBaseNamePart3
    documentLowerBound += 2
    machineNumber += 1
    print i,outputFileCompletePath
outputFileCompletePath = outputFileBaseNamePart1 + outputFileBaseNamePart2 + str(documentLowerBound) + "M" + "_" + "END" + "_gray" + str(machineNumber).zfill(2) + outputFileBaseNamePart3
print (i+1),outputFileCompletePath
exit(1)
'''

# Split the WHOLE clueweb09B posting-array file into 2M-document shards, each
# named ..._<lo>M_<lo+2>M_grayNN_20140624.binary.  Documents are copied
# verbatim ("2I" header + count 20-byte records); every 2M documents the
# current shard is closed and the next one opened.  The final shard (lower
# bound 24M) covers only 1M documents; the whole run stops at 25M documents.
machineNumber = 4
documentLowerBound = 0
outputFileBaseNamePart1 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/"
outputFileBaseNamePart2 = "LEAVE_documentPostingArray_clueweb09B_"
outputFileBaseNamePart3 = "_20140624.binary"

inputFileList = []
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/LEAVE_documentPostingArray_clueweb09B_WHOLE_20140620.binary"
inputFileList.append(inputFileName)

# First shard: documents [0M, 2M).
outputFileName = outputFileCompletePath = outputFileBaseNamePart1 + outputFileBaseNamePart2 + str(documentLowerBound) + "M" + "_" + str(documentLowerBound+2) + "M" + "_gray" + str(machineNumber).zfill(2) + outputFileBaseNamePart3
print "Open:",outputFileCompletePath
outputFileHandler = open(outputFileName,"wb")

numOfDocumentsProcessed = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)  # score1 = posting count
        outputFileHandler.write(byteString)
        # print docID,score1
        
        '''
        # debug
        if docID == 20000:
            print docID,score1
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                print "----->",i,termID,static,dynamic,combined,score1
        '''
        
        # Copy the document's posting records through unchanged.
        byteString = inputFileHandler0.read(score1 * 4 * 5)
        outputFileHandler.write(byteString)
        
        numOfDocumentsProcessed += 1   

        # Hard stop at 25M documents (the collection end).
        if numOfDocumentsProcessed % 25000000 == 0:
            outputFileHandler.close()
            print "Close:",outputFileCompletePath,os.stat(outputFileCompletePath).st_size
            print "25M documents done."
            exit(1)
        
        # Rotate to the next 2M-document shard; the last shard (24M..) spans
        # only 1M documents.
        if numOfDocumentsProcessed % 2000000 == 0:
            outputFileHandler.close()
            print "Close:",outputFileCompletePath,os.stat(outputFileCompletePath).st_size
            documentLowerBound += 2
            machineNumber += 1
            if documentLowerBound != 24:
                outputFileCompletePath = outputFileBaseNamePart1 + outputFileBaseNamePart2 + str(documentLowerBound) + "M" + "_" + str(documentLowerBound+2) + "M" + "_gray" + str(machineNumber).zfill(2) + outputFileBaseNamePart3
            elif documentLowerBound == 24:
                outputFileCompletePath = outputFileBaseNamePart1 + outputFileBaseNamePart2 + str(documentLowerBound) + "M" + "_" + str(documentLowerBound+1) + "M" + "_gray" + str(machineNumber).zfill(2) + outputFileBaseNamePart3
            print "Open:",outputFileCompletePath
            outputFileHandler = open(outputFileCompletePath,"wb")
        
        if numOfDocumentsProcessed % 10000 == 0:
            # print "Docs processed:",numOfDocumentsProcessed,"\r"
            sys.stdout.write("Docs processed: %d   \r" % (numOfDocumentsProcessed) )
            sys.stdout.flush()
        # Keep the explicit byte counter / seek in sync with what was read.
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)



# Precompute posting-count thresholds at various percentages of the total
# (6,451,948,010 postings), plus one-shot flags so each threshold is reported
# only once by the (truncated, below) scan loop over the sorted document file.
numOfPostingsInTotal = 6451948010
numOfPostingsAt1Percent = int(numOfPostingsInTotal * 0.01)
numOfPostingsAt2Percent = int(numOfPostingsInTotal * 0.02)
numOfPostingsAt3Percent = int(numOfPostingsInTotal * 0.03)
numOfPostingsAt4Percent = int(numOfPostingsInTotal * 0.04)
numOfPostingsAt5Percent = int(numOfPostingsInTotal * 0.05)
numOfPostingsAt6Percent = int(numOfPostingsInTotal * 0.06)
numOfPostingsAt7Percent = int(numOfPostingsInTotal * 0.07)
numOfPostingsAt8Percent = int(numOfPostingsInTotal * 0.08)
numOfPostingsAt9Percent = int(numOfPostingsInTotal * 0.09)
numOfPostingsAt10Percent = int(numOfPostingsInTotal * 0.1)
numOfPostingsAt15Percent = int(numOfPostingsInTotal * 0.15)
numOfPostingsAt20Percent = int(numOfPostingsInTotal * 0.2)
numOfPostingsAt30Percent = int(numOfPostingsInTotal * 0.3)
numOfPostingsAt40Percent = int(numOfPostingsInTotal * 0.4)
numOfPostingsAt50Percent = int(numOfPostingsInTotal * 0.5)
numOfPostingsAt60Percent = int(numOfPostingsInTotal * 0.6)
numOfPostingsAt70Percent = int(numOfPostingsInTotal * 0.7)
numOfPostingsAt80Percent = int(numOfPostingsInTotal * 0.8)
numOfPostingsAt90Percent = int(numOfPostingsInTotal * 0.9)
# True = threshold not yet reported; flipped to False after first report.
numOfPostingsAt1PercentTag = True
numOfPostingsAt2PercentTag = True
numOfPostingsAt3PercentTag = True
numOfPostingsAt4PercentTag = True
numOfPostingsAt5PercentTag = True
numOfPostingsAt6PercentTag = True
numOfPostingsAt7PercentTag = True
numOfPostingsAt8PercentTag = True
numOfPostingsAt9PercentTag = True
numOfPostingsAt10PercentTag = True
numOfPostingsAt15PercentTag = True
numOfPostingsAt20PercentTag = True
numOfPostingsAt30PercentTag = True
numOfPostingsAt40PercentTag = True
numOfPostingsAt50PercentTag = True
numOfPostingsAt60PercentTag = True
numOfPostingsAt70PercentTag = True
numOfPostingsAt80PercentTag = True
numOfPostingsAt90PercentTag = True

# Candidate input files sorted by different document-ordering heuristics;
# exactly one is left uncommented per run.
# for dodo:
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_sortedByXdocDividedByLogNum_20150505"
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_sortedByXdocMultipleByLogNum_20150505"
# for vidaserver1
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo0"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo1"
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140621_sortedBySum_PTopK_PowTo1"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo2"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo3"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo4"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySumImpactScores"
# Updated by Wei 2014/06/22
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140621_sortedBySum_PTopK_PowToDot3"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140621_sortedBySum_PTopK_PowToDot7"
# inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140621_sortedBySum_PTopK_PowToDot7"

inputFileHandler0 = open(inputFileName,"r")
numOfDocumentsCanBeIncluded = 0
numOfPostingsCurrent = 0

# NOTE(review): the counter is incremented once here, before the scan loop;
# whether it advances per line inside the loop is not visible in this chunk.
currentLine = inputFileHandler0.readline()
numOfDocumentsCanBeIncluded += 1
while currentLine:
    numOfPostingsCurrent += int(currentLine.strip().split(" ")[1])

    if numOfPostingsCurrent >= numOfPostingsAt1Percent and numOfPostingsAt1PercentTag:
        print "Overall:"
        print "1%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt1Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt1Percent
        numOfPostingsAt1PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt2Percent and numOfPostingsAt2PercentTag:
        print "Overall:"
        print "2%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt1Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt2Percent
        numOfPostingsAt2PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt3Percent and numOfPostingsAt3PercentTag:
        print "Overall:"
        print "3%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt3Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt3Percent
        numOfPostingsAt3PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt4Percent and numOfPostingsAt4PercentTag:
        print "Overall:"
        print "4%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt4Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt4Percent
        numOfPostingsAt4PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt5Percent and numOfPostingsAt5PercentTag:
        print "Overall:"
        print "5%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt5Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt5Percent
        numOfPostingsAt5PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt6Percent and numOfPostingsAt6PercentTag:
        print "Overall:"
        print "6%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt6Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt6Percent
        numOfPostingsAt6PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt7Percent and numOfPostingsAt7PercentTag:
        print "Overall:"
        print "7%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt7Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt7Percent
        numOfPostingsAt7PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt8Percent and numOfPostingsAt8PercentTag:
        print "Overall:"
        print "8%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt8Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt8Percent
        numOfPostingsAt8PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt9Percent and numOfPostingsAt9PercentTag:
        print "Overall:"
        print "9%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt9Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt9Percent
        numOfPostingsAt9PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt10Percent and numOfPostingsAt10PercentTag:
        print "Overall:"
        print "10%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt10Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt10Percent
        numOfPostingsAt10PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt15Percent and numOfPostingsAt15PercentTag:
        print "Overall:"
        print "15%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt15Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt15Percent
        numOfPostingsAt15PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt20Percent and numOfPostingsAt20PercentTag:
        print "Overall:"
        print "20%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt20Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt20Percent
        numOfPostingsAt20PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt30Percent and numOfPostingsAt30PercentTag:
        print "Overall:"
        print "30%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt30Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt30Percent
        numOfPostingsAt30PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt40Percent and numOfPostingsAt40PercentTag:
        print "Overall:"
        print "40%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt40Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt40Percent
        numOfPostingsAt40PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt50Percent and numOfPostingsAt50PercentTag:
        print "Overall:"
        print "50%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt50Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt50Percent
        numOfPostingsAt50PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt60Percent and numOfPostingsAt60PercentTag:
        print "Overall:"
        print "60%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt60Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt60Percent
        numOfPostingsAt60PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt70Percent and numOfPostingsAt70PercentTag:
        print "Overall:"
        print "70%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt70Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt70Percent
        numOfPostingsAt70PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt80Percent and numOfPostingsAt80PercentTag:
        print "Overall:"
        print "80%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt80Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt80Percent
        numOfPostingsAt80PercentTag = False

    if numOfPostingsCurrent >= numOfPostingsAt90Percent and numOfPostingsAt90PercentTag:
        print "Overall:"
        print "90%"
        print "numOfDocumentsCanBeIncluded,numOfPostingsCurrent,numOfPostingsAt90Percent"
        print numOfDocumentsCanBeIncluded-1,numOfPostingsCurrent,numOfPostingsAt90Percent
        numOfPostingsAt90PercentTag = False

    currentLine = inputFileHandler0.readline()
    numOfDocumentsCanBeIncluded += 1
inputFileHandler0.close()
exit(1)

numOfDocumentsIncluded = 16200

# what is the overlap document set between the Xdoc and the P(t) * POW(P(topK | t),{0,1,2,3,4}) 
ifn = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo0"
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo1"
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo2"
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo3"
inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySum_PTopK_PowTo4"
inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/data/Gov2_XdocCombinedWithPTOPK_WHOLE_20140620_sortedBySumImpactScores"
inputFileList = []
inputFileList.append(ifn)
inputFileList.append(inputFileName1)
inputFileList.append(inputFileName2)
inputFileList.append(inputFileName3)
inputFileList.append(inputFileName4)
inputFileList.append(inputFileName5)
docDictList = []

for currentInputFileName in inputFileList:
    currentDocDict = {}
    inputFileHanlder = open(currentInputFileName,"r")
    currentLine = inputFileHanlder.readline()
    while currentLine:
        currentLineElements = currentLine.strip().split(" ")
        # Note: change for different format
        currentDocID = currentLineElements[0]
        
        if currentDocID not in currentDocDict:
            currentDocDict[currentDocID] = 1
        else:
            print "duplicated docID detected."
            exit(1)
        
        if len(currentDocDict) == numOfDocumentsIncluded:
            break
        currentLine = inputFileHanlder.readline()
    print "len(currentDocDict):",len(currentDocDict)
    docDictList.append(currentDocDict)
    inputFileHanlder.close()

print "Overall calculation:"
print "len(docDictList):",len(docDictList)
print "len(docDictList[0]):",len(docDictList[0])
print "len(docDictList[1]):",len(docDictList[1])
print "len(docDictList[2]):",len(docDictList[2])
print "len(docDictList[3]):",len(docDictList[3])
print "len(docDictList[4]):",len(docDictList[4])
print "len(docDictList[5]):",len(docDictList[5])
for currentDocDict in docDictList:
    intersectionSet = set(currentDocDict).intersection(docDictList[0])
    unionSet = set(currentDocDict).union(docDictList[0])
    symmetricDifferenceRate = len(intersectionSet)/len(unionSet)
    print len(currentDocDict),len(docDictList[0]),len(intersectionSet),len(unionSet),symmetricDifferenceRate
exit(1)

'''
intersectionSet = set(currentDocDict).intersection(docIDDict2)
unionSet = set(currentDocDict).union(docIDDict2)
symmetricDifferenceRate = len(intersectionSet)/len(unionSet)
print "1%",len(currentDocDict),len(docIDDict2),len(intersectionSet),len(unionSet),symmetricDifferenceRate
'''
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/offlineDocumentAnalysis_clueweb09B_20140620_LOG_Final_PART_OF_Reformatted2"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
sumCheck = 0
previousLine = ""
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    # the sum check
    if sumCheck != int(currentLineElements[4]):
        print "Problem Found."
        print previousLine.strip()
        print currentLine.strip()
        print sumCheck,int(currentLineElements[4])
        exit(1)
    sumCheck += int(currentLineElements[3])
    previousLine = currentLine
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)

xPoints = []
yPoints = []

'''
inputFileName = "/xDocRelated/outputXDocRelatedValues_WHOLE_20140603_xDocValues_sortedByXdocValue"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
lineNumber = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[1])
    currentXdocValue = float(currentLineElements[2])

    
    #xPoints.append(currentNumOfPostings)
    #yPoints.append(currentXdocValue)
    

    if lineNumber not in randomLineNumberDict:
        pass
    else:
        xPoints.append(currentNumOfPostings)
        yPoints.append(currentXdocValue)

    if lineNumber % 1000000 == 0:
        print "lineNumber:",lineNumber,"processed"

    currentLine = inputFileHandler0.readline()
    lineNumber += 1
inputFileHandler0.close()
'''
xPoints.append(1)
xPoints.append(2)
xPoints.append(3)

yPoints.append(1)
yPoints.append(2)
yPoints.append(3)
print len(xPoints)
print len(yPoints)


matplotlib.pyplot.scatter(xPoints,yPoints)
matplotlib.pyplot.show()
exit(1)

inputFileList = []
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/dynamicUnigramFromWei/LEAVE_documentPostingArray_clueweb09B_WHOLE_20140617.binary_PART_OF"
inputFileList.append(inputFileName)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print docID,score1
        if docID == 20000:
            print docID,score1
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,static,dynamic,combined,score1) = unpack( "1I4f", byteString)
                print "----->",i,termID,static,dynamic,combined,score1
        
        numOfBytesRead += 8 + score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
        
        if docID == 20000:
            exit(1)
    inputFileHandler0.close()
print "Ends."
exit(1)

inputFileList = []
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
inputFileList.append(inputFileName)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print docID,score1
        
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4)
            (termID,) = unpack( "1I", byteString)
            if docID == 1000:
                print i,termID
        
        numOfBytesRead += 8 + score1 * 4
        inputFileHandler0.seek(numOfBytesRead)
        # inputFileHandler0.seek(numOfBytesRead)
        if docID == 1000:
            exit(1)
    inputFileHandler0.close()
print "Ends."
exit(1)







# key: qID
# value: list of document candidates
qIDWithTOPKDocumentResultFile_AND_Dict = {}
qIDWithTOPKDocumentResultFile_OR_Dict = {}
# key: "<qID>_<trecID>" result pair, value: 1 (dicts used as sets; compared
# below to measure AND-vs-OR result overlap)
documentResult_AND_dict = {}
documentResult_OR_dict = {}
# number of top-ranked results kept per query for each retrieval semantics
TopK_AND = 10
TopK_OR = 10

# for clueweb09B from year2009 - year2012
# AND semantics
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_09_12WebTrack_rawResults_20140611_TOP100_AND"
# OR semantics
inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_09_12WebTrack_rawResults_20140611_TOP100_OR"

# for clueweb09B from year2009
# AND semantics
# inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_09WebTrack_rawResults_20140611_TOP2M_AND"
# OR semantics
# inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_09WebTrack_rawResults_20140611_TOP2M_OR"

print "**********AND"
inputFileHandler1 = open(inputFileName1,"r")
currentQueryID = ""
currentLine = inputFileHandler1.readline()
while currentLine:
    if currentLine.strip().startswith("qid:"):
        currentLineElements = currentLine.strip().split(" ")
        print "QID:",currentLineElements[1],
        currentQueryID = currentLineElements[1]
        
        if currentQueryID not in qIDWithTOPKDocumentResultFile_AND_Dict and int(currentQueryID) <= 50:
            qIDWithTOPKDocumentResultFile_AND_Dict[currentQueryID] = []
        
    if currentLine.strip().startswith("Score:"):
        currentLineElements = currentLine.strip().split("\t")
        currentDocIDStringPair = currentLineElements[1]
        currentTrecIDStringPair = currentLineElements[2]
        currentDocID = currentDocIDStringPair.strip().split(":")[1].strip()
        currentTrecID = currentTrecIDStringPair.strip().split(":")[1].strip()
        # print currentDocID,currentTrecID
        if currentQueryID in qIDWithTOPKDocumentResultFile_AND_Dict and len(qIDWithTOPKDocumentResultFile_AND_Dict[currentQueryID]) < TopK_AND:
            currentDocumentResultKey = currentQueryID + "_" + currentTrecID
            qIDWithTOPKDocumentResultFile_AND_Dict[currentQueryID].append(currentDocumentResultKey)
            documentResult_AND_dict[currentDocumentResultKey] = 1
            
    if currentLine.strip().startswith("Showing"):
        currentTotalNumOfResults = currentLine.strip().split(".")[0].split(" ")[-1]
        print currentTotalNumOfResults
    
    currentLine = inputFileHandler1.readline()

print "**********OR"
inputFileHanlder2 = open(inputFileName2,"r")
currentQueryID = ""
currentLine = inputFileHanlder2.readline()
while currentLine:
    if currentLine.strip().startswith("qid:"):
        currentLineElements = currentLine.strip().split(" ")
        print "QID:",currentLineElements[1],
        currentQueryID = currentLineElements[1]
        
        if currentQueryID not in qIDWithTOPKDocumentResultFile_OR_Dict and int(currentQueryID) <= 50:
            qIDWithTOPKDocumentResultFile_OR_Dict[currentQueryID] = []
        
    if currentLine.strip().startswith("Score:"):
        currentLineElements = currentLine.strip().split("\t")
        currentDocIDStringPair = currentLineElements[1]
        currentTrecIDStringPair = currentLineElements[2]
        currentDocID = currentDocIDStringPair.strip().split(":")[1].strip()
        currentTrecID = currentTrecIDStringPair.strip().split(":")[1].strip()
        # print currentDocID,currentTrecID
        if currentQueryID in qIDWithTOPKDocumentResultFile_AND_Dict and len(qIDWithTOPKDocumentResultFile_OR_Dict[currentQueryID]) < TopK_OR:
            currentDocumentResultKey = currentQueryID + "_" + currentTrecID
            qIDWithTOPKDocumentResultFile_OR_Dict[currentQueryID].append(currentDocumentResultKey)
            documentResult_OR_dict[currentDocumentResultKey] = 1
        
    if currentLine.strip().startswith("Showing"):
        currentTotalNumOfResults = currentLine.strip().split(".")[0].split(" ")[-1]
        print currentTotalNumOfResults
    
    currentLine = inputFileHanlder2.readline()

intersectionSet = set(documentResult_AND_dict).intersection( set(documentResult_OR_dict) )
unionSet = set(documentResult_AND_dict).union( set(documentResult_OR_dict) )

print "len(documentResult_AND_dict):",len(documentResult_AND_dict)
print "len(documentResult_OR_dict):",len(documentResult_OR_dict)
print "len(unionSet):",len(unionSet)
print "len(intersectionSet):",len(intersectionSet)
# print "len(intersectionSet)/len(unionSet): ",len(intersectionSet)/len(unionSet)
# print "unionSet-intersectionSet: ",unionSet-intersectionSet
# print set(documentResult_AND_dict).symmetric_difference( set(documentResult_OR_dict) )
print len(set(documentResult_AND_dict).symmetric_difference( set(documentResult_OR_dict) ))
print "Overall:"
print len(documentResult_AND_dict),"\t",len(documentResult_OR_dict),"\t",len(unionSet),"\t",len(intersectionSet),"\t",len(set(documentResult_AND_dict).symmetric_difference( set(documentResult_OR_dict) ))
print unionSet - intersectionSet
exit(1)

# construct the commands:
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/testScript.sh"
outputFileHandler = open(outputFileName,"w")
percentageList = [1,2,3,4,5,6,7,8,9,10,15,20,30,40,50,60,70,80,90,100]
baseCommandLinePart1 = "awk \'{if($6 == \"PolyIRTKDebug\") print $1,$2,$3,$4,$5,$6,$7}' results_xDoc_clueweb09B_09_pAt10_"
baseCommandLinePart2 = "%_OR_RAW"
baseCommandLinePart3 = " > "
baseCommandLinePart4 = "results_xDoc_clueweb09B_09_pAt10_"
baseCommandLinePart5 = "%_OR"

for percentageLevel in percentageList:
    completeCommand = baseCommandLinePart1 + str(percentageLevel) + baseCommandLinePart2 + baseCommandLinePart3 + baseCommandLinePart4 + str(percentageLevel) + baseCommandLinePart5
    print "completeCommand:",completeCommand
    outputFileHandler.write(completeCommand + "\n")
print "Overall:"
print "outputFileName:",outputFileName
outputFileHandler.close()
exit(1)





inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/DEBUG_rawResults_1%_TOP10_AND_20140125Afternoon"
inputFileHandler0 = open(inputFileName,"r")
counter = 0
for line in inputFileHandler0.readlines():
    actualScore = float(line.strip().split(" ")[-1])
    if actualScore != 0.0:
        counter += 1
        print line.strip()
print "counter:",counter
inputFileHandler0.close()
exit(1)


inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/qualityEvaluationTools/numOfPostings_OR_allMeasures_raw"
inputFileHandler0 = open(inputFileName,"r")
counter = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    # print lineElements
    if lineElements[0] == "P_10":
        print counter,lineElements[-1].split("\t")[-1]
        counter += 1
inputFileHandler0.close()
exit(1)

inputFileList = []
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_1%_RAW_OR"
inputFileName2 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_2%_RAW_OR"
inputFileName3 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_3%_RAW_OR"
inputFileName4 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_4%_RAW_OR"
inputFileName5 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_5%_RAW_OR"
inputFileName6 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_6%_RAW_OR"
inputFileName7 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_7%_RAW_OR"
inputFileName8 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_8%_RAW_OR"
inputFileName9 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_9%_RAW_OR"
inputFileName10 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_10%_RAW_OR"
inputFileName11 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_15%_RAW_OR"
inputFileName12 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_20%_RAW_OR"
inputFileName13 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_30%_RAW_OR"
inputFileName14 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_40%_RAW_OR"
inputFileName15 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_50%_RAW_OR"
inputFileName16 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_60%_RAW_OR"
inputFileName17 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_70%_RAW_OR"
inputFileName18 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_80%_RAW_OR"
inputFileName19 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_90%_RAW_OR"
inputFileName20 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDocDividedBySQRTNumOfPostings_clueweb09B_09_12_pAt10_100%_RAW_OR"

inputFileList.append(inputFileName1)
inputFileList.append(inputFileName2)
inputFileList.append(inputFileName3)
inputFileList.append(inputFileName4)
inputFileList.append(inputFileName5)
inputFileList.append(inputFileName6)
inputFileList.append(inputFileName7)
inputFileList.append(inputFileName8)
inputFileList.append(inputFileName9)
inputFileList.append(inputFileName10)
inputFileList.append(inputFileName11)
inputFileList.append(inputFileName12)
inputFileList.append(inputFileName13)
inputFileList.append(inputFileName14)
inputFileList.append(inputFileName15)
inputFileList.append(inputFileName16)
inputFileList.append(inputFileName17)
inputFileList.append(inputFileName18)
inputFileList.append(inputFileName19)
inputFileList.append(inputFileName20)

for inputFileName in inputFileList:
    outputFileName = inputFileName + "_Final"
    inputFileHandler0 = open(inputFileName,"r")
    outputFileHandler = open(outputFileName,"w")
    for line in inputFileHandler0.readlines():
        if len(line.strip().split(" ")) == 7 and line.strip().split(" ")[5] == "PolyIRTKDebug":
        # if len(line.strip().split("\t")) == 5 and line.strip().split("\t")[4] == "NYU_IRTK_Clueweb09":
            print line.strip()
            outputFileHandler.write(line)
    inputFileHandler0.close()
    outputFileHandler.close()
    print "Overall:"
    print inputFileName
    print outputFileName

exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDoc_clueweb09B_09_12_pAt10_100%_RAW_OR_Final"
inputFileHandler0 = open(inputFileName,"r")
outputFileName0 = inputFileName + "_part0_year2009_Final"
outputFileName1 = inputFileName + "_part1_year2010_Final"
outputFileName2 = inputFileName + "_part2_year2011_Final"
outputFileName3 = inputFileName + "_part3_year2012_Final"
outputFileHandler0 = open(outputFileName0,"w")
outputFileHandler1 = open(outputFileName1,"w")
outputFileHandler2 = open(outputFileName2,"w")
outputFileHandler3 = open(outputFileName3,"w")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    qid = int(lineElements[0])
    if qid >=1 and qid<=50:
        outputFileHandler0.write(line)
    elif qid >=51 and qid<=100:
        outputFileHandler1.write(line)
    elif qid >=101 and qid<=150:
        outputFileHandler2.write(line)
    elif qid >=151 and qid<=200:
        outputFileHandler3.write(line)
    else:
        print "unsupported."    
outputFileHandler0.close()
outputFileHandler1.close()
outputFileHandler2.close()
outputFileHandler3.close()
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName0:",outputFileName0
print "outputFileName1:",outputFileName1
print "outputFileName2",outputFileName2
print "outputFileName3",outputFileName3
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDoc_clueweb09B_year2009_pAt10_1%_RAW"
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDoc_related_results/results_xDoc_clueweb09B_year2009_pAt10_1%"

# inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_results_UP_AND_clueweb09B_quality_20140615_10%_RAW"
# outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/raw_results_UP_AND_clueweb09B_quality_20140615_10%"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")
for line in inputFileHandler0.readlines():
    # if line.strip().endswith("NYU_IRTK_Clueweb09"):
    if len(line.strip().split(" ")) == 7 and line.strip().split(" ")[5] == "PolyIRTKDebug":
        print line.strip()
        outputFileHandler.write(line)
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print inputFileName
print outputFileName
exit(1)



# for gov2
# on moa
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/secondFactorProbability/probabilityTable_fromTOP100Postings_20140518.csv"
# outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/secondFactorProbability/relRank_ProbabilityFromTOP100Postings_20140518_toolkit_friendly"
# for clueweb09B
# on vidaserver1
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/clueweb09B/clueweb09B_probability_2Dtable_20140613.csv"
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/clueweb09B/clueweb09B_probability_2Dtable_TOP100Postings_20140613_toolkit_friendly"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key1 = lineElements[0]
    for key2,value in enumerate(lineElements[1:]):
        key = str(key1) + "_" + str(key2)
        outputFileHandler.write(str(key) + " " + value + "\n")

print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
outputFileHandler.close()
inputFileHandler0.close()
exit(1)





initValue = 1.0
relRankClassUpperBound = initValue
print relRankClassUpperBound
for i in range(0,25):
    relRankClassUpperBound = relRankClassUpperBound / 2
    print relRankClassUpperBound
exit(1)

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/TOP100PostingsWithRanksANDScores_clueweb09B_trainingQueries_20140612"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[1]
    rank = int(currentLineElements[3])
    totalListLength = int(currentLineElements[4])
    if rank > totalListLength:
        print rank
        print totalListLength
        print numOfQueriesFallingThrough
        exit(1)
    if numOfQueriesFallingThrough % 1000000 == 0:
        print numOfQueriesFallingThrough,"processed."
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
inputFileHandler0.close()
print "Pass"
exit(1)

queryTermDict = {}
ifn = "/local_scratch/wei/workspace/NYU_IRTK/data/LMs/Clueweb09B/firstFactorProbablity/realFreqOfTermsFor_clueweb09B_queries_ALL_sortedByRandom_20140609_training"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm not in queryTermDict:
        queryTermDict[currentTerm] = 1
print "len(queryTermDict):",len(queryTermDict) 
inputFileHandler0.close()

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/termPieceInfo_trainingQueryTerms_clueweb09B_20140613"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/termPieceInfo_WHOLE_clueweb09B_20140613"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[1]
    if currentTerm in queryTermDict:
        outputFileHandler.write(currentLine)
    else:
        pass
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)

# make the file term Piece Info For whole clueweb09B lexicon with stepGap to be 2
# key: list length class label
# value: # of ranges
listLengthClassWithNumOfRangesDict = {}
# key: list length class label
# value: lower bound threshold for this class
listLengthClassWithLowerBoundThresholdDict = {}
# Load the predefined class table: each line is "<label> <lowerBound> <numOfRanges>".
ifn = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/predefinedClassLabelLowerBoundOfListLength_clueweb09B_20140612"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentListLengthClassLabel = int(lineElements[0])
    currentLowerBoundThreshold = int(lineElements[1])
    currentNumOfRanges = int(lineElements[2])
    listLengthClassWithLowerBoundThresholdDict[currentListLengthClassLabel] = currentLowerBoundThreshold
    listLengthClassWithNumOfRangesDict[currentListLengthClassLabel] = currentNumOfRanges 
inputFileHandler0.close()
# print "len(listLengthClassWithLowerBoundThresholdDict):",len(listLengthClassWithLowerBoundThresholdDict)
# print "len(listLengthClassWithNumOfRangesDict):",len(listLengthClassWithNumOfRangesDict)

# For every term in the index statistics file, emit one line:
#   "<termID> <term> <listLength> <classLabel> <numOfRanges> <rangeIdx> <rangeSize> ..."
# where the list length is repeatedly halved into numOfRanges ranges
# (each range holds half of the postings still unassigned; the last range
# takes the remainder).
inputFileName1 = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatistics_sortedByListLength_20140606"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    currentListLength = int(currentLineElements[2])
    outputLine = ""
    # Find the class whose [lowerBound, nextLowerBound) interval contains
    # this list length.
    # NOTE(review): the i+1 lookup assumes class label 76 exists in the
    # threshold dict as a sentinel upper bound -- confirm against the
    # predefined-class file.
    # NOTE(review): if no class matches, currentNumOfRanges silently keeps
    # the value from the PREVIOUS term (and i stays 75) -- presumably every
    # list length falls in some class; verify.
    for i in range(0,76):
        if currentListLength >= listLengthClassWithLowerBoundThresholdDict[i] and currentListLength < listLengthClassWithLowerBoundThresholdDict[i+1]:
            currentNumOfRanges = listLengthClassWithNumOfRangesDict[i]
            break
    outputLine = str(currentTermID) + " " + str(currentTerm) + " " + str(currentListLength) + " " + str(i) + " " + str(currentNumOfRanges) + " "
    numOfPostingsInCurrentRange = 0
    numOfPostingsLeft = currentListLength
    # Each range takes half of the postings not yet assigned; the final
    # range absorbs whatever remains.
    for i in range(0,currentNumOfRanges-1):
        numOfPostingsInCurrentRange = int(numOfPostingsLeft / 2)
        numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentRange
        outputLine += str(i) + " " + str(numOfPostingsInCurrentRange) + " "
    outputLine += str(currentNumOfRanges-1) + " " + str(numOfPostingsLeft)
    outputLine = outputLine.strip()
    print outputLine
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/predefinedClassLabelLowerBoundOfListLength_clueweb09B_20140612_Middle2"
inputFileHandler0 = open(inputFileName,"r")
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/predefinedClassLabelLowerBoundOfListLength_clueweb09B_20140612"
outputFileHandler = open(outputFileName,"w")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    if len(lineElements) == 3:
        outputFileHandler.write(line)
    else:
        numOfRanges = 1
        numOfPostingsLeft = int(lineElements[1])
        while numOfPostingsLeft > 20:
            numOfPostingsLeft = numOfPostingsLeft / 2
            numOfRanges += 1
        outputFileHandler.write(lineElements[0] + " " + lineElements[1] + " " + str(numOfRanges) + "\n")

inputFileHandler0.close()    
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/data/secondFactorProbability/predefinedClassLabelLowerBoundOfListLength_clueweb09B_20140612_Middle"
inputFileHandler0 = open(inputFileName,"r")
for index,line in enumerate(inputFileHandler0.readlines()):
    print index,line.strip()
inputFileHandler0.close()
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/qrels.web09_12catA.txt_OLD"
inputFileHandler0 = open(inputFileName,"r")
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/results/dynamicUnigramFromWei/clueweb09B/qrels.web09_12catA.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    # ; |, |\*|\n
    lineElements = line.strip().split(" ")
    # print lineElements
    outputLine = ""
    for currentLineElement in lineElements:
        if currentLineElement.strip() != "":
            outputLine += currentLineElement.strip() + " "
    outputLine = outputLine.strip()
    outputLine += "\n"
    outputFileHandler.write(outputLine)
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDocValues_20140611_RAW"
inputFileHandler0 = open(inputFileName,"r")
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/xDocValues_20140611"
outputFileHandler = open(outputFileName,"w")
currentLine = inputFileHandler0.readline()
lineShouldBeCounter = 0
numOfDocsAbsent = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[0])
    if currentDocID == lineShouldBeCounter:
        outputFileHandler.write(currentLine)
    else:
        while currentDocID != lineShouldBeCounter:
            print "lineShouldBeCounter:",lineShouldBeCounter
            # print "currentDocID:",currentDocID
            # exit(1)
            outputFileHandler.write(str(lineShouldBeCounter) + " " + "0" + " " + "0" + "\n")
            numOfDocsAbsent += 1
            lineShouldBeCounter += 1
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler0.readline()
    lineShouldBeCounter += 1
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
print "numOfDocsAbsent:",numOfDocsAbsent
exit(1)

'''
# compute the navigation file for the clueweb09B document posting array
totalNumOfPostings = 0
totalNumOfTerms = 0
currentInputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
inputFileHandler0 = open(currentInputFileName,"r")
statinfo = os.stat(currentInputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
inputFileHandler0.seek(440226348)
numOfBytes = 0
while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        print docID,score1
        exit(1)
        totalNumOfPostings += score1
        totalNumOfTerms += 1
        numOfBytes += 4 + 4 + score1 * 4
        inputFileHandler0.seek(numOfBytes)
        
        if totalNumOfTerms % 100000 == 0:
            print totalNumOfTerms,numOfBytes

print "overall:"
print "numOfBytes: ",numOfBytes
print "totalNumOfPostings: ",totalNumOfPostings
print "totalNumOfTerms: :",totalNumOfTerms
print "Ends."
inputFileHandler0.close()
exit(1)
'''

# compute the navigation file for the clueweb09B document posting array
# Record layout (from the unpack/seek pattern below): uint32 docID,
# uint32 numOfPostings, then numOfPostings uint32 values that are skipped
# here via seek() rather than read.
totalNumOfPostings = 0
totalNumOfTerms = 0
currentInputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
inputFileHandler0 = open(currentInputFileName,"r")
statinfo = os.stat(currentInputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0
while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        #print docID,score1
        #exit(1)
        totalNumOfPostings += score1  # score1 = # of postings in this record
        totalNumOfTerms += 1
        # advance past this record's payload without reading it
        numOfBytes += 4 + 4 + score1 * 4
        inputFileHandler0.seek(numOfBytes)
        
        if totalNumOfTerms % 100000 == 0:
            # NOTE(review): prints docID+1 (count of docs so far if docIDs are
            # dense and ascending) with the byte offset -- confirm intent.
            print docID+1,numOfBytes

print "overall:"
print "numOfBytes: ",numOfBytes
print "totalNumOfPostings: ",totalNumOfPostings
print "totalNumOfTerms: :",totalNumOfTerms
print "Ends."
inputFileHandler0.close()
exit(1)

numOfQuerySlotsInTotal = 0
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/effectToMake1StFactor/good_turing_estimation_outputFor_clueweb09B_queries_ALL_sortedByRandom_20140609_training"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()

for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    currentNumOfItemsBelongToThisSpecies = int(currentLineElements[1])
    numOfQuerySlotsInTotal += currentNumOfItemsBelongToThisSpecies
print "Overall:"
print "numOfQuerySlotsInTotal:",numOfQuerySlotsInTotal
inputFileHandler0.close()
exit(1)

numOfTermsInTotal = 0
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/effectToMake1StFactor/freqOfFreqInQueriesFor_clueweb09B_queries_ALL_sortedByRandom_20140609_training"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    currentNumOfTerms = int(currentLineElements[1])
    numOfTermsInTotal += currentNumOfTerms
print "Overall:"
print "numOfTermsInTotal:",numOfTermsInTotal
inputFileHandler0.close()
exit(1)

numOfQuerySlotsInTotal = 0
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/effectToMake1StFactor/realFreqOfTermsFor_clueweb09B_queries_ALL_sortedByRandom_20140609_training"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    currentNumOfQuerySlots = int(currentLineElements[1])
    numOfQuerySlotsInTotal += currentNumOfQuerySlots
print "Overall:"
print "numOfQuerySlotsInTotal:",numOfQuerySlotsInTotal
inputFileHandler0.close()
exit(1)




queryContentDict = {}
numOfDuplicatedTerms = 0
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_queries_ALL"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    #print "lineElements:",lineElements
    #exit(1)
    queryContent = lineElements[1]
    if queryContent not in queryContentDict:
        queryContentDict[queryContent] = 1
    else:
        queryContentDict[queryContent] += 1
        numOfDuplicatedTerms += 1
print "Overall:"
print "len(queryContentDict):",len(queryContentDict)
print "numOfDuplicatedTerms:",numOfDuplicatedTerms
inputFileHandler0.close()
exit(1)

docIDNumOfPostingsDict = {}
inputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_docID_numOfPostings_20140608_OLD"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[0])
    score1 = currentLineElements[1]
    if currentDocID not in docIDNumOfPostingsDict:
        docIDNumOfPostingsDict[currentDocID] = score1
    currentLine = inputFileHandler0.readline()
print "len(docIDNumOfPostingsDict):",len(docIDNumOfPostingsDict)
inputFileHandler0.close()

outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_docID_numOfPostings_20140608"
outputFileHandler = open(outputFileName,"w")
totalNumOfPostings = 0
for i in range(0,50220423):
    if i not in docIDNumOfPostingsDict:
        outputFileHandler.write(str(i) + " " + "0" + "\n")
    else:
        outputFileHandler.write(str(i) + " " + docIDNumOfPostingsDict[i] + "\n")
        totalNumOfPostings += int(docIDNumOfPostingsDict[i])
print "overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
print "totalNumOfPostings:",totalNumOfPostings
exit(1)

# Walk the binary document posting array and write a text "docID #postings"
# line per record. Record layout (from unpack/seek below): uint32 docID,
# uint32 numOfPostings, then numOfPostings uint32 values skipped via seek().
totalNumOfPostings = 0
totalNumOfTerms = 0
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_docID_numOfPostings_20140608"
outputFileHandler = open(outputFileName,"w")
currentInputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
# NOTE(review): opened in text mode "r"; works for binary data on Linux but
# not portably -- confirm "rb" is not needed on the target platform.
inputFileHandler0 = open(currentInputFileName,"r")
statinfo = os.stat(currentInputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0
while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        outputFileHandler.write(str(docID) + " " + str(score1) + "\n")
        # print docID,score1
        totalNumOfPostings += score1
        totalNumOfTerms += 1
        # skip this record's payload
        numOfBytes += 4 + 4 + score1 * 4
        inputFileHandler0.seek(numOfBytes)
        if totalNumOfTerms % 100000 == 0:
            print "# of docs processed:",totalNumOfTerms

print "overall:"
print "outputFileName:",outputFileName
print "numOfBytes: ",numOfBytes
print "totalNumOfPostings: ",totalNumOfPostings
print "totalNumOfTerms: :",totalNumOfTerms
print "Ends."
inputFileHandler0.close()
outputFileHandler.close()
exit(1)



# Debug dump of the binary document posting array: print each record's
# header and every termID in its payload. Record layout: uint32 docID,
# uint32 numOfPostings, then numOfPostings uint32 termIDs.
numOfBytesRead = 0
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
inputFileHandler0 = open(inputFileName,"r")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0

while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        print docID,score1
        # read (not skip) the payload so the file position stays in sync
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4)
            (termID,) = unpack( "1I", byteString)
            print "----->",termID
        numOfBytes += 4 + 4 + score1 * 4
        # inputFileHandler0.seek(numOfBytesRead)
print "Ends."
inputFileHandler0.close()
exit(1)

# Debug dump of one ranksANDImpact partition file. Record layout: uint32
# termID, uint32 count, then count triples of (uint32 docID, float32
# impactScore, uint32 rankInList) -- 12 bytes per posting.
numOfBytesRead = 0
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange0_20140607"
inputFileHandler0 = open(inputFileName,"r")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0

while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (termID,score1) = unpack( "2I", byteString)
        print termID,score1
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4)
            (docID,impactScore,rankInList) = unpack( "1I1f1I", byteString)
            print docID,impactScore,rankInList
        numOfBytes += 4 + 4 + score1 * 12
        # inputFileHandler0.seek(numOfBytesRead)
print "Ends."
inputFileHandler0.close()
exit(1)

score1 = 17075485964
numOfProcess = 20
numOfPostingsForEachProcess = int(score1 / numOfProcess)
print "score1:",score1
print "numOfProcess:",numOfProcess
print "numOfPostingsForEachProcess:",numOfPostingsForEachProcess
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    totalNumOfPostings += int(currentLineElements[2])
    if currentLineElements[0] == "0":
        print currentLineElements[0],currentLineElements[1],totalNumOfPostings
    if totalNumOfPostings > numOfPostingsForEachProcess:
        print currentLineElements[0],currentLineElements[1],totalNumOfPostings
        totalNumOfPostings = 0
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    totalNumOfPostings += int(currentLineElements[2])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)

outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606_Completed"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606_RAW"
inputFileHandler0 = open(inputFileName,"r")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    if len(currentLineElements) == 3:
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

# step1:
queryTermDict = {}
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/realFreqOfTermsIn_100KQueries_0_1_95%"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    currentTerm = currentLineElements[0]
    if currentTerm not in queryTermDict:
        queryTermDict[currentTerm] = 1
print "len(queryTermDict):",len(queryTermDict)
inputFileHandler0.close()

# step2:
qualityMeasureQueryTermDict = {}
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/04_06.topics.701-850.polyIRTKCompatibleMode_manuallyReformatted"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    queryTermElements = line.strip().split(":")[1].split(" ")
    for queryTerm in queryTermElements:
        if queryTerm.lower().strip() != "" and queryTerm.lower() not in qualityMeasureQueryTermDict:
            qualityMeasureQueryTermDict[queryTerm.lower()] = 1
print "len(qualityMeasureQueryTermDict):",len(qualityMeasureQueryTermDict)
print "Overall:"
print set(qualityMeasureQueryTermDict) - set(queryTermDict)
print len(set(qualityMeasureQueryTermDict) - set(queryTermDict))
inputFileHandler0.close()
exit(1)


numOfBytesRead = 0
inputFileList = []
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange0_20140607"
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange1_20140607"
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange2_20140607"
inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange3_20140607"
inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange4_20140607"
inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange5_20140607"
inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange6_20140607"
inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange7_20140607"
inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange8_20140607"
inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange9_20140607"
inputFileList.append(inputFileName1)
inputFileList.append(inputFileName2)
inputFileList.append(inputFileName3)
inputFileList.append(inputFileName4)
inputFileList.append(inputFileName5)
inputFileList.append(inputFileName6)
inputFileList.append(inputFileName7)
inputFileList.append(inputFileName8)
inputFileList.append(inputFileName9)
inputFileList.append(inputFileName10)

totalNumOfPostings = 0
totalNumOfTerms = 0
for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"r")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytes = 0

    while numOfBytes < fileSize:
            byteString = inputFileHandler0.read(4 + 4)
            (termID,score1) = unpack( "2I", byteString)
            # print termID,score1
            totalNumOfPostings += score1
            totalNumOfTerms += 1
            for i in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4)
                (docID,impactScore,rank) = unpack( "1I1f1I", byteString)
                print docID,impactScore,rank

            numOfBytes += 4 + 4 + score1 * 12
            inputFileHandler0.seek(numOfBytes)
            if totalNumOfTerms % 100000 == 0:
                print "# of terms processed:",totalNumOfTerms
    print "numOfBytes:",numOfBytes
    print "totalNumOfPostings:",totalNumOfPostings
    print "totalNumOfTerms:",totalNumOfTerms
print "overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "totalNumOfTerms:",totalNumOfTerms
print "Ends."
inputFileHandler0.close()
exit(1)

# Debug dump of the binary document posting array: print each record's
# header and every termID in its payload. Record layout: uint32 docID,
# uint32 numOfPostings, then numOfPostings uint32 termIDs.
numOfBytesRead = 0
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_simple_document_posting_array_20140606"
inputFileHandler0 = open(inputFileName,"r")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0

while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        print docID,score1
        # read (not skip) the payload so the file position stays in sync
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4)
            (termID,) = unpack( "1I", byteString)
            print "----->",termID
        numOfBytes += 4 + 4 + score1 * 4
        # inputFileHandler0.seek(numOfBytesRead)
print "Ends."
inputFileHandler0.close()
exit(1)

# Debug dump of one ranksANDImpact partition file. Record layout: uint32
# termID, uint32 count, then count triples of (uint32 docID, float32
# impactScore, uint32 rankInList) -- 12 bytes per posting.
numOfBytesRead = 0
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/ranksANDImpactsOfEachTermForClueweb09B/ranksANDImpact_termIDRange0_20140607"
inputFileHandler0 = open(inputFileName,"r")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0

while numOfBytes < fileSize:
        byteString = inputFileHandler0.read(4 + 4)
        (termID,score1) = unpack( "2I", byteString)
        print termID,score1
        for i in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4)
            (docID,impactScore,rankInList) = unpack( "1I1f1I", byteString)
            print docID,impactScore,rankInList
        numOfBytes += 4 + 4 + score1 * 12
        # inputFileHandler0.seek(numOfBytesRead)
print "Ends."
inputFileHandler0.close()
exit(1)

score1 = 17075485964
numOfProcess = 20
numOfPostingsForEachProcess = int(score1 / numOfProcess)
print "score1:",score1
print "numOfProcess:",numOfProcess
print "numOfPostingsForEachProcess:",numOfPostingsForEachProcess
inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    totalNumOfPostings += int(currentLineElements[2])
    if currentLineElements[0] == "0":
        print currentLineElements[0],currentLineElements[1],totalNumOfPostings
    if totalNumOfPostings > numOfPostingsForEachProcess:
        print currentLineElements[0],currentLineElements[1],totalNumOfPostings
        totalNumOfPostings = 0
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    totalNumOfPostings += int(currentLineElements[2])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "totalNumOfPostings:",totalNumOfPostings
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)

outputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606_Completed"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BOverallStatistics_20140606_RAW"
inputFileHandler0 = open(inputFileName,"r")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    if len(currentLineElements) == 3:
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

# Compute the average and the middle-of-sorted-order value of both the
# #postings column and the xdoc column over all 25205179 rows.
xPoints = []
yPoints = []
numOfDocs = 25205179
sumNumOfPostings = 0
sumNumOfXdoc = 0.0
inputFileName = "/xDocRelated/outputXDocRelatedValues_WHOLE_20140603_xDocValues_sortedByXdocValue"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
lineNumber = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[1])
    currentXdocValue = float(currentLineElements[2])

    xPoints.append(currentNumOfPostings)
    yPoints.append(currentXdocValue)

    sumNumOfPostings += currentNumOfPostings
    sumNumOfXdoc += currentXdocValue

    if lineNumber % 1000000 == 0:
        print "lineNumber:",lineNumber,"processed"

    currentLine = inputFileHandler0.readline()
    lineNumber += 1

# each column is sorted independently before picking the middle element
xPoints.sort()
yPoints.sort()
print "avg # of postings:",sumNumOfPostings/numOfDocs
print "avg # of xdoc:",sumNumOfXdoc/numOfDocs
# NOTE(review): index 12602589 is ~numOfDocs/2, so these two prints report
# the MEDIAN of each sorted column although the labels say "mean" -- confirm
# which statistic was intended.
print "mean # of postings:",xPoints[12602589]
print "mean # of xdoc:",yPoints[12602589]
inputFileHandler0.close()
exit(1)

randomLineNumberDict = {}
randomLineNumberList = []
while len(randomLineNumberDict) != 10000:
    currentLineNumber = randint(1,25205179)
    if currentLineNumber not in randomLineNumberDict:
        randomLineNumberDict[currentLineNumber] = 1
print len(randomLineNumberDict)
randomLineNumberList = randomLineNumberDict.keys()
randomLineNumberList.sort()
# print randomLineNumberList

xPoints = []
yPoints = []

inputFileName = "/xDocRelated/outputXDocRelatedValues_WHOLE_20140603_xDocValues_sortedByXdocValue"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
lineNumber = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[1])
    currentXdocValue = float(currentLineElements[2])

    '''
    xPoints.append(currentNumOfPostings)
    yPoints.append(currentXdocValue)
    '''

    if lineNumber not in randomLineNumberDict:
        pass
    else:
        xPoints.append(currentNumOfPostings)
        yPoints.append(currentXdocValue)

    if lineNumber % 1000000 == 0:
        print "lineNumber:",lineNumber,"processed"

    currentLine = inputFileHandler0.readline()
    lineNumber += 1
inputFileHandler0.close()

print len(xPoints)
print len(yPoints)

matplotlib.pyplot.autoscale(enable=True, axis='both', tight=None)
matplotlib.pyplot.scatter(xPoints,yPoints)
matplotlib.pyplot.show()
exit(1)


outputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_WHOLE_20140603_xDocValues_sortedByXdocValue"
outputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_WHOLE_20140603_prefixXdocValues_sortedByXdocValue"
outputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_WHOLE_20140603_prefixXdocPercentages_sortedByXdocValue"
outputFileHandler1 = open(outputFileName1,"w")
outputFileHandler2 = open(outputFileName2,"w")
outputFileHandler3 = open(outputFileName3,"w")

inputFileName = "/xDocRelated/outputXDocRelatedValues_WHOLE_Advanced_reformatted_20140601_FINAL_sortedByXdocValue"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[0]
    currentNumOfPostings = currentLineElements[1]
    currentXdocValue = currentLineElements[2]

    pt_max = currentLineElements[3]
    pt_20p = currentLineElements[4]
    pt_40p = currentLineElements[5]
    pt_60p = currentLineElements[6]
    pt_80p = currentLineElements[7]
    pt_min = currentLineElements[8]

    prefixXdocValueAtFirstPosition = currentLineElements[9]
    prefixXdocValueAt20p = currentLineElements[10]
    prefixXdocValueAt40p = currentLineElements[11]
    prefixXdocValueAt60p = currentLineElements[12]
    prefixXdocValueAt80p = currentLineElements[13]
    prefixXdocValueAtLastPosition = currentLineElements[14]

    prefixXdocPercentageAtFirstPosition = currentLineElements[15]
    prefixXdocPercentageAt20 = currentLineElements[16]
    prefixXdocPercentageAt40 = currentLineElements[17]
    prefixXdocPercentageAt60 = currentLineElements[18]
    prefixXdocPercentageAt80 = currentLineElements[19]
    prefixXdocPercentageAtLastPosition = currentLineElements[20]

    outputLine1 = currentDocID + " " + currentNumOfPostings + " " + currentXdocValue + " " + pt_max + " " + pt_20p + " " + pt_40p + " " + pt_60p + " " + pt_80p + " " + pt_min + "\n"
    outputLine2 = currentDocID + " " + currentNumOfPostings + " " + currentXdocValue + " " + prefixXdocValueAtFirstPosition + " " + prefixXdocValueAt20p + " " + prefixXdocValueAt40p + " " + prefixXdocValueAt60p + " " + prefixXdocValueAt80p + " " + prefixXdocValueAtLastPosition + "\n"
    outputLine3 = currentDocID + " " + currentNumOfPostings + " " + currentXdocValue + " " + prefixXdocPercentageAtFirstPosition + " " + prefixXdocPercentageAt20 + " " + prefixXdocPercentageAt40 + " " + prefixXdocPercentageAt60 + " " + prefixXdocPercentageAt80 + " " + prefixXdocPercentageAtLastPosition + "\n"
    outputFileHandler1.write(outputLine1)
    outputFileHandler2.write(outputLine2)
    outputFileHandler3.write(outputLine3)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler1.close()
outputFileHandler2.close()
outputFileHandler3.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName1:",outputFileName1
print "outputFileName2:",outputFileName2
print "outputFileName3:",outputFileName3
exit(1)

'''
randomQueryIDsDict = {}
# random qids components
while len(randomQueryIDsDict) != 75:
    currentRandomQID = randint(710,850)
    if currentRandomQID not in randomQueryIDsDict:
        randomQueryIDsDict[currentRandomQID] = 1
print randomQueryIDsDict
exit(1)
'''

randomQueryIDsDict = {769: 1, 770: 1, 771: 1, 772: 1, 773: 1, 774: 1, 777: 1, 779: 1, 780: 1, 783: 1, 787: 1, 788: 1, 792: 1, 794: 1, 795: 1, 797: 1, 799: 1, 800: 1, 801: 1, 802: 1, 804: 1, 805: 1, 808: 1, 809: 1, 810: 1, 711: 1, 850: 1, 814: 1, 815: 1, 817: 1, 818: 1, 822: 1, 824: 1, 825: 1, 827: 1, 828: 1, 829: 1, 830: 1, 831: 1, 832: 1, 835: 1, 837: 1, 710: 1, 839: 1, 713: 1, 715: 1, 844: 1, 845: 1, 846: 1, 849: 1, 722: 1, 723: 1, 724: 1, 725: 1, 727: 1, 729: 1, 732: 1, 733: 1, 734: 1, 735: 1, 740: 1, 741: 1, 743: 1, 746: 1, 749: 1, 750: 1, 753: 1, 754: 1, 755: 1, 756: 1, 758: 1, 759: 1, 762: 1, 765: 1, 766: 1}
randomQueryIDList = randomQueryIDsDict.keys()
randomQueryIDList.sort()
print "len(randomQueryIDList):",randomQueryIDList

outputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/testRanking_20140514_K1_B_option4_AND_TOP2M_reformatted_randomSelect75_training_try2"
outputFileHandler1 = open(outputFileName1,"w")

outputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/testRanking_20140514_K1_B_option4_AND_TOP2M_reformatted_randomSelect75_testing_try2"
outputFileHandler2 = open(outputFileName2,"w")

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/testRanking_20140514_K1_B_option4_AND_TOP2M_reformatted"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    currentQID = int(lineElements[0])
    if currentQID not in randomQueryIDsDict:
        outputFileHandler2.write(line)
    else:
        outputFileHandler1.write(line)


# Disabled variant of the split above: partitions the qrels file (instead of
# the ranking results) into the same training/testing query sets.
'''
outputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/qrels.gov2.all_randomSelect75_training_try2"
outputFileHandler1 = open(outputFileName1,"w")

outputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/qrels.gov2.all_randomSelect75_testing_try2"
outputFileHandler2 = open(outputFileName2,"w")

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/qrels.gov2.all"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = int(lineElements[0])
    if currentQID not in randomQueryIDsDict:
        outputFileHandler2.write(line)
    else:
        outputFileHandler1.write(line)
'''

# Close the split files and report what was read/written.  The exit(1) makes
# everything below unreachable: this file is a scratchpad of one-off jobs and
# only the topmost section is live at any time.
inputFileHandler0.close()
outputFileHandler1.close()
outputFileHandler2.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName1:",outputFileName1
print "outputFileName2:",outputFileName2
exit(1)

inputFileName = ""
exit(1)

#print stem("appealing")
#exit(1)

outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/head95KQueries_after_stopwordsRemoved_stemmed"
outputFileHandler = open(outputFileName,"w")

stopWordListDict = {}
ifn = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/stoplist.txt"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip()
    if currentTerm not in stopWordListDict:
        stopWordListDict[currentTerm] = 1
inputFileHandler0.close()
print "len(stopWordListDict):",len(stopWordListDict)

inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/head95KQueries"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    data = lineElements[1]
    data = data.lower()
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    queryContent = data
    queryNewContent = ""
    queryNewContent2 = ""
    queryContentElements = queryContent.strip().split(" ")
    for currentQueryElement in queryContentElements:
        currentQueryElement = currentQueryElement.strip()
        if currentQueryElement != "":
            if currentQueryElement in stopWordListDict:
                pass
            else:
                queryNewContent += currentQueryElement + " "
                currentQueryElementInStemmedForm = stem(currentQueryElement)
                queryNewContent2 += currentQueryElementInStemmedForm + " "
    queryNewContent2 = queryNewContent2.strip()
    print data
    print queryContent
    print queryNewContent
    print queryNewContent2
    print
inputFileHandler0.close()
outputFileHandler.close()
exit(1)

sumProbablity = 0.0
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/polyIRToolkit_Wei/wholeLexiconWithGoodTuringProbablity_20140602"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
sumProbablity += float(currentLineElements[1])
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    sumProbablity += float(currentLineElements[1])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "sumProbablity:",sumProbablity
exit(1)




currentDocDict = {}
TOTAL_NUM_OF_POSTINGS = 6451948010
numOfPostingNeededToBePoppedAtDebugPercentage = 5000000
numOfPostingNeededToBePoppedAt1Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.01)
numOfPostingNeededToBePoppedAt2Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.02)
numOfPostingNeededToBePoppedAt3Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.03)
numOfPostingNeededToBePoppedAt4Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.04)
numOfPostingNeededToBePoppedAt5Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.05)
numOfPostingNeededToBePoppedAt6Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.06)
numOfPostingNeededToBePoppedAt7Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.07)
numOfPostingNeededToBePoppedAt8Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.08)
numOfPostingNeededToBePoppedAt9Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.09)
numOfPostingNeededToBePoppedAt10Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.1)
numOfPostingNeededToBePoppedAt15Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.15)
numOfPostingNeededToBePoppedAt20Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.2)
numOfPostingNeededToBePoppedAt30Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.3)
numOfPostingNeededToBePoppedAt40Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.4)
numOfPostingNeededToBePoppedAt50Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.5)
numOfPostingNeededToBePoppedAt60Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.6)
numOfPostingNeededToBePoppedAt70Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.7)
numOfPostingNeededToBePoppedAt80Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.8)
numOfPostingNeededToBePoppedAt90Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.9)
numOfPostingNeededToBePoppedAt100Percentage = int(TOTAL_NUM_OF_POSTINGS * 1.0)
print "numOfPostingNeededToBePoppedAtDebugPercentage:",numOfPostingNeededToBePoppedAtDebugPercentage
print "numOfPostingNeededToBePoppedAt1Percentage:",numOfPostingNeededToBePoppedAt1Percentage
print "numOfPostingNeededToBePoppedAt2Percentage:",numOfPostingNeededToBePoppedAt2Percentage
print "numOfPostingNeededToBePoppedAt3Percentage:",numOfPostingNeededToBePoppedAt3Percentage
print "numOfPostingNeededToBePoppedAt4Percentage:",numOfPostingNeededToBePoppedAt4Percentage
print "numOfPostingNeededToBePoppedAt5Percentage:",numOfPostingNeededToBePoppedAt5Percentage
print "numOfPostingNeededToBePoppedAt6Percentage:",numOfPostingNeededToBePoppedAt6Percentage
print "numOfPostingNeededToBePoppedAt7Percentage:",numOfPostingNeededToBePoppedAt7Percentage
print "numOfPostingNeededToBePoppedAt8Percentage:",numOfPostingNeededToBePoppedAt8Percentage
print "numOfPostingNeededToBePoppedAt9Percentage:",numOfPostingNeededToBePoppedAt9Percentage
print "numOfPostingNeededToBePoppedAt10Percentage:",numOfPostingNeededToBePoppedAt10Percentage
print "numOfPostingNeededToBePoppedAt15Percentage:",numOfPostingNeededToBePoppedAt15Percentage
print "numOfPostingNeededToBePoppedAt20Percentage:",numOfPostingNeededToBePoppedAt20Percentage
print "numOfPostingNeededToBePoppedAt30Percentage:",numOfPostingNeededToBePoppedAt30Percentage
print "numOfPostingNeededToBePoppedAt40Percentage:",numOfPostingNeededToBePoppedAt40Percentage
print "numOfPostingNeededToBePoppedAt50Percentage:",numOfPostingNeededToBePoppedAt50Percentage
print "numOfPostingNeededToBePoppedAt60Percentage:",numOfPostingNeededToBePoppedAt60Percentage
print "numOfPostingNeededToBePoppedAt70Percentage:",numOfPostingNeededToBePoppedAt70Percentage
print "numOfPostingNeededToBePoppedAt80Percentage:",numOfPostingNeededToBePoppedAt80Percentage
print "numOfPostingNeededToBePoppedAt90Percentage:",numOfPostingNeededToBePoppedAt90Percentage
print "numOfPostingNeededToBePoppedAt100Percentage:",numOfPostingNeededToBePoppedAt100Percentage
numOfBytes = 0
numOfPostingPopped = 0
numOfPostingBeInTOP10 = 0

inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/allPostingsBeingPopped20140321Morning_weight_0_WHOLE"
inputFileHandler0 = open(inputFileName3,"rb")
print "--->posting file to evaluate:",inputFileName3
statinfo = os.stat(inputFileName3)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfResultsReturnedCountedUpTo10 = 0

while numOfBytes < fileSize:
    # old version
    # each time, just read the info of ONE posting, too few
    # byteString = inputFileHandler0.read(4 + 4 + 4)
    # (termID,docID,currentProbability) = unpack( "2I1f", byteString)

    # current version
    # each time, read the info of 1M postings
    byteStringBuffer = inputFileHandler0.read( 1000000 * 16)
    byteStringBufferIndexPosition = 0
    for i in range(0,1000000):
        byteString = byteStringBuffer[byteStringBufferIndexPosition:byteStringBufferIndexPosition+16]
        byteStringBufferIndexPosition += 16
        (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
        if docID not in currentDocDict:
            currentDocDict[docID] = 1
        else:
            currentDocDict[docID] += 1

        if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt2Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt4Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt6Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt7Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt8Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt9Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
            if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_debugPercentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_1Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt2Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_2Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName


            if numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_3Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt4Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_4Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName


            if numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_5Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt6Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_6Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt7Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_7Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt8Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_8Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt9Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_9Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_10Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_15Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_20Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_30Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_40Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_50Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_60Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_70Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_80Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_90Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt100Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/DocumentDistribution_100Percentage_weight0"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName


        numOfBytes += 12
        numOfPostingPopped += 1
        if numOfPostingPopped % 1000000 == 0:
            print str(numOfPostingPopped),"postings have been examined."

inputFileHandler0.close()
exit(1)



#a = 13.94123
#print round(a, 1)
#exit(1)

outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_WHOLE_Advanced_reformatted_20140601_FINAL_sortedByXdocValue"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_WHOLE_Advanced_20140601_FINAL_sortedByXdocValue"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    # print "len(currentLineElements):",len(currentLineElements)
    if len(currentLineElements) == 21:
        outputLine = str(currentLineElements[0]) + " " + str(currentLineElements[1]) + " "
        for currentValueInString in currentLineElements[2:]:
            # wrong way of using it
            # currentValueInFloat = round(float(currentValueInString), 5)
            currentValueInFloat = '{:.5e}'.format(float(currentValueInString))
            outputLine += str(currentValueInFloat) + " "
        outputLine = outputLine.strip()
        outputFileHandler.write(outputLine + "\n")
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler.close()

print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_0_6M_20140601_Refined"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_0_6M_20140531_RAW"
#inputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_6M_12M_20140531_RAW"
#inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_12M_18Dot5M_20140531_RAW"
#inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/outputXDocRelatedValues_18Dot5M_25M_20140531_RAW"
inputFileHandler0 = open(inputFileName4,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    if len(currentLineElements) == 9:
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler0.readline()

print "OVERALL:"
print "inputFileName:",inputFileName4
print "outputFileName:",outputFileName
inputFileHandler0.close()
outputFileHandler.close()
exit(1)

inputFileList = []
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/results/dynamicBigramFromJuan/allPostingsBeingPopped20140605_WHOLE"
#inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
#inputFileName5 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
#inputFileName6 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileList.append(inputFileName3)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
#inputFileList.append(inputFileName6)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    numOfBytesRead = 0
    while numOfBytesRead < fileSize:
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
        (termID,docID,finalProbability,impactScore) = unpack( "2I2f", byteString)
        print termID,docID,finalProbability,impactScore

        numOfBytesRead += 16

        # inputFileHandler0.seek(numOfBytesRead)
    inputFileHandler0.close()
print "Ends."
exit(1)

inputFileList = []
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/data/dynamicBigramFromJuan/forward.idx.dynamic_withImpactScoreAdded_20140605_WHOLE"
#inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
#inputFileName5 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
#inputFileName6 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileList.append(inputFileName3)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
#inputFileList.append(inputFileName6)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize

    numOfBytesRead = 0
    numOfDocsProcessed = 0
    # numOfDocsProcessed = 6000000

    while numOfBytesRead <= fileSize:
        if numOfDocsProcessed % 100000 == 0:
            print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
        # print "numOfBytesRead:",numOfBytesRead
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        print "(DEBUG)docID:",str(docID),str(score1)


        for index in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
            (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
            print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore

        numOfBytesRead += 4
        numOfBytesRead += 4
        numOfBytesRead += score1 * 4 * 5

        inputFileHandler0.seek(numOfBytesRead)
        numOfDocsProcessed += 1

        if numOfDocsProcessed == 2:
            break

    print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
    print "currentInputFileName:",currentInputFileName
    inputFileHandler0.close()
print "Ends."
exit(1)

currentDocDict = {}
TOTAL_NUM_OF_POSTINGS = 6451948010
numOfPostingNeededToBePoppedAtDebugPercentage = 5000000
numOfPostingNeededToBePoppedAt1Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.01)
numOfPostingNeededToBePoppedAt3Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.03)
numOfPostingNeededToBePoppedAt5Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.05)
numOfPostingNeededToBePoppedAt10Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.1)
numOfPostingNeededToBePoppedAt15Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.15)
numOfPostingNeededToBePoppedAt20Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.2)
numOfPostingNeededToBePoppedAt30Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.3)
numOfPostingNeededToBePoppedAt40Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.4)
numOfPostingNeededToBePoppedAt50Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.5)
numOfPostingNeededToBePoppedAt60Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.6)
numOfPostingNeededToBePoppedAt70Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.7)
numOfPostingNeededToBePoppedAt80Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.8)
numOfPostingNeededToBePoppedAt90Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.9)
numOfPostingNeededToBePoppedAt100Percentage = int(TOTAL_NUM_OF_POSTINGS * 1.0)
print "numOfPostingNeededToBePoppedAtDebugPercentage:",numOfPostingNeededToBePoppedAtDebugPercentage
print "numOfPostingNeededToBePoppedAt1Percentage:",numOfPostingNeededToBePoppedAt1Percentage
print "numOfPostingNeededToBePoppedAt3Percentage:",numOfPostingNeededToBePoppedAt3Percentage
print "numOfPostingNeededToBePoppedAt5Percentage:",numOfPostingNeededToBePoppedAt5Percentage
print "numOfPostingNeededToBePoppedAt10Percentage:",numOfPostingNeededToBePoppedAt10Percentage
print "numOfPostingNeededToBePoppedAt15Percentage:",numOfPostingNeededToBePoppedAt15Percentage
print "numOfPostingNeededToBePoppedAt20Percentage:",numOfPostingNeededToBePoppedAt20Percentage
print "numOfPostingNeededToBePoppedAt30Percentage:",numOfPostingNeededToBePoppedAt30Percentage
print "numOfPostingNeededToBePoppedAt40Percentage:",numOfPostingNeededToBePoppedAt40Percentage
print "numOfPostingNeededToBePoppedAt50Percentage:",numOfPostingNeededToBePoppedAt50Percentage
print "numOfPostingNeededToBePoppedAt60Percentage:",numOfPostingNeededToBePoppedAt60Percentage
print "numOfPostingNeededToBePoppedAt70Percentage:",numOfPostingNeededToBePoppedAt70Percentage
print "numOfPostingNeededToBePoppedAt80Percentage:",numOfPostingNeededToBePoppedAt80Percentage
print "numOfPostingNeededToBePoppedAt90Percentage:",numOfPostingNeededToBePoppedAt90Percentage
print "numOfPostingNeededToBePoppedAt100Percentage:",numOfPostingNeededToBePoppedAt100Percentage
numOfBytes = 0
numOfPostingPopped = 0
numOfPostingBeInTOP10 = 0

inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_50_WHOLE"
inputFileHandler0 = open(inputFileName3,"rb")
print "--->posting file to evaluate:",inputFileName3
statinfo = os.stat(inputFileName3)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfResultsReturnedCountedUpTo10 = 0

while numOfBytes < fileSize:
    # old version
    # each time, just read the info of ONE posting, too few
    # byteString = inputFileHandler0.read(4 + 4 + 4)
    # (termID,docID,currentProbability) = unpack( "2I1f", byteString)
    
    # current version
    # each time, read the info of 1M postings
    byteStringBuffer = inputFileHandler0.read( 1000000 * 16)
    byteStringBufferIndexPosition = 0
    for i in range(0,1000000):
        byteString = byteStringBuffer[byteStringBufferIndexPosition:byteStringBufferIndexPosition+16]
        byteStringBufferIndexPosition += 16
        (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
        if docID not in currentDocDict:
            currentDocDict[docID] = 1
        else:
            currentDocDict[docID] += 1

        if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
            if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_debugPercentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_1Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_3Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_5Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_10Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_15Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_20Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_30Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_40Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_50Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_60Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_70Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_80Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_90Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt100Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_100Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            

        numOfBytes += 12
        numOfPostingPopped += 1
        if numOfPostingPopped % 1000000 == 0:
            print str(numOfPostingPopped),"postings have been examined."

inputFileHandler0.close()
exit(1)

# Verify that the Xdoc column (field index 4) of the input file is sorted in
# non-increasing order; print "Passed." on success, "error" and exit on the
# first violation.
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_20140505Night"
inputFileHandler0 = open(inputFileName,"r")
previousXdocValue = None
for currentLine in inputFileHandler0:
    currentXdocValue = float(currentLine.strip().split(" ")[4])
    # BUGFIX: compare each value against its PREDECESSOR.  The old code
    # captured the first line's value once and compared every line against
    # it, which only verified "the first value is the maximum", not that the
    # file is actually sorted (e.g. 5,3,4 passed).
    if previousXdocValue is not None and currentXdocValue > previousXdocValue:
        print "error"
        exit(1)
    previousXdocValue = currentXdocValue
inputFileHandler0.close()
print "Passed."
exit(1)



inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnNumOfPostings_20140602_15Percentage_raw"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    #if len(lineElements) == 7:
    #    print lineElements
    if len(lineElements) == 7 and lineElements[-2] == "PolyIRTKDebug":
        print lineElements[0],lineElements[1],lineElements[2],lineElements[3],lineElements[4],"NYU_IRTK"
inputFileHandler0.close()
exit(1)



outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_XdocSquareDividedByNumOfPostings_20140601"
outputFileHanlder = open(outputFileName,"w")


inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_sortedByXdocMultipleByLogNum_20150505"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[2])
    currentXdocValue = float(currentLineElements[4])
    newlyAddedValue = math.pow(currentXdocValue, 2) / currentNumOfPostings
    outputLine = currentLine.strip() + " " + str(newlyAddedValue) + "\n" 
    outputFileHanlder.write(outputLine)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHanlder.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)



print "Begins..."

# on dodo:
termIDWithProbablityDict = {}
inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsFrom100KWithTermIDANDGoodTuringProbabilites_20140510"
inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTermID = int(lineElements[0])
    currentProbablity = float(lineElements[2])
    if currentTermID not in termIDWithProbablityDict:
        termIDWithProbablityDict[currentTermID] = currentProbablity
print "len(termIDWithProbablityDict):",len(termIDWithProbablityDict)
inputFileHandler0.close()

# options:
# NOTE(review): this list is consumed below by a loop that opens each entry
# with "rb" and struct-unpacks fixed-size binary records, yet the only entry
# appended is inputFileName2 -- the TEXT probabilities file loaded just
# above.  The commented-out binary inputFileName2..6 paths look like the
# intended inputs; confirm which file was meant before re-enabling this
# section.
inputFileList = []
#inputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_DEBUG_DOC.binary"
#inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
#inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
#inputFileName5 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
#inputFileName6 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileList.append(inputFileName2)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
#inputFileList.append(inputFileName6)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    
    numOfBytesRead = 0
    numOfDocsProcessed = 0
    # numOfDocsProcessed = 6000000
    
    print "docID #OfPostings xdoc P(t)_max P(t)_20% P(t)_40% P(t)_60% P(t)_80% P(t)_min"
    while numOfBytesRead < fileSize:
        if numOfDocsProcessed % 100000 == 0:
            print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
        # print "numOfBytesRead:",numOfBytesRead
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print "(DEBUG)docID:",str(docID),str(score1)
                
        positionAt20Percent = int(score1 * 0.2)
        positionAt40Percent = int(score1 * 0.4)
        positionAt60Percent = int(score1 * 0.6)
        positionAt80Percent = int(score1 * 0.8)
        
        valueAtFirst = 0.0
        valueAt20Percent = 0.0
        valueAt40Percent = 0.0
        valueAt60Percent = 0.0
        valueAt80Percent = 0.0
        valueAtLast = 0.0
        
        xdocValue = 0
        docPostingTupleList = []
        for index in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
            (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
            # print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
            
            currentProbablityValue = 0.0
            if termID not in termIDWithProbablityDict:
                currentProbablityValue = 0.0000000012845
                # print "----->",termID,currentProbablityValue,"UNK"
                # print
                xdocValue += currentProbablityValue
                docPostingTupleList.append((termID,currentProbablityValue))
            else:
                currentProbablityValue = termIDWithProbablityDict[termID]
                # print "----->",termID,currentProbablityValue,"FOUND"
                # print
                xdocValue += currentProbablityValue
                docPostingTupleList.append((termID,currentProbablityValue))
        docPostingTupleList.sort(cmp=None, key=itemgetter(1), reverse=True)
        
        # print "docID:",docID
        # print "xdocValue:",xdocValue
        # print "currentRankSortedByXdoc:",currentRankSortedByXdoc
        # print "# of postings:",len(docPostingTupleList)
        # print "P(t)_max:",docPostingTupleList[0]
        # print "P(t)_20%:",docPostingTupleList[positionAt20Percent]
        # print "P(t)_40%:",docPostingTupleList[positionAt40Percent]
        # print "P(t)_60%:",docPostingTupleList[positionAt60Percent]
        # print "P(t)_80%:",docPostingTupleList[positionAt80Percent]
        # print "P(t)_min:",docPostingTupleList[-1]
        # print docPostingTupleList
        # print
        
        # output try2 on 2014/05/31 night
        (_,valueAtFirst) = docPostingTupleList[0]
        (_,valueAt20Percent) = docPostingTupleList[positionAt20Percent]
        (_,valueAt40Percent) = docPostingTupleList[positionAt40Percent]
        (_,valueAt60Percent) = docPostingTupleList[positionAt60Percent]
        (_,valueAt80Percent) = docPostingTupleList[positionAt80Percent]
        (_,valueAtLast) = docPostingTupleList[-1]
        print docID,score1,xdocValue,valueAtFirst,valueAt20Percent,valueAt40Percent,valueAt60Percent,valueAt80Percent,valueAtLast

        numOfBytesRead += 4
        numOfBytesRead += 4
        numOfBytesRead += score1 * 4 * 5
        inputFileHandler0.seek(numOfBytesRead)
        numOfDocsProcessed += 1

        if numOfDocsProcessed == 1:
            break

    print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
    print "currentInputFileName:",currentInputFileName
    inputFileHandler0.close()
print "Ends."
exit(1)


print "Begins..."
docIDWithPrecomputedXdocValueDict = {}
# This file is sorted by Xdoc values
ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_Since20140428_sortedByXdocValues"
inputFileHanlder = open(ifn,"r")
currentLine = inputFileHanlder.readline()
currentLineNumber = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[1])
    currentDocXdocValue = float(currentLineElements[-1])
    if currentDocID not in docIDWithPrecomputedXdocValueDict:
        docIDWithPrecomputedXdocValueDict[currentDocID] = (currentDocXdocValue,currentLineNumber) 
    
    if currentLineNumber == 1000:
        break
    
    currentLine = inputFileHanlder.readline()
    currentLineNumber += 1

# debug injection on 2014/05/31 at school by Wei
docIDWithPrecomputedXdocValueDict[2] = (0.048221,37000000)
print "len(docIDWithPrecomputedXdocValueDict):",len(docIDWithPrecomputedXdocValueDict)
print "docIDWithPrecomputedXdocValueDict[23863602]:",docIDWithPrecomputedXdocValueDict[23863602]
print "docIDWithPrecomputedXdocValueDict[1466145]:",docIDWithPrecomputedXdocValueDict[1466145]
inputFileHanlder.close()

termIDANDTermDict = {}
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTerm = currentLineElements[1]
    if currentTermID not in termIDANDTermDict:
        termIDANDTermDict[currentTermID] = currentTerm 
    currentLine = inputFileHanlder.readline()
print "len(termIDANDTermDict):",len(termIDANDTermDict)
inputFileHanlder.close()

# on dodo:
termIDWithProbablityDict = {}
inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsFrom100KWithTermIDANDGoodTuringProbabilites_20140510"
inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTermID = int(lineElements[0])
    currentProbablity = float(lineElements[2])
    if currentTermID not in termIDWithProbablityDict:
        termIDWithProbablityDict[currentTermID] = currentProbablity
print "len(termIDWithProbablityDict):",len(termIDWithProbablityDict)
inputFileHandler0.close()

# options: choose which binary per-document posting file(s) to scan in the
# loop below; only the PART0_1 file is currently enabled.
inputFileList = []
inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
#inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
#inputFileName5 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
#inputFileName6 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileList.append(inputFileName3)
#inputFileList.append(inputFileName4)
#inputFileList.append(inputFileName5)
#inputFileList.append(inputFileName6)

for currentInputFileName in inputFileList:
    inputFileHandler0 = open(currentInputFileName,"rb")
    statinfo = os.stat(currentInputFileName)
    fileSize = statinfo.st_size
    print "file size:",fileSize
    
    numOfBytesRead = 0
    numOfDocsProcessed = 0
    # numOfDocsProcessed = 6000000
    
    while numOfBytesRead <= fileSize:
        if numOfDocsProcessed % 100000 == 0:
            print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
        # print "numOfBytesRead:",numOfBytesRead
        byteString = inputFileHandler0.read(4 + 4)
        (docID,score1) = unpack( "2I", byteString)
        # print "(DEBUG)docID:",str(docID),str(score1)
        
        positionAt20Percent = int(score1 * 0.2)
        positionAt40Percent = int(score1 * 0.4)
        positionAt60Percent = int(score1 * 0.6)
        positionAt80Percent = int(score1 * 0.8)
        
        if docID in docIDWithPrecomputedXdocValueDict:
            xdocValue = 0
            docPostingTupleList = []
            for index in range(0,score1):
                byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
                (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
                # print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
                term = "N*O*N*E"
                if termID in termIDANDTermDict:
                    term = termIDANDTermDict[termID]
                currentProbablityValue = 0.0
                if termID not in termIDWithProbablityDict:
                    currentProbablityValue = 0.0000000012845
                    # print "----->",termID,currentProbablityValue,"UNK"
                    # print
                    xdocValue += currentProbablityValue
                    docPostingTupleList.append((termID,term,currentProbablityValue))
                else:
                    currentProbablityValue = termIDWithProbablityDict[termID]
                    # print "----->",termID,currentProbablityValue,"FOUND"
                    # print
                    xdocValue += currentProbablityValue
                    docPostingTupleList.append((termID,term,currentProbablityValue))
            docPostingTupleList.sort(cmp=None, key=itemgetter(2), reverse=True)
            print "docID:",docID
            print "xdocValue:",xdocValue
            (_,currentRankSortedByXdoc) = docIDWithPrecomputedXdocValueDict[docID]
            # print "docIDWithPrecomputedXdocValueDict[docID]:",docIDWithPrecomputedXdocValueDict[docID]
            # print "xdocValue(compute online):",xdocValue
            # print "xdocValue(compute offline):",docIDWithPrecomputedXdocValueDict[docID]
            print "currentRankSortedByXdoc:",currentRankSortedByXdoc
            print "# of postings:",len(docPostingTupleList)
            print "P(t)_max:",docPostingTupleList[0]
            print "P(t)_20%:",docPostingTupleList[positionAt20Percent]
            print "P(t)_40%:",docPostingTupleList[positionAt40Percent]
            print "P(t)_60%:",docPostingTupleList[positionAt60Percent]
            print "P(t)_80%:",docPostingTupleList[positionAt80Percent]
            print "P(t)_min:",docPostingTupleList[-1]
            print docPostingTupleList
            print

        numOfBytesRead += 4
        numOfBytesRead += 4
        numOfBytesRead += score1 * 4 * 5
        
        inputFileHandler0.seek(numOfBytesRead)
        numOfDocsProcessed += 1

        #if numOfDocsProcessed % 10 == 0:
        #    break

    print "(DEBUG):",numOfDocsProcessed,numOfBytesRead
    print "currentInputFileName:",currentInputFileName
    inputFileHandler0.close()
print "Ends."
exit(1)

# Updated by Wei on 2014/05/31 at school
# #OfQueryTermsInQuery #OfQueriesBelongingToThisGroup %OfQueriesBelongingToThisGroup
# 1    1709    0.0179894736842
# 2    14523   0.152873684211
# 3    23509   0.247463157895
# 4    22678   0.238715789474
# 5    15105   0.159
# 6    8479    0.0892526315789
# 7    4188    0.0440842105263
# 8    2143    0.0225578947368
# 9    1123    0.0118210526316
# 10   637     0.00670526315789

def computeChucksOfPostingsForCurrentDocument(docPostingTupleList):
    """Analyze how a document's expected "UD" utility grows as postings are
    accumulated one at a time.

    docPostingTupleList -- list of (termID, probability) 2-tuples; the
        caller sorts it by probability descending before calling.

    For each prefix of the list the cumulative probability mass is folded
    through a degree-10 polynomial whose coefficients p1..p10 are the
    empirical query-length probabilities (see the table above this
    function).  Three series are derived per position: the raw UD value,
    UD normalized by prefix length, and the per-posting UD gap (first
    difference).  The gaps are written as "<index> <gap>" lines to a
    hard-coded output file and scatter-plotted.

    NOTE(review): the plotting lines reference `matplotlib.pyplot`, but the
    matplotlib import at the top of this file is commented out, so reaching
    them would raise NameError -- confirm before running this path.
    """
    # p1..pN = probability that a query contains N terms (measured
    # 2014/05/31 on the 100K query log; values from the table above).
    p1 = 0.0179894736842
    p2 = 0.152873684211
    p3 = 0.247463157895
    p4 = 0.238715789474
    p5 = 0.159
    p6 = 0.0892526315789
    p7 = 0.0440842105263
    p8 = 0.0225578947368
    p9 = 0.0118210526316
    p10 = 0.00670526315789

    xPoints = []     # posting index (0-based prefix position)
    yPoints1 = []    # per-posting probability P(t)
    yPoints2 = []    # UD normalized by prefix length
    yPoints3 = []    # per-posting UD gap (first difference of yPoints4)
    yPoints4 = []    # raw cumulative UD value
    currentSumPW = 0.0
    
    for index,currentTuple in enumerate(docPostingTupleList):
        (_,currentPW) = currentTuple
        xPoints.append(index)
        yPoints1.append(currentPW)
        currentSumPW += currentPW
        # Earlier single-power experiments, kept for reference:
        # currentUD = 1 * math.pow(currentSumPW, 1)
        # currentUD = 1 * math.pow(currentSumPW, 2)
        # currentUD = 1 * math.pow(currentSumPW, 3)
        # currentUD = 1 * math.pow(currentSumPW, 4)
        # currentUD = 1 * math.pow(currentSumPW, 5)
        # currentUD = 1 * math.pow(currentSumPW, 6)
        # currentUD = 1 * math.pow(currentSumPW, 7)
        # Expected utility: sum over query lengths N of P(length=N) *
        # (cumulative probability mass)^N.
        currentUD = p1 * math.pow(currentSumPW, 1) + p2 * math.pow(currentSumPW, 2) + p3 * math.pow(currentSumPW, 3) + p4 * math.pow(currentSumPW, 4) + p5 * math.pow(currentSumPW, 5) + p6 * math.pow(currentSumPW, 6) + p7 * math.pow(currentSumPW, 7) + p8 * math.pow(currentSumPW, 8) + p9 * math.pow(currentSumPW, 9) + p10 * math.pow(currentSumPW, 10)
        # print index,currentUD
        # True division is active (__future__ import at top of file).
        currentUDI = currentUD / len(xPoints)
        yPoints2.append(currentUDI)
        yPoints4.append(currentUD)

    # First difference of the UD series; position 0 is measured against 0.
    for index,currentUD in enumerate(yPoints4):
        if index == 0:
            currentUDGap = currentUD - 0
            yPoints3.append( currentUDGap )
        else:
            currentUDGap = currentUD - yPoints4[index-1]
            yPoints3.append( currentUDGap )
    
    # print "--->xPoints:",xPoints
    # print "yPoints1:",yPoints1
    # print "yPoints2:",yPoints2
    # print "--->yPoints3:",yPoints3
    print "len(xPoints):",len(xPoints)
    print "len(yPoints1):",len(yPoints1)
    print "len(yPoints2):",len(yPoints2)
    print "len(yPoints3):",len(yPoints3)
    
    # Dump "<index> <UD gap>" lines for offline plotting/analysis.
    outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/xDoc_analysis_queryLengthCombined_20140512"
    outputFileHandler = open(outputFileName,"w")
    for index,currentUDGap in enumerate(yPoints3):
        # print index,currentUDGap
        # print currentUDGap
        outputFileHandler.write(str(index) + " " + str(currentUDGap) + "\n")
    print "OVERALL:"
    print "outputFileName:",outputFileName
    outputFileHandler.close()
    # matplotlib.pyplot.scatter(xPoints,yPoints1)
    # matplotlib.pyplot.show()

    # matplotlib.pyplot.scatter(xPoints,yPoints2)
    # matplotlib.pyplot.show()
    
    # NOTE(review): would raise NameError -- matplotlib import is commented
    # out at the top of this file; confirm before enabling this path.
    matplotlib.pyplot.autoscale(enable=True, axis='both', tight=None)
    matplotlib.pyplot.scatter(xPoints,yPoints3)
    matplotlib.pyplot.show()
          
    xPoints = []
    yPoints1 = []
    yPoints2 = []
    
    '''
    beginningPosition = 0
    currentPosition = 0
    endingPosition = 0
    currentSumPW = 0.0
    currentUD_beginningPosition_endingPosition = 0
    nextSumPW = 0.0
    nextUD_beginningPosition_endingPosition = 0
    while beginningPosition < len(docPostingTupleList):
        # print currentPosition,currentTuple
        currentTuple = docPostingTupleList[currentPosition]
        (_,currentPW) = currentTuple
        currentSumPW += currentPW
        currentUD_beginningPosition_endingPosition = p1 * math.pow(currentSumPW, 1) + p2 * math.pow(currentSumPW, 2) + p3 * math.pow(currentSumPW, 3) + p4 * math.pow(currentSumPW, 4) + p5 * math.pow(currentSumPW, 5) + p6 * math.pow(currentSumPW, 6) + p7 * math.pow(currentSumPW, 7) + p8 * math.pow(currentSumPW, 8) + p9 * math.pow(currentSumPW, 9) + p10 * math.pow(currentSumPW, 10) 
        currentUDP = currentUD_beginningPosition_endingPosition / ( currentPosition - beginningPosition + 1)
        print "current",currentUD_beginningPosition_endingPosition,beginningPosition,currentPosition,currentUDP
        
        nextTuple = docPostingTupleList[currentPosition+1]
        (_,nextPW) = nextTuple
        nextSumPW = currentSumPW + nextPW
        nextUD_beginningPosition_endingPosition = p1 * math.pow(nextSumPW, 1) + p2 * math.pow(nextSumPW, 2) + p3 * math.pow(nextSumPW, 3) + p4 * math.pow(nextSumPW, 4) + p5 * math.pow(nextSumPW, 5) + p6 * math.pow(nextSumPW, 6) + p7 * math.pow(nextSumPW, 7) + p8 * math.pow(nextSumPW, 8) + p9 * math.pow(nextSumPW, 9) + p10 * math.pow(nextSumPW, 10) 
        nextUDP = nextUD_beginningPosition_endingPosition / ( currentPosition + 1 - beginningPosition + 1)
        print "next",nextUD_beginningPosition_endingPosition,beginningPosition,currentPosition+1,nextUDP       
        
        if nextUDP >= currentUDP:
            currentPosition += 1
        else:
            # form an entry
            print beginningPosition,currentPosition,currentUD_beginningPosition_endingPosition,currentUDP
            print
            beginningPosition = currentPosition
            currentPosition += 1
    '''

# on dodo:
termIDWithProbablityDict = {}
ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsFrom100KWithTermIDANDGoodTuringProbabilites_20140510"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTermID = int(lineElements[0])
    currentProbablity = float(lineElements[2])
    if currentTermID not in termIDWithProbablityDict:
        termIDWithProbablityDict[currentTermID] = currentProbablity
print "len(termIDWithProbablityDict):",len(termIDWithProbablityDict)
inputFileHandler0.close()

# options:
# Single-file variant of the binary scan above: decode every document's
# postings (same 8-byte header + 20-byte posting-entry layout), rebuild the
# xdoc value from per-term probabilities, then hand the sorted posting list
# to computeChucksOfPostingsForCurrentDocument.
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    # Header: docID (uint32) + number of postings (uint32, held in score1).
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)
    
    # Sum per-term probabilities into xdocValue, collecting
    # (termID, P(t)) pairs for the chunk analysis below.
    xdocValue = 0
    docPostingTupleList = []
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        # print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
        currentProbablityValue = 0.0
        if termID not in termIDWithProbablityDict:
            # Term never seen in the query log: tiny smoothing value.
            currentProbablityValue = 0.0000000012845
            # print "----->",termID,currentProbablityValue,"UNK"
            # print
            xdocValue += currentProbablityValue
            docPostingTupleList.append((termID,currentProbablityValue))
        else:
            currentProbablityValue = termIDWithProbablityDict[termID]
            # print "----->",termID,currentProbablityValue,"FOUND"
            # print
            xdocValue += currentProbablityValue
            docPostingTupleList.append((termID,currentProbablityValue))
    print "xdocValue:",xdocValue
    print "len(docPostingTupleList):",len(docPostingTupleList)
    # Sort by probability, highest first, as the analysis function expects.
    docPostingTupleList.sort(cmp=None, key=itemgetter(1), reverse=True)
    print docID,len(docPostingTupleList)
    print "find"
    print docPostingTupleList
    computeChucksOfPostingsForCurrentDocument(docPostingTupleList)
    # NOTE: unconditional exit after the FIRST document -- everything below
    # in this loop (including the docID == 780000 stop) is unreachable as
    # written; remove this exit(1) to process the whole file.
    exit(1)
    
    
    if docID == 780000:
        exit(1)
    
    
    print
    # Advance past header (4+4) plus score1 posting entries (5 x 4 bytes
    # each), then seek to the next header.
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    

    
    inputFileHandler0.seek(numOfBytesRead)
    

    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
exit(1)





queryContentDict = {}
inputFileName = "/team/06.efficiency_topics.all_100k_query_log"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    queryContent = lineElements[1]
    if queryContent not in queryContentDict:
        queryContentDict[queryContent] = 1
    else:
        print queryContent
        # exit(1)

print "len(queryContentDict):",len(queryContentDict)
inputFileHandler0.close()
exit(1)

queryContentDict = {}
inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    queryContent = lineElements[1]
    if queryContent not in queryContentDict:
        queryContentDict[queryContent] = 1
    else:
        print queryContent,"duplicated detected"
        exit(1)

inputFileHandler0.close()
exit(1)



sumNumOfPostings = 0
# ...
#inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_50%_20140525"
#inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_60%_20140525"
# 4516362890
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_70%_20140525"
# 5161558381
#inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_80%_20140525"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_90%_20140525"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    sumNumOfPostings += int(currentLineElements[1])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "OVERALL"
print "sumNumOfPostings:",sumNumOfPostings
exit(1)

##################################################module1
docIDsDictFromFile1 = {}
# AND case
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/rawResults_uniform_impactScore_listLength_100%_TOP10_AND_20131210Night"
print "inputFileName1:",inputFileName1
inputFileHanlder1 = open(inputFileName1,"r")
currentQueryID = ""
currentLine = inputFileHanlder1.readline()
numOfTOP10DocumentResults1 = 0
currentRank = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    if currentLine.strip().startswith("qid:"):
        # print currentLineElements
        currentQueryID = currentLineElements[1]
        currentRank = 0
    
    if len(currentLineElements) == 14 and currentLineElements[-1].startswith("GX"):
        currentRank += 1
        if currentRank <= 10:
            # print currentRank,currentLine.strip()
            currentDocID = currentLineElements[-2]
            if currentDocID not in docIDsDictFromFile1:
                docIDsDictFromFile1[currentDocID] = 1
            else:
                docIDsDictFromFile1[currentDocID] += 1
            numOfTOP10DocumentResults1 += 1
    currentLine = inputFileHanlder1.readline()
print "len(docIDsDictFromFile1):",len(docIDsDictFromFile1)
##################################################

##################################################module2
# The following assignments are ONLY fitted for Xdoc document assignment scheme
# 10 debug%
# 16234 1%
# 58892 3%
# 113151 5%
# 303078 10%
# 573053 15%
# 960415 20%
# 1704697 30%
# 3048106 40%
# 4840867 50%
# 7078881 60%
# 9726244 70%
# 13270243 80%
# 17993779 90%
# 25205179 100%
docIDsDictFromFile2 = {}
numOfDocsAtDebugPercent = 10
numOfDocsAt1Percent = 16234
numOfDocsAt3Percent = 58892
numOfDocsAt5Percent = 113151
numOfDocsAt10Percent = 303078
numOfDocsAt15Percent = 573053
numOfDocsAt20Percent = 960415
numOfDocsAt30Percent = 1704697
numOfDocsAt40Percent = 3048106
numOfDocsAt50Percent = 4840867
numOfDocsAt60Percent = 7078881
numOfDocsAt70Percent = 9726244
numOfDocsAt80Percent = 13270243
numOfDocsAt90Percent = 17993779 

print "numOfDocsAtDebugPercent:",numOfDocsAtDebugPercent
print "numOfDocsAt1Percent:",numOfDocsAt1Percent
print "numOfDocsAt3Percent:",numOfDocsAt3Percent
print "numOfDocsAt5Percent:",numOfDocsAt5Percent
print "numOfDocsAt10Percent:",numOfDocsAt10Percent
print "numOfDocsAt15Percent:",numOfDocsAt15Percent
print "numOfDocsAt20Percent:",numOfDocsAt20Percent
print "numOfDocsAt30Percent:",numOfDocsAt30Percent
print "numOfDocsAt40Percent:",numOfDocsAt40Percent
print "numOfDocsAt50Percent:",numOfDocsAt50Percent
print "numOfDocsAt60Percent:",numOfDocsAt60Percent
print "numOfDocsAt70Percent:",numOfDocsAt70Percent
print "numOfDocsAt80Percent:",numOfDocsAt80Percent
print "numOfDocsAt90Percent:",numOfDocsAt90Percent

# One (dict, active-flag) pair per coverage tier; the loop below clears a
# tier's flag once its dict reaches the tier's document quota.
docIDDict0, docIDDict0ActiveFlag = {}, True
currentDocDict, docIDDict1ActiveFlag = {}, True
docIDDict2, docIDDict2ActiveFlag = {}, True
docIDDict3, docIDDict3ActiveFlag = {}, True
docIDDict4, docIDDict4ActiveFlag = {}, True
docIDDict5, docIDDict5ActiveFlag = {}, True
docIDDict6, docIDDict6ActiveFlag = {}, True
docIDDict7, docIDDict7ActiveFlag = {}, True
docIDDict8, docIDDict8ActiveFlag = {}, True
docIDDict9, docIDDict9ActiveFlag = {}, True
docIDDict10, docIDDict10ActiveFlag = {}, True
docIDDict11, docIDDict11ActiveFlag = {}, True
docIDDict12, docIDDict12ActiveFlag = {}, True
docIDDict13, docIDDict13ActiveFlag = {}, True


# option2: sorted by Xdoc procedure
# Walk the xdoc-sorted document list from the top and fill every tier dict
# with the same ranking prefix: each docID is added to every tier whose
# ActiveFlag is still set, and a flag is cleared once its dict reaches the
# tier's quota -- so docIDDict0 holds the top debug% docs, currentDocDict
# the top 1%, docIDDict2 the top 3%, ..., docIDDict13 the top 90%.
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_Since20140428_sortedByXdocValues"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()
currentLineCounter = 1
while currentLine:
    if currentLineCounter % 10000 == 0:
        print "currentLineCounter:",currentLineCounter,"processed."
        
    # Line format: second field is the docID (kept as a string here).
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[1]
    
    if currentDocID not in docIDDict0 and docIDDict0ActiveFlag:
        docIDDict0[currentDocID] = 1
    

    if currentDocID not in currentDocDict and docIDDict1ActiveFlag:
        currentDocDict[currentDocID] = 1
    

    if currentDocID not in docIDDict2 and docIDDict2ActiveFlag:
        docIDDict2[currentDocID] = 1
    

    if currentDocID not in docIDDict3 and docIDDict3ActiveFlag:
        docIDDict3[currentDocID] = 1
    

    if currentDocID not in docIDDict4 and docIDDict4ActiveFlag:
        docIDDict4[currentDocID] = 1
    

    if currentDocID not in docIDDict5 and docIDDict5ActiveFlag:
        docIDDict5[currentDocID] = 1
    

    if currentDocID not in docIDDict6 and docIDDict6ActiveFlag:
        docIDDict6[currentDocID] = 1
    

    if currentDocID not in docIDDict7 and docIDDict7ActiveFlag:
        docIDDict7[currentDocID] = 1
    

    if currentDocID not in docIDDict8 and docIDDict8ActiveFlag:
        docIDDict8[currentDocID] = 1
    

    if currentDocID not in docIDDict9 and docIDDict9ActiveFlag:
        docIDDict9[currentDocID] = 1
    
    if currentDocID not in docIDDict10 and docIDDict10ActiveFlag:
        docIDDict10[currentDocID] = 1
    
    if currentDocID not in docIDDict11 and docIDDict11ActiveFlag:
        docIDDict11[currentDocID] = 1
    
    if currentDocID not in docIDDict12 and docIDDict12ActiveFlag:
        docIDDict12[currentDocID] = 1
    
    if currentDocID not in docIDDict13 and docIDDict13ActiveFlag:
        docIDDict13[currentDocID] = 1
    
    
    # need to set the flag to False when fill up the whole dict
    # (the flag guard above stops insertions, so a dict can never exceed
    # its quota and the exact-equality test is sufficient)
    if len(docIDDict0) == numOfDocsAtDebugPercent:
        docIDDict0ActiveFlag = False
     
    if len(currentDocDict) == numOfDocsAt1Percent:
        docIDDict1ActiveFlag = False
        # switch 1
        # break
    
    if len(docIDDict2) == numOfDocsAt3Percent:
        docIDDict2ActiveFlag = False
    
    if len(docIDDict3) == numOfDocsAt5Percent:
        docIDDict3ActiveFlag = False
    
    if len(docIDDict4) == numOfDocsAt10Percent:
        docIDDict4ActiveFlag = False
    
    if len(docIDDict5) == numOfDocsAt15Percent:
        docIDDict5ActiveFlag = False
    
    if len(docIDDict6) == numOfDocsAt20Percent:
        docIDDict6ActiveFlag = False
    
    if len(docIDDict7) == numOfDocsAt30Percent:
        docIDDict7ActiveFlag = False
    
    if len(docIDDict8) == numOfDocsAt40Percent:
        docIDDict8ActiveFlag = False
    
    if len(docIDDict9) == numOfDocsAt50Percent:
        docIDDict9ActiveFlag = False
    
    if len(docIDDict10) == numOfDocsAt60Percent:
        docIDDict10ActiveFlag = False
    
    if len(docIDDict11) == numOfDocsAt70Percent:
        docIDDict11ActiveFlag = False
    
    if len(docIDDict12) == numOfDocsAt80Percent:
        docIDDict12ActiveFlag = False
    
    if len(docIDDict13) == numOfDocsAt90Percent:
        docIDDict13ActiveFlag = False
 
    currentLine = inputFileHanlder.readline()
    currentLineCounter += 1

print "len(docIDDict0):",len(docIDDict0)
print "len(currentDocDict):",len(currentDocDict)
print "len(docIDDict2):",len(docIDDict2)
print "len(docIDDict3):",len(docIDDict3)
print "len(docIDDict4):",len(docIDDict4)
print "len(docIDDict5):",len(docIDDict5)
print "len(docIDDict6):",len(docIDDict6)
print "len(docIDDict7):",len(docIDDict7)
print "len(docIDDict8):",len(docIDDict8)
print "len(docIDDict9):",len(docIDDict9)
print "len(docIDDict10):",len(docIDDict10)
print "len(docIDDict11):",len(docIDDict11)
print "len(docIDDict12):",len(docIDDict12)
print "len(docIDDict13):",len(docIDDict13)
inputFileHanlder.close()
##################################################

print "numOfTOP10DocumentResultsFromANDHead95K:",numOfTOP10DocumentResults1
intersectionSet0 = set(docIDsDictFromFile1).intersection( set(docIDDict0) )
intersectionSet1 = set(docIDsDictFromFile1).intersection( set(currentDocDict) )
intersectionSet2 = set(docIDsDictFromFile1).intersection( set(docIDDict2) )
intersectionSet3 = set(docIDsDictFromFile1).intersection( set(docIDDict3) )
intersectionSet4 = set(docIDsDictFromFile1).intersection( set(docIDDict4) )
intersectionSet5 = set(docIDsDictFromFile1).intersection( set(docIDDict5) )
intersectionSet6 = set(docIDsDictFromFile1).intersection( set(docIDDict6) )
intersectionSet7 = set(docIDsDictFromFile1).intersection( set(docIDDict7) )
intersectionSet8 = set(docIDsDictFromFile1).intersection( set(docIDDict8) )
intersectionSet9 = set(docIDsDictFromFile1).intersection( set(docIDDict9) )
intersectionSet10 = set(docIDsDictFromFile1).intersection( set(docIDDict10) )
intersectionSet11 = set(docIDsDictFromFile1).intersection( set(docIDDict11) )
intersectionSet12 = set(docIDsDictFromFile1).intersection( set(docIDDict12) )
intersectionSet13 = set(docIDsDictFromFile1).intersection( set(docIDDict13) )

overlapRate0 = len(intersectionSet0) / len(docIDsDictFromFile1)
overlapRate1 = len(intersectionSet1) / len(docIDsDictFromFile1)
overlapRate2 = len(intersectionSet2) / len(docIDsDictFromFile1)
overlapRate3 = len(intersectionSet3) / len(docIDsDictFromFile1)
overlapRate4 = len(intersectionSet4) / len(docIDsDictFromFile1)
overlapRate5 = len(intersectionSet5) / len(docIDsDictFromFile1)
overlapRate6 = len(intersectionSet6) / len(docIDsDictFromFile1)
overlapRate7 = len(intersectionSet7) / len(docIDsDictFromFile1)
overlapRate8 = len(intersectionSet8) / len(docIDsDictFromFile1)
overlapRate9 = len(intersectionSet9) / len(docIDsDictFromFile1)
overlapRate10 = len(intersectionSet10) / len(docIDsDictFromFile1)
overlapRate11 = len(intersectionSet11) / len(docIDsDictFromFile1)
overlapRate12 = len(intersectionSet12) / len(docIDsDictFromFile1)
overlapRate13 = len(intersectionSet13) / len(docIDsDictFromFile1)

print "intersectionSets:"

print len(intersectionSet0)
print len(intersectionSet1)
print len(intersectionSet2)
print len(intersectionSet3)
print len(intersectionSet4)
print len(intersectionSet5)
print len(intersectionSet6)
print len(intersectionSet7)
print len(intersectionSet8)
print len(intersectionSet9)
print len(intersectionSet10)
print len(intersectionSet11)
print len(intersectionSet12)
print len(intersectionSet13)

print "overlapRate0:",overlapRate0
print "overlapRate1:",overlapRate1
print "overlapRate2:",overlapRate2
print "overlapRate3:",overlapRate3
print "overlapRate4:",overlapRate4
print "overlapRate5:",overlapRate5
print "overlapRate6:",overlapRate6
print "overlapRate7:",overlapRate7
print "overlapRate8:",overlapRate8
print "overlapRate9:",overlapRate9
print "overlapRate10:",overlapRate10
print "overlapRate11:",overlapRate11
print "overlapRate12:",overlapRate12
print "overlapRate13:",overlapRate13

print overlapRate0
print overlapRate1
print overlapRate2
print overlapRate3
print overlapRate4
print overlapRate5
print overlapRate6
print overlapRate7
print overlapRate8
print overlapRate9
print overlapRate10
print overlapRate11
print overlapRate12
print overlapRate13

exit(1)

totalNum = 25205179
print totalNum * 0.01
print totalNum * 0.03
print totalNum * 0.05
print totalNum * 0.10
print totalNum * 0.15
print totalNum * 0.20
print totalNum * 0.30
print totalNum * 0.40
print totalNum * 0.50
print totalNum * 0.60
print totalNum * 0.70
print totalNum * 0.80
print totalNum * 0.90
exit(1)

sumNumOfPostings = 0
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_1%_20140524"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    sumNumOfPostings += int(lineElements[1])
inputFileHandler0.close()
print "sumNumOfPostings:",sumNumOfPostings
exit(1)

print "Begins..."

currentMinDocID = 99999999
currentDocDict = []
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_Since20140428_sortedByXdocValues"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
numOfDocsIn = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[1])
    if currentDocID <= 99999:
        numOfDocsIn += 1
    
    if currentMinDocID > currentDocID:
        currentMinDocID = currentDocID
    
    currentDocDict.append(currentDocID)
    
    if numOfQueriesFallingThrough == 16234: # 1%
        break
    
    
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
inputFileHandler0.close()
currentDocDict.sort(cmp=None, key=None, reverse=False)

print "currentDocDict[:10]:",currentDocDict[:10]
print "Overall:"
print "inputFileName1:",inputFileName1
print "numOfDocsIn:",numOfDocsIn
print "currentMinDocID:",currentMinDocID
print "Ends."
exit(1)

print "Begins..."
docIDWithNumOfPostingsRecordedDict = {}
ifn = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset_sortedByDocID"
inputFileHandler0 = open(ifn,"r")
sumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[0])
    currentNumOfPostings = int(currentLineElements[1])
    if currentDocID not in docIDWithNumOfPostingsRecordedDict:
        docIDWithNumOfPostingsRecordedDict[currentDocID] = currentNumOfPostings
        sumOfPostings += currentNumOfPostings
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "sumOfPostings:",sumOfPostings
print "len(docIDWithNumOfPostingsRecordedDict):",len(docIDWithNumOfPostingsRecordedDict)

sumOfPostings = 0
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_Since20140428_sortedByXdocValues"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
numOfDocsIn = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = int(currentLineElements[1])
    if currentDocID <= 99999:
        numOfDocsIn += 1
        sumOfPostings += docIDWithNumOfPostingsRecordedDict[currentDocID]
    if numOfQueriesFallingThrough == 16234:
        break
    
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
inputFileHandler0.close()

print "Overall:"
print "inputFileName1:",inputFileName1
print "numOfDocsIn:",numOfDocsIn
print "sumOfPostings:",sumOfPostings
print "Ends."
exit(1)


# 0
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_debug%_20140524"
# 370112
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_1%_20140524"
# 1164243
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_3%_20140524"
# 2118614
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_5%_20140524"
# 5177292
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_10%_20140524"
# 8569990
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_15%_20140524"
# 12560892
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_20%_20140524"
# 21689484
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_30%_20140524"
# 30929734
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_40%_20140524"
# 40571899
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_50%_20140524"
# 50130187
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_60%_20140524"
# 61109963
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_70%_20140524"
# 71723258
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_80%_20140524"
# 81293266
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/xDocDocumentPartitionBasedInvertedIndexStatistics_90%_20140524"
inputFileHandler0 = open(inputFileName,"r")
sumNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    sumNumOfPostings += int(currentLineElements[1])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
print "sumNumOfPostings:",sumNumOfPostings
exit(1)

sumOfCPUEvaluationCost = 0
inputFileName = "/home/diaosi/filesToBeDoubt/100KQueries_tail_5k_withCPUEvaluationCost_20140422"
inputFileHandler0 = open(inputFileName,"r")
outputFileName = "/home/diaosi/filesToBeDoubt/100KQueries_tail_5k_withCPUEvaluationCost_20140524_reformatted"
outputFileHandler = open(outputFileName,"w")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    queryContent = lineElements[1]
    CPUEvaluationCost = int(lineElements[0].strip().split(" ")[0])
    sumOfCPUEvaluationCost += CPUEvaluationCost
    # print lineElements[0]
    # print lineElements[1]
    queryID = lineElements[0].strip().split(" ")[1]
    outputFileHandler.write(queryID + "," + queryContent + "," + str(CPUEvaluationCost) + "\n")
outputFileHandler.close()
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
print "sumOfCPUEvaluationCost:",sumOfCPUEvaluationCost
exit(1)

# options:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/LEAVE_selectedDocumentPostingValuesInfo_0_3M_DOC_20140518_TOP100Postings.binary"
# Debug dump of the FIRST document record in the binary posting-values file.
# Record layout: 8-byte header (docID, numPostings; both uint32) followed by
# numPostings 20-byte entries (termID uint32 + 4 float32 values).
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000

# NOTE(review): the unconditional exit(1) below stops after the first
# document, so the loop never iterates; if that exit is removed, the "<="
# condition would read an 8-byte header at exact EOF and crash in unpack().
while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)
    termIDDictForCurrentDoc = {}
    termIDListForCurrentDoct = []
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
        # a termID must appear at most once per document record
        if termID not in termIDDictForCurrentDoc:
            termIDDictForCurrentDoc[termID] = 1
        else:
            print "Oh, my god. Fatal problem !"
            exit(1)
    termIDListForCurrentDoct = list(termIDDictForCurrentDoc)
    termIDListForCurrentDoct.sort()
    print "termIDListForCurrentDoct:",termIDListForCurrentDoct
    print "len(termIDDictForCurrentDoc):",len(termIDDictForCurrentDoc)
    assert score1 == len(termIDDictForCurrentDoc)
    print
    exit(1)

    # advance past the 8-byte header plus the per-posting payload and re-seek
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    inputFileHandler0.seek(numOfBytesRead)


    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)


# options:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC_20140518_TOP100Postings.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "(docID,# of postings) pair:",str(docID),str(score1)
    
    termIDsFromCurrentDoc = []
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
        termIDsFromCurrentDoc.append(termID)

    if docID == 2:
        termIDsFromCurrentDoc.sort(cmp=None, key=None, reverse=False)
        print "termIDsFromCurrentDoc:",termIDsFromCurrentDoc 
        exit(1)
    
    if docID == 780000:
        exit(1)
    print
    
    
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    inputFileHandler0.seek(numOfBytesRead)
    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName1:",inputFileName1
exit(1)

docIDWithNumOfPostingsDict = {}
totalNumOfPostings = 0
totalNumOfBytes = 0
ifn = "/data/obukai/workspace/web-search-engine-wei-2014-Feb/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
     currentLineElements = currentLine.strip().split(" ")
     currentDocID = currentLineElements[0]
     currentNumOfPostings = int(currentLineElements[1])
     totalNumOfPostings += currentNumOfPostings
     totalNumOfBytes += 4 + 4 + currentNumOfPostings * 3 * 4
     if currentDocID not in docIDWithNumOfPostingsDict:
         docIDWithNumOfPostingsDict[currentDocID] = currentNumOfPostings 
     currentLine = inputFileHandler0.readline()

print "OVERALL:"
print "totalNumOfPostings:",totalNumOfPostings
print "totalNumOfBytes:",totalNumOfBytes
print "len(docIDWithNumOfPostingsDict):",len(docIDWithNumOfPostingsDict)
inputFileHandler0.close()

# options:
inputFileName1 = "/data/jrodri04/prune/forward.idx.static.wei"
# inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_6M_12M_DOC_20140518_TOP100Postings.binary"
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC_20140518_TOP100Postings.binary"
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileHandler0 = open(inputFileName1,"rb")
statinfo = os.stat(inputFileName1)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "(docID,# of postings) pair:",str(docID),str(score1)
    if docIDWithNumOfPostingsDict[str(docID)] == score1:
        pass
    else:
        print "Inconsistent Data detected:"
        print "docID:",docID
        print "docIDWithNumOfPostingsDict[str(docID)]:",docIDWithNumOfPostingsDict[str(docID)]
        print "score1 get from the file:",score1
        print "fileSize:",fileSize
        print "numOfBytesRead:",numOfBytesRead
        exit(1)
    
    '''
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
    exit(1)
    if docID == 780000:
        exit(1)
    print
    '''
    
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    inputFileHandler0.seek(numOfBytesRead)
    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName1:",inputFileName1
exit(1)

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/data/secondFactorProbability/numeratorTable_fromTOP100Postings_20140518"
inputFileHandler0 = open(inputFileName,"r")
sumNumOfPostings = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    for currentValueInString in lineElements[1:]:
        sumNumOfPostings += int(currentValueInString)
print "sumNumOfPostings:",sumNumOfPostings
inputFileHandler0.close()
exit(1)

# options:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# Debug dump of the FIRST document's posting entries from the binary file.
# Record layout: 8-byte header (docID, numPostings; both uint32) followed by
# numPostings 20-byte entries (termID uint32 + 4 float32 values).
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000

# NOTE(review): the unconditional exit(1) after the inner loop stops this
# after the first document, so the advance/seek code below never runs; if the
# exit is removed, the "<=" condition would read a header at exact EOF and
# crash in unpack() — fix to "<" before reuse.
while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)

    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
    exit(1)
    # advance past the 8-byte header plus the per-posting payload and re-seek
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    inputFileHandler0.seek(numOfBytesRead)


    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)

# get all the related document results from the human judge side
docDict = {}
# for pangolin:
# inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/data/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
# for dodo:
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
inputFileHandler0 = open(inputFileName1,"r")
for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    relevanceLabel = lineElements[3]
    currentDocID = lineElements[4]
    if relevanceLabel != "0":
        if currentDocID not in docDict:
            docDict[currentDocID] = 1
print "len(docDict):",len(docDict)

inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_20140505Night"
inputFileHandler0 = open(inputFileName2,"r")

avgNumOfPostings = 0
sumNumOfPostingsOverWholeCollection = 0
sumNumOfPostings = 0
minNumOfPostings = 9999
minNumOfPostingsCorrespondingDocID = 0
maxNumOfPostings = 0
maxNumOfPostingsCorrespondingDocID = 0

avgNumOfTokens = 0
sumNumOfTokensOverWholeCollection = 0
sumNumOfTokens = 0
minNumOfTokens = 9999
minNumOfTokensCorrespondingDocID = 0
maxNumOfTokens = 0
maxNumOfTokensCorrespondingDocID = 0


numDocs = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[1]
    currentDocNumOfPostings = int(currentLineElements[2])
    currentDocNumOfTokens = int(currentLineElements[3])
    sumNumOfPostingsOverWholeCollection += currentDocNumOfPostings 
    sumNumOfTokensOverWholeCollection += currentDocNumOfTokens
    if currentDocID in docDict:
        numDocs += 1
        if minNumOfPostings > currentDocNumOfPostings:
            minNumOfPostings = currentDocNumOfPostings
            minNumOfPostingsCorrespondingDocID = currentDocID
        if maxNumOfPostings < currentDocNumOfPostings:
            maxNumOfPostings = currentDocNumOfPostings
            maxNumOfPostingsCorrespondingDocID = currentDocID
        
        if minNumOfTokens > currentDocNumOfTokens:
            minNumOfTokens = currentDocNumOfTokens
            minNumOfTokensCorrespondingDocID = currentDocID
        if maxNumOfTokens < currentDocNumOfTokens:
            maxNumOfTokens = currentDocNumOfTokens
            maxNumOfTokensCorrespondingDocID = currentDocID

        sumNumOfTokens += currentDocNumOfTokens
        sumNumOfPostings += currentDocNumOfPostings
    currentLine = inputFileHandler0.readline()

avgNumOfPostings = sumNumOfPostings / numDocs
avgNumOfTokens = sumNumOfTokens / numDocs

print "sumNumOfPostingsOverWholeCollection:",sumNumOfPostingsOverWholeCollection 
print "sumNumOfPostings:",sumNumOfPostings
print "numDocs:",numDocs
print "avgNumOfPostings:",avgNumOfPostings
print "maxNumOfPostings:",maxNumOfPostings
print "minNumOfPostings:",minNumOfPostings
print "maxNumOfPostingsCorrespondingDocID:",maxNumOfPostingsCorrespondingDocID
print "minNumOfPostingsCorrespondingDocID:",minNumOfPostingsCorrespondingDocID

print "sumNumOfTokensOverWholeCollection:",sumNumOfTokensOverWholeCollection 
print "sumNumOfTokens:",sumNumOfTokens
print "numDocs:",numDocs
print "avgNumOfTokens:",avgNumOfTokens
print "maxNumOfTokens:",maxNumOfTokens
print "minNumOfTokens:",minNumOfTokens
print "maxNumOfTokensCorrespondingDocID:",maxNumOfTokensCorrespondingDocID
print "minNumOfTokensCorrespondingDocID:",minNumOfTokensCorrespondingDocID


exit(1)

print "step0:"
trecIDANDDocIDMappingTable = {}
ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentTrecID = currentLine.strip().split(" ")[0]
    currentDocID = currentLine.strip().split(" ")[1]
    trecIDANDDocIDMappingTable[currentTrecID] = currentDocID
    if len(trecIDANDDocIDMappingTable) % 1000000 == 0:
        print len(trecIDANDDocIDMappingTable)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(trecIDANDDocIDMappingTable):",len(trecIDANDDocIDMappingTable)

print "step1:"
qidWithQueryTermsDict = {}
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
      
    if currentQID not in qidWithQueryTermsDict:
        qidWithQueryTermsDict[currentQID] = []
      
    postingInfoList = lineElements[6:-1]
    numOfPostingInfos = int(len(postingInfoList) / 2)
    base = 0
    for i in range(0,numOfPostingInfos):
        currentTermID = postingInfoList[base]
        currentTermScore = float(postingInfoList[base+1])
        if currentTermID not in qidWithQueryTermsDict[currentQID]:
            qidWithQueryTermsDict[currentQID].append(currentTermID)
        base += 2

print "qidWithQueryTermsDict:",qidWithQueryTermsDict
print "qidWithQueryTermsDict['701']:",qidWithQueryTermsDict['701']
print "qidWithQueryTermsDict['702']:",qidWithQueryTermsDict['702']
inputFileHandler0.close()

print "step2:"
inputFileName2 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/testRanking_20140514_K1_B_option4_AND_TOP2M_reformatted"
inputFileHandler0 = open(inputFileName2,"r")
outputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/testRanking_20140514_K1_B_option4_AND_reformatted_termIDs_docID_added"
outputFileHandler = open(outputFileName,"w")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split("\t")
    currentQID = currentLineElements[0]
    currentTrecID = currentLineElements[2]
    if currentQID != "703":
        outputLine = currentLine.strip() + "\t"
        for queryTerm in qidWithQueryTermsDict[currentQID]:
            outputLine += queryTerm + "\t" + str(0) + "\t"
        outputLine += trecIDANDDocIDMappingTable[currentTrecID] + "\t"
        outputLine += "\n"
        print outputLine,
        outputFileHandler.write(outputLine)
    currentLine = inputFileHandler0.readline()
outputFileHandler.close()
inputFileHandler0.close()
print "OVERALL"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)

print "****************************************************************************************************************************************************************"

print "**********Original Model**********"
qidWithTOP10DocumentResultsDictOriginal = {}
qidWithTOP20DocumentResultsDictOriginal = {}
TOP10DocumentResultsDictOriginal = {}
TOP20DocumentResultsDictOriginal = {}
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentRank = lineElements[3]
    
    # print currentQID,currentTrecID,currentRank
    
    if currentQID not in qidWithTOP10DocumentResultsDictOriginal:
        qidWithTOP10DocumentResultsDictOriginal[currentQID] = []
    
    if currentQID not in qidWithTOP20DocumentResultsDictOriginal:
        qidWithTOP20DocumentResultsDictOriginal[currentQID] = []    
    
    
    if len(qidWithTOP10DocumentResultsDictOriginal[currentQID]) < 10:
        qidWithTOP10DocumentResultsDictOriginal[currentQID].append(currentTrecID)
        currentDocumentResultKey = currentQID + "_" + currentTrecID
        TOP10DocumentResultsDictOriginal[currentDocumentResultKey] = 1
    else:
        pass
    
    
    if len(qidWithTOP20DocumentResultsDictOriginal[currentQID]) < 20:
        qidWithTOP20DocumentResultsDictOriginal[currentQID].append(currentTrecID)
        currentDocumentResultKey = currentQID + "_" + currentTrecID
        TOP20DocumentResultsDictOriginal[currentDocumentResultKey] = 1
    else:
        pass
    
    
print "len(qidWithTOP10DocumentResultsDictOriginal):",len(qidWithTOP10DocumentResultsDictOriginal)
print "len(qidWithTOP20DocumentResultsDictOriginal):",len(qidWithTOP20DocumentResultsDictOriginal)
print "qidWithTOP10DocumentResultsDictOriginal['701']:",qidWithTOP10DocumentResultsDictOriginal['701']
print "qidWithTOP10DocumentResultsDictOriginal['702']:",qidWithTOP10DocumentResultsDictOriginal['702']
print "qidWithTOP20DocumentResultsDictOriginal['701']:",qidWithTOP20DocumentResultsDictOriginal['701']
print "qidWithTOP20DocumentResultsDictOriginal['702']:",qidWithTOP20DocumentResultsDictOriginal['702']
print "len(TOP10DocumentResultsDictOriginal):",len(TOP10DocumentResultsDictOriginal)
print "len(TOP20DocumentResultsDictOriginal):",len(TOP20DocumentResultsDictOriginal)
# print "qidWithTOP10DocumentResultsDictOriginal.keys():",qidWithTOP10DocumentResultsDictOriginal.keys()
inputFileHandler0.close()

print "**********Improved Model**********"
qidWithTOP10DocumentResultsDictImproved = {}
qidWithTOP20DocumentResultsDictImproved = {}
TOP10DocumentResultsDictImproved = {}
TOP20DocumentResultsDictImproved = {}
inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/testRanking_20140514_K1_B_option4_AND_reformatted"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentRank = lineElements[3]
    
    # print currentQID,currentTrecID,currentRank
    
    if currentQID not in qidWithTOP10DocumentResultsDictImproved:
        qidWithTOP10DocumentResultsDictImproved[currentQID] = []
    
    if currentQID not in qidWithTOP20DocumentResultsDictImproved:
        qidWithTOP20DocumentResultsDictImproved[currentQID] = []    
    
    
    if len(qidWithTOP10DocumentResultsDictImproved[currentQID]) < 10:
        qidWithTOP10DocumentResultsDictImproved[currentQID].append(currentTrecID)
        currentDocumentResultKey = currentQID + "_" + currentTrecID
        TOP10DocumentResultsDictImproved[currentDocumentResultKey] = 1
    else:
        pass
    
    
    if len(qidWithTOP20DocumentResultsDictImproved[currentQID]) < 20:
        qidWithTOP20DocumentResultsDictImproved[currentQID].append(currentTrecID)
        currentDocumentResultKey = currentQID + "_" + currentTrecID
        TOP20DocumentResultsDictImproved[currentDocumentResultKey] = 1
    else:
        pass
    
    
print "len(qidWithTOP10DocumentResultsDictImproved):",len(qidWithTOP10DocumentResultsDictImproved)
print "len(qidWithTOP20DocumentResultsDictImproved):",len(qidWithTOP20DocumentResultsDictImproved)
print "qidWithTOP10DocumentResultsDictImproved['701']:",qidWithTOP10DocumentResultsDictImproved['701']
print "qidWithTOP10DocumentResultsDictImproved['702']:",qidWithTOP10DocumentResultsDictImproved['702']
print "qidWithTOP20DocumentResultsDictImproved['701']:",qidWithTOP20DocumentResultsDictImproved['701']
print "qidWithTOP20DocumentResultsDictImproved['702']:",qidWithTOP20DocumentResultsDictImproved['702']
print "len(TOP10DocumentResultsDictImproved):",len(TOP10DocumentResultsDictImproved)
print "len(TOP20DocumentResultsDictImproved):",len(TOP20DocumentResultsDictImproved)
inputFileHandler0.close()

print "**********Intersection Analysis**********"
top10Union = set(TOP10DocumentResultsDictOriginal).union( set(TOP10DocumentResultsDictImproved) )
top10Intersection = set(TOP10DocumentResultsDictOriginal).intersection( set(TOP10DocumentResultsDictImproved) )
top20Union = set(TOP20DocumentResultsDictOriginal).union( set(TOP20DocumentResultsDictImproved) )
top20Intersection = set(TOP20DocumentResultsDictOriginal).intersection( set(TOP20DocumentResultsDictImproved) )
print "symmetric difference:"
print "Overall TOP10:",len(top10Intersection)/len(top10Union)
print "Overall TOP20:",len(top20Intersection)/len(top20Union)

for i in range(701,851):
    currentQID = str(i)
    if currentQID in qidWithTOP10DocumentResultsDictOriginal and currentQID in qidWithTOP10DocumentResultsDictImproved:
        testTOP10Union = set(qidWithTOP10DocumentResultsDictOriginal[currentQID]).union( set(qidWithTOP10DocumentResultsDictImproved[currentQID]) )
        testTOP10Intersection = set(qidWithTOP10DocumentResultsDictOriginal[currentQID]).intersection( set(qidWithTOP10DocumentResultsDictImproved[currentQID]) )

        testTOP20Union = set(qidWithTOP20DocumentResultsDictOriginal[currentQID]).union( set(qidWithTOP20DocumentResultsDictImproved[currentQID]) )
        testTOP20Intersection = set(qidWithTOP20DocumentResultsDictOriginal[currentQID]).intersection( set(qidWithTOP20DocumentResultsDictImproved[currentQID]) )
        print "currentQID:",currentQID
        # print "# of document results (original) TOP10:",len(qidWithTOP10DocumentResultsDictOriginal[currentQID])
        # print "# of document results (original) TOP20:",len(qidWithTOP20DocumentResultsDictOriginal[currentQID])
        # print "# of document results (improved) TOP10:",len(qidWithTOP10DocumentResultsDictImproved[currentQID])
        # print "# of document results (improved) TOP20:",len(qidWithTOP20DocumentResultsDictImproved[currentQID])
        print "# of document results (intersection) TOP10:",len(testTOP10Intersection)
        print "# of document results (union) TOP10:",len(testTOP10Union)
        print "# of document results (intersection) TOP20:",len(testTOP20Intersection)
        print "# of document results (union) TOP20:",len(testTOP20Union)
        print "TOP10 SD:",len(testTOP10Intersection)/len(testTOP10Union)
        print "TOP20 SD:",len(testTOP20Intersection)/len(testTOP20Union)
        print
    else:
        pass

exit(1)


inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/xDoc_analysis_queryLength1_20140512"
inputFileHandler0 = open(inputFileName,"r")
sumProbability = 0.0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    sumProbability += float(lineElements[1])
print "sumProbability:",sumProbability
exit(1)

# key: the terms which have been seen in the training queries
# value: which freq it belongs to
termsWithCorrespondingSpeciesBelongingToDict = {}
inputAuxFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputAuxFileHanlder2 = open(inputAuxFileName2,"r")
inputAuxFileHanlder2.readline()
for line in inputAuxFileHanlder2.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentProbablity = float(lineElements[4])
    if currentTerm not in termsWithCorrespondingSpeciesBelongingToDict:
        termsWithCorrespondingSpeciesBelongingToDict[currentTerm] = currentProbablity
print "len(termsWithCorrespondingSpeciesBelongingToDict):",len(termsWithCorrespondingSpeciesBelongingToDict)
inputAuxFileHanlder2.close()


outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsFrom100KWithTermIDANDGoodTuringProbabilites_20140510"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHanlder = open(inputFileName,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm in termsWithCorrespondingSpeciesBelongingToDict:
        outputFileHandler.write(currentLine.strip() + " " + str(termsWithCorrespondingSpeciesBelongingToDict[currentTerm]) + "\n")
    currentLine = inputFileHanlder.readline()
inputFileHanlder.close()
outputFileHandler.close()

print "OVERALL:"
print "inputAuxFileName2:",inputAuxFileName2
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

termIDANDTermDict = {}
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/firstProbabilityTermsWithTermID"
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHanlder = open(inputFileName,"r")
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = int(currentLineElements[0])
    currentTerm = currentLineElements[1]
    if currentTermID not in termIDANDTermDict:
        termIDANDTermDict[currentTermID] = currentTerm 
    currentLine = inputFileHanlder.readline()
print "len(termIDANDTermDict):",len(termIDANDTermDict)
inputFileHanlder.close()

# where is the probability component of the first probability? 
# key: # of times this object appears
# value: the probability that this term will occur in the next query
freq1stFactorProbabilityDict = {}

# key: the terms which have been seen in the training queries
# value: which freq it belongs to
termsWithCorrespondingSpeciesBelongingToDict = {}

inputAuxFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
inputAuxFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
inputAuxFileHanlder1 = open(inputAuxFileName1,"r")
inputAuxFileHanlder2 = open(inputAuxFileName2,"r")

# skip 4 not related lines
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()

# Load the per-frequency-class probability table: field 0 is the raw
# frequency, field 4 the chosen probability.  Each frequency is expected to
# appear at most once; a duplicate row aborts the script.
for line in inputAuxFileHanlder1.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[0])
    # the probability of this term appeared in the next query
    # currentProbability = float(lineElements[5])
    # the probability of this term appeared in the next query position
    currentProbability = float(lineElements[4])
    if currentProbability != 0:
        if currentFreq not in freq1stFactorProbabilityDict:
            freq1stFactorProbabilityDict[currentFreq] = currentProbability
        else:
            # duplicate frequency row: the input file is malformed
            print "system error"
            exit(1)

# for debug
# print "len(freq1stFactorProbabilityDict):",len(freq1stFactorProbabilityDict)
# print "freq1stFactorProbabilityDict[0]:",freq1stFactorProbabilityDict[0]
# print "freq1stFactorProbabilityDict[1]:",freq1stFactorProbabilityDict[1]
# exit(1)

# Assign each training-query term to its frequency class.  Input line layout:
# freq numOfTerms term1 term2 ...; the declared term count is cross-checked
# against the number of terms actually appended to the line.
numOfFreq = 0
for line in inputAuxFileHanlder2.readlines():
    lineElements = line.strip().split(" ")
    freq = int( lineElements[0] )
    numOfTerms = int( lineElements[1] )
    if numOfTerms != 0:
        numOfFreq += 1
        # sanity check: declared count must match the appended terms
        if numOfTerms == len(lineElements[2:]):
            for term in lineElements[2:]:
                if term not in termsWithCorrespondingSpeciesBelongingToDict:
                    termsWithCorrespondingSpeciesBelongingToDict[term] = freq
        else:
            print "critical error, mark1"
    else:
        # just do NOT need to be processed
        pass


# for debug       
# print "termsWithCorrespondingSpeciesBelongingToDict['of']:",termsWithCorrespondingSpeciesBelongingToDict['of']
# print "numOfFreq:",numOfFreq
print "len(freq1stFactorProbabilityDict):",len(freq1stFactorProbabilityDict)
print "len(termsWithCorrespondingSpeciesBelongingToDict):",len(termsWithCorrespondingSpeciesBelongingToDict)
# probe a known training term to spot-check the two tables against each other
print "termsWithCorrespondingSpeciesBelongingToDict['soalr']:",termsWithCorrespondingSpeciesBelongingToDict["soalr"]
print "freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict['soalr'] ]:",freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict['soalr'] ]
inputAuxFileHanlder1.close()
inputAuxFileHanlder2.close()

# options:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
# Recompute each document's xdoc value: the sum over its postings of the
# first-factor probability of the posting's term.  Terms missing from the
# first-probability lexicon fall back to the frequency-0 probability class.
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


# NOTE(review): this run stops as soon as docID == 2 is seen; without that
# early exit the "<=" condition would read an 8-byte header at exact EOF and
# crash in unpack() — fix to "<" before reuse.
while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)
    xdocValue = 0
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
        currentProbablityValue = 0.0
        # unseen term: use the frequency-0 (unseen-species) probability
        if termID not in termIDANDTermDict:
            currentProbablityValue = freq1stFactorProbabilityDict[0]
            print "----->",termID,currentProbablityValue
            print
            xdocValue += currentProbablityValue
        else:
            # look up the term's frequency class, then that class's probability
            currentProbablityValue = freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict[ termIDANDTermDict[termID] ] ]
            print "----->",termID,termIDANDTermDict[termID],currentProbablityValue
            print
            xdocValue += currentProbablityValue
    print "xdocValue:",xdocValue
    # advance past the 8-byte header plus the per-posting payload
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    
    if docID == 2:
        print "find"
        exit(1)
    
    inputFileHandler0.seek(numOfBytesRead)
    

    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
exit(1)

inputFileName = "/home/diaosi/outputDirForIndexes/originalGov2Index/XdocDividedByLogNumOfPostingsSorted_100%_rawTrecEvalCompatible_20140506_OR"
outputFileName = "/home/diaosi/outputDirForIndexes/originalGov2Index/XdocDividedByLogNumOfPostingsSorted_100%_rawTrecEvalCompatible_20140506_OR_completed"

inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    if line.strip().startswith("----->"):
        outputFileHandler.write( line[7:] )

print "OVERALL:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName

outputFileHandler.close()
inputFileHandler0.close()
exit(1)

documentResultDictFromFile0 = {}
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_1Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_3Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_5Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_10Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_15Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_20Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_30Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_40Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_50Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_60Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_70Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_80Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_90Percentage_complete
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_100Percentage_complete
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocSorted_20140504_100Percentage_complete"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentDocumentResultKey = currentQID + "_" + currentTrecID 
    if currentDocumentResultKey not in documentResultDictFromFile0:
        documentResultDictFromFile0[currentDocumentResultKey] = 1
    else:
        pass
inputFileHandler0.close()
print "len(documentResultDictFromFile0):",len(documentResultDictFromFile0)

documentResultDictFromFile1 = {}
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_50Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_40Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_30Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_20Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_15Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_10Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_5Percentage_AND_completed
# /home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_3Percentage_AND_completed
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_50Percentage_AND_completed"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentDocumentResultKey = currentQID + "_" + currentTrecID 
    if currentDocumentResultKey not in documentResultDictFromFile1:
        documentResultDictFromFile1[currentDocumentResultKey] = 1
    else:
        pass
inputFileHandler0.close()
print "len(documentResultDictFromFile1):",len(documentResultDictFromFile1)
intersectionSet = set(documentResultDictFromFile0).intersection( set(documentResultDictFromFile1) )
unionSet = set(documentResultDictFromFile0).union( set(documentResultDictFromFile1) )
# print "documentResultDictFromFile0:",documentResultDictFromFile0
# print "documentResultDictFromFile1:",documentResultDictFromFile1
print "Symmetric Difference:",len(intersectionSet)/len(unionSet)
exit(1)

inputFileName = "/home/diaosi/outputDirForIndexes/originalGov2Index/qrels.gov2.04"
inputFileHandler0 = open(inputFileName,"r")
numOfDocumentCandidates = 0

for line in inputFileHandler0.readlines():
    currentQID = line.strip().split(" ")[0]
    currentRelevanceLabel = line.strip().split(" ")[3]
    if currentQID == "749" and currentRelevanceLabel != "0":
        numOfDocumentCandidates += 1
    if currentQID == "750" and currentRelevanceLabel != "0":
        numOfDocumentCandidates += 1

print "numOfDocumentCandidates:",numOfDocumentCandidates
exit(1)


inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocMultiplyByLogNumOfPostings_20140505_100Percentage_raw"
outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecEvalCompatibleBasedOnXdocMultiplyByLogNumOfPostings_20140505_100Percentage_completed"

inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    if line.strip().startswith("----->"):
        outputFileHandler.write( line[7:] )

print "OVERALL:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName

outputFileHandler.close()
inputFileHandler0.close()
exit(1)

# currentLogNumOfPostings = math.log(7458,10)
# print "currentLogNumOfPostings:",currentLogNumOfPostings
# exit(1)

inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument_sortedByXdoc"
outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdocs_and_variations_20140505Night"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[2])
    currentXdocValue = float(currentLineElements[7])
    currentSqureRootNumOfPostings = math.sqrt(currentNumOfPostings)
    currentLogNumOfPostings = math.log(currentNumOfPostings,10)
    currentNewComputeValue1 = currentXdocValue / currentSqureRootNumOfPostings
    currentNewComputeValue2 = currentXdocValue * currentSqureRootNumOfPostings
    currentNewComputeValue3 = currentXdocValue / currentLogNumOfPostings
    currentNewComputeValue4 = currentXdocValue * currentLogNumOfPostings
    # column0: trecID
    # column1: docID
    # column2: # of unique terms
    # column3: # of terms (include duplicate ones)
    # column4: Xdoc
    # column5: Xdoc / (# of unique terms)
    # column6: Xdoc / sqrt(# of unique terms)
    # column7: Xdoc * sqrt(# of unique terms)
    # column8: Xdoc / log(# of unique terms)
    # column9: Xdoc * log(# of unique terms)
    outputLine = currentLineElements[0] + " " + currentLineElements[1] + " " + currentLineElements[2] + " " + currentLineElements[3] + " " + currentLineElements[7] + " " + currentLineElements[11] + " " + str(currentNewComputeValue1) + " " + str(currentNewComputeValue2) + " " + str(currentNewComputeValue3) + " " + str(currentNewComputeValue4) + "\n" 
    outputFileHandler.write(outputLine)
    currentLine = inputFileHandler0.readline()
outputFileHandler.close()
inputFileHandler0.close()

print "OVERALL:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

currentDocDict = {}
inputFileName = "/home/diaosi/outputDirForIndexes/originalGov2Index/qrels.gov2.all"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    if lineElements[3] != "0":
        if lineElements[2] not in currentDocDict:
            currentDocDict[lineElements[2]] = 1
        else:
            currentDocDict[lineElements[2]] += 1

print "len(currentDocDict):",len(currentDocDict)
inputFileHandler0.close()
exit(1)

inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument_sortedByXdoc"
outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_Xdoc_sorted_by_XdocUsingGoodTurningDividedBySquareNumOfPostingsForEachDocument"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[2])
    currentSqureRootNumOfPostings = math.sqrt(currentNumOfPostings)
    currentNewComputeValue1 = float(currentLineElements[7]) / currentSqureRootNumOfPostings
    outputLine = currentLineElements[0] + " " + currentLineElements[1] + " " + currentLineElements[2] + " " + currentLineElements[3] + " " + currentLineElements[7] + " " + currentLineElements[11] + " " + str(currentNewComputeValue1) + "\n" 
    outputFileHandler.write(outputLine)
    currentLine = inputFileHandler0.readline()
outputFileHandler.close()
inputFileHandler0.close()

print "OVERALL:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)






inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_50Percentage_AND"
outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/trecResult_trecEvalCompatible_our_approach_weight_50_50Percentage_AND_completed"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    outputFileHandler.write(line[7:])

inputFileHandler0.close()
outputFileHandler.close()

print "OVERALL:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

docIDANDTrecIDDict = {}
ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[1]
    currentTrecID = currentLineElements[0]
    if currentDocID not in docIDANDTrecIDDict:
        docIDANDTrecIDDict[currentDocID] = currentTrecID
    currentLine = inputFileHandler0.readline()
print "len(docIDANDTrecIDDict):",len(docIDANDTrecIDDict)
print "docIDANDTrecIDDict['0']:",docIDANDTrecIDDict['0']

inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/debugTrecResult"
outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/debugTrecResult_trecEvalCompatible"
inputFileHandler0 = open(inputFileName1,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentDocID = lineElements[2]
    outputLine = lineElements[0] + " " + lineElements[1] + " " + docIDANDTrecIDDict[ lineElements[2] ] + " " + lineElements[3] + " " + lineElements[4] + " " + lineElements[5] + "\n"
    outputFileHandler.write(outputLine)
inputFileHandler0.close()

print "OVERALL:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_debugPercentage_weight50"
inputFileHandler0 = open(inputFileName,"r")
sumNumOfPostings = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    sumNumOfPostings += int(currentLineElements[1])
    currentLine = inputFileHandler0.readline()
print "sumNumOfPostings:",sumNumOfPostings
inputFileHandler0.close()
exit(1)

currentDocDict = {}
TOTAL_NUM_OF_POSTINGS = 6451948010
numOfPostingNeededToBePoppedAtDebugPercentage = 5000000
numOfPostingNeededToBePoppedAt1Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.01)
numOfPostingNeededToBePoppedAt3Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.03)
numOfPostingNeededToBePoppedAt5Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.05)
numOfPostingNeededToBePoppedAt10Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.1)
numOfPostingNeededToBePoppedAt15Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.15)
numOfPostingNeededToBePoppedAt20Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.2)
numOfPostingNeededToBePoppedAt30Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.3)
numOfPostingNeededToBePoppedAt40Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.4)
numOfPostingNeededToBePoppedAt50Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.5)
numOfPostingNeededToBePoppedAt60Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.6)
numOfPostingNeededToBePoppedAt70Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.7)
numOfPostingNeededToBePoppedAt80Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.8)
numOfPostingNeededToBePoppedAt90Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.9)
numOfPostingNeededToBePoppedAt100Percentage = int(TOTAL_NUM_OF_POSTINGS * 1.0)
print "numOfPostingNeededToBePoppedAtDebugPercentage:",numOfPostingNeededToBePoppedAtDebugPercentage
print "numOfPostingNeededToBePoppedAt1Percentage:",numOfPostingNeededToBePoppedAt1Percentage
print "numOfPostingNeededToBePoppedAt3Percentage:",numOfPostingNeededToBePoppedAt3Percentage
print "numOfPostingNeededToBePoppedAt5Percentage:",numOfPostingNeededToBePoppedAt5Percentage
print "numOfPostingNeededToBePoppedAt10Percentage:",numOfPostingNeededToBePoppedAt10Percentage
print "numOfPostingNeededToBePoppedAt15Percentage:",numOfPostingNeededToBePoppedAt15Percentage
print "numOfPostingNeededToBePoppedAt20Percentage:",numOfPostingNeededToBePoppedAt20Percentage
print "numOfPostingNeededToBePoppedAt30Percentage:",numOfPostingNeededToBePoppedAt30Percentage
print "numOfPostingNeededToBePoppedAt40Percentage:",numOfPostingNeededToBePoppedAt40Percentage
print "numOfPostingNeededToBePoppedAt50Percentage:",numOfPostingNeededToBePoppedAt50Percentage
print "numOfPostingNeededToBePoppedAt60Percentage:",numOfPostingNeededToBePoppedAt60Percentage
print "numOfPostingNeededToBePoppedAt70Percentage:",numOfPostingNeededToBePoppedAt70Percentage
print "numOfPostingNeededToBePoppedAt80Percentage:",numOfPostingNeededToBePoppedAt80Percentage
print "numOfPostingNeededToBePoppedAt90Percentage:",numOfPostingNeededToBePoppedAt90Percentage
print "numOfPostingNeededToBePoppedAt100Percentage:",numOfPostingNeededToBePoppedAt100Percentage
numOfBytes = 0
numOfPostingPopped = 0
numOfPostingBeInTOP10 = 0

inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_50_WHOLE"
inputFileHandler0 = open(inputFileName3,"rb")
print "--->posting file to evaluate:",inputFileName3
statinfo = os.stat(inputFileName3)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfResultsReturnedCountedUpTo10 = 0

while numOfBytes < fileSize:
    # old version
    # each time, just read the info of ONE posting, too few
    # byteString = inputFileHandler0.read(4 + 4 + 4)
    # (termID,docID,currentProbability) = unpack( "2I1f", byteString)
    
    # current version
    # each time, read the info of 1M postings
    byteStringBuffer = inputFileHandler0.read( 1000000 * 16)
    byteStringBufferIndexPosition = 0
    for i in range(0,1000000):
        byteString = byteStringBuffer[byteStringBufferIndexPosition:byteStringBufferIndexPosition+16]
        byteStringBufferIndexPosition += 16
        (termID,docID,currentProbability,impactScore) = unpack( "2I2f", byteString)
        if docID not in currentDocDict:
            currentDocDict[docID] = 1
        else:
            currentDocDict[docID] += 1

        if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
            if numOfPostingPopped == numOfPostingNeededToBePoppedAtDebugPercentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_debugPercentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_1Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_3Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_5Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_10Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_15Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_20Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_30Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_40Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_50Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt60Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_60Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt70Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_70Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt80Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_80Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            
            if numOfPostingPopped == numOfPostingNeededToBePoppedAt90Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_90Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName

            if numOfPostingPopped == numOfPostingNeededToBePoppedAt100Percentage:
                outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/DocumentDistribution_100Percentage_weight50"
                outputFileHandler = open(outputFileName,"w")
                docIDList = []
                docIDList = currentDocDict.keys()
                docIDList.sort(cmp=None, key=None, reverse=False)
                for currentDOCID in docIDList:
                    outputLine = str(currentDOCID) + " " + str(currentDocDict[currentDOCID]) + "\n"
                    outputFileHandler.write(outputLine)
                outputFileHandler.close()
                print "len(currentDocDict):",len(currentDocDict)
                print "numOfPostingPopped:",numOfPostingPopped
                print "outputFileName:",outputFileName
            

        numOfBytes += 12
        numOfPostingPopped += 1
        if numOfPostingPopped % 1000000 == 0:
            print str(numOfPostingPopped),"postings have been examined."

inputFileHandler0.close()
exit(1)

# select the document based on XdocValues
inputFileName1 = "trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument_sortedByXdoc"
inputFileHanlder = open(inputFileName1,"r")
sumNumOfPostings = 0
numOfDocsProcessed = 0
currentLine = inputFileHanlder.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int(currentLineElements[2])
    sumNumOfPostings += currentNumOfPostings
    numOfDocsProcessed += 1
    if numOfDocsProcessed == 25205179:
        break
            # 16234 1%
            # 58892 3%
            # 113151 5%
            # 303078 10%
            # 573053 15%
            # 960415 20%
            # 1704697 30%
            # 3048106 40%
            # 4840867 50%
            # 7078881 60%
            # 9726244 70%
            # 13270243 80%
            # 17993779 90%
            # 25205179 100%
    currentLine = inputFileHanlder.readline()
inputFileHanlder.close()
print "OVERALL:"
print "sumNumOfPostings:",sumNumOfPostings
print "numOfDocsProcessed:",numOfDocsProcessed
exit(1)

# option1:
# random document selection
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/randomDocumentsSelectedAtDifferentPercentage_20140412"
# option2:
# select the document based on XdocValues
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_Since20140428_sortedByXdocValues"
inputFileHanlder = open(inputFileName1,"r")
currentLine = inputFileHanlder.readline()

currentDocDict = {}
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[1]
    if currentDocID not in currentDocDict:
        currentDocDict[currentDocID] = 1
    else:
        print "duplicated docID detected."
        exit(1)
    if len(currentDocDict) == 25205179:
        # 16234 1%
        # 58892 3%
        # 113151 5%
        # 303078 10%
        # 573053 15%
        # 960415 20%
        # 1704697 30%
        # 3048106 40%
        # 4840867 50%
        # 7078881 60%
        # 9726244 70%
        # 13270243 80%
        # 17993779 90%
        # 25205179 100%
        break
    currentLine = inputFileHanlder.readline()
print "len(currentDocDict):",len(currentDocDict)
inputFileHanlder.close()
exit(1)

queryIDWithQueryContentDict = {}
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    outputLine = ""
    currentQID = lineElements[0]
    for currentElement in lineElements[6:-1]:
        outputLine += currentElement + " "
    outputLine = outputLine.strip()
    if currentQID not in queryIDWithQueryContentDict:
         queryIDWithQueryContentDict[currentQID] = outputLine
    # print outputLine
print "queryIDWithQueryContentDict['701']:",queryIDWithQueryContentDict["701"]
print "queryIDWithQueryContentDict['702']:",queryIDWithQueryContentDict["702"]
inputFileHandler1.close()

inputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/rawResultFile_150_human_queries_top2M_AND_20140412"
outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top2M_100%_tb04-06_final_with_termID_and_score_added"

inputFileHanlder2 = open(inputFileName2,"r")
outputFileHandler = open(outputFileName,"w")

currentQueryID = ""
currentLine = inputFileHanlder2.readline()
while currentLine:
    if currentLine.strip().startswith("qid:"):
        currentLineElements = currentLine.strip().split(" ")
        print currentLineElements
        currentQueryID = currentLineElements[1]
        currentRank = 0
        
    if currentLine.strip().startswith("Score:"):
        currentRank += 1
        currentLineElements = currentLine.strip().split("\t")
        currentScoreStringPair = currentLineElements[0]
        currentDocIDStringPair = currentLineElements[1]
        currentTrecIDStringPair = currentLineElements[2]
        currentScore = currentScoreStringPair.strip().split(":")[1].strip()
        currentDocID = currentDocIDStringPair.strip().split(":")[1].strip()
        currentTrecID = currentTrecIDStringPair.strip().split(":")[1].strip()
        outputFileLine = str(currentQueryID) + " " + "Q0" + " " + str(currentTrecID) + " " + str(currentRank) + " " + str(currentScore) + " " + "PolyIRTK" + " " + queryIDWithQueryContentDict[currentQueryID] + " " + str(currentDocID) + "\n"
        outputFileHandler.write(outputFileLine)
    
    currentLine = inputFileHanlder2.readline()

print "Overall:"
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)

queryTermFreqDict = {}
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/realFreqOfTermsForTail5KFrom100K"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in queryTermFreqDict:
        queryTermFreqDict[queryTerm] = queryTermFreq  
inputFileHandler0.close()
print "len(queryTermFreqDict):",len(queryTermFreqDict)

queryTermIDFreqDict = {}
inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(inputFileName2,"r")
currentLine = inputFileHandler0.readline() 
while currentLine:
    currentLineElements = currentLine.strip().split(" ") 
    currentQueryTermID = currentLineElements[0]
    currentQueryTerm = currentLineElements[1]
    if currentQueryTerm in queryTermFreqDict:
         queryTermIDFreqDict[currentQueryTermID] = queryTermFreqDict[currentQueryTerm]
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(queryTermIDFreqDict):",len(queryTermIDFreqDict)
print "queryTermFreqDict['000']:",queryTermFreqDict['000']
print "queryTermIDFreqDict['2']:",queryTermIDFreqDict['2']

sumOfCPUCost = 0
inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/randomDocumentPartitionBasedInvertedIndexStatistics_15%_20140425"
inputFileHandler0 = open(inputFileName3,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTermIDListLength = int(currentLineElements[1])
    if currentTermID not in queryTermIDFreqDict:
        pass
    else:
        sumOfCPUCost += queryTermIDFreqDict[currentTermID] * currentTermIDListLength 
    currentLine = inputFileHandler0.readline()
print "1% sumOfCPUCost:",sumOfCPUCost
inputFileHandler0.close()
print "Overall:"
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
exit(1)

# get all the relevant document results from the human judge side
humanJudgeDocumentResultDict = {}
# for pangolin:
# inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/data/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
# for dodo:
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
inputFileHandler0 = open(inputFileName1,"r")
for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    relevanceLabel = lineElements[3]
    currentTrecID = lineElements[2]
    if relevanceLabel != "0":
        currentDocumentResultKey = currentQID + "_" + currentTrecID
        if currentDocumentResultKey not in humanJudgeDocumentResultDict:
            humanJudgeDocumentResultDict[currentDocumentResultKey] = 1
print "len(humanJudgeDocumentResultDict):",len(humanJudgeDocumentResultDict)


searchSystemDocumentResultDict = {}
# AND
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
# OR
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/OR_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentRank = int(lineElements[3])
    currentDocumentResultKey = currentQID + "_" + currentTrecID
    if currentRank <= 10:
        if currentDocumentResultKey not in searchSystemDocumentResultDict:
            searchSystemDocumentResultDict[currentDocumentResultKey] = 1
        else:
            exit(1)
print "len(searchSystemDocumentResultDict):",len(searchSystemDocumentResultDict)

'''
searchSystemDocumentResultDict = {}
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentRank = int(lineElements[3])
    currentDocumentResultKey = currentQID + "_" + currentTrecID
    if currentRank <= 10:
        if currentDocumentResultKey not in searchSystemDocumentResultDict:
            searchSystemDocumentResultDict[currentDocumentResultKey] = 1
        else:
            exit(1)
print "len(searchSystemDocumentResultDict):",len(searchSystemDocumentResultDict)
'''

print "overall:"
intersectionSet = set(humanJudgeDocumentResultDict).intersection( set(searchSystemDocumentResultDict) )
print len(intersectionSet)
exit(1)

# get all the related document results from the human judge side
humanJudgeDocumentResultDict1 = {}
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/qrels.gov2.all"
inputFileHandler0 = open(inputFileName1,"r")
for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    relevanceLabel = lineElements[3]
    if relevanceLabel != "0":
        currentDocumentResultKey1 = currentQID + "_" + currentTrecID
        if currentDocumentResultKey1 not in humanJudgeDocumentResultDict1:
            humanJudgeDocumentResultDict1[currentDocumentResultKey1] = 1
print "len(humanJudgeDocumentResultDict1):",len(humanJudgeDocumentResultDict1)
exit(1)

# key: key based on the list length label 
# value: dict
    # key: key based on the piece num
    # value: # of postings in that cell
numeratorDict = {}
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/relRankFilesRelated/termPieceInfoForQueryTerms_stepGap_2_OLD"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    listLengthKey = int(lineElements[2])
    if listLengthKey not in numeratorDict:
        numeratorDict[listLengthKey] = {}
    
    numOfPieces = int(lineElements[3])
    base = 4
    for i in range(0,numOfPieces):
        pieceNum = int(lineElements[base])
        score1 = int(lineElements[base+1])
        
        if pieceNum not in numeratorDict[listLengthKey]:
            numeratorDict[listLengthKey][pieceNum] = 0
        numeratorDict[listLengthKey][pieceNum] += score1
        
        base += 2

print "numeratorDict: ",numeratorDict
print "numeratorDict[0][0]: ",numeratorDict[0][0]
exit(1)

# pangolin:
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_150_Part4"
# dodo:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_150_WHOLE"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

for i in range(0,100):
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,combinedProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,combinedProbability,impactScore

inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
exit(1)


# options:
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/fallThroughRate_weight_0_OR_Debug_gainRateAdded_sortedByGainRate"
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/fallThroughRate_weight_0_OR_1%_gainRateAdded_sortedByGainRate"
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/fallThroughRate_weight_0_OR_5%_gainRateAdded_sortedByGainRate"
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/fallThroughRate_weight_0_OR_15%_gainRateAdded_sortedByGainRate"
inputFileHandler0 = open(inputFileName,"r")
numOfRelatedDocumentResultReturned = 0
numOfDocumentResultInTotal = 0
totalQueryEvaluationCostForCPUFromFirstTier = 0
totalQueryEvaluationCostForCPUFromSecondTier = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    numOfRelatedDocumentResultReturned += int(lineElements[1])
    numOfDocumentResultInTotal += int(lineElements[2])
    totalQueryEvaluationCostForCPUFromFirstTier += int(lineElements[3])
    totalQueryEvaluationCostForCPUFromSecondTier += int(lineElements[4])
print "numOfDocumentResultInTotal:",numOfDocumentResultInTotal
print "numOfRelatedDocumentResultReturned:",numOfRelatedDocumentResultReturned
print "totalQueryEvaluationCostForCPUFromFirstTier:",totalQueryEvaluationCostForCPUFromFirstTier
print "totalQueryEvaluationCostForCPUFromSecondTier:",totalQueryEvaluationCostForCPUFromSecondTier
inputFileHandler0.close()
exit(1)

inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/tempDebug"
inputFileHandler0 = open(inputFileName,"r")
maxNumOfRelatedDocumentResultReturned = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfRelatedDocumentResultReturned = int(lineElements[1])
    if currentNumOfRelatedDocumentResultReturned > 10:
        print line.strip()
        exit()
    if maxNumOfRelatedDocumentResultReturned < currentNumOfRelatedDocumentResultReturned:
        maxNumOfRelatedDocumentResultReturned = currentNumOfRelatedDocumentResultReturned
print "maxNumOfRelatedDocumentResultReturned:",maxNumOfRelatedDocumentResultReturned
inputFileHandler0.close()
exit(1)

# AND
# quality
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_0_AND_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_1_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_quality_weight_5_AND_20140324_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_10_AND_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_50_AND_20140323_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_quality_weight_100_AND_20140324_LOG"

# OR
# quality
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_0_OR_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_1_OR_20140321"
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_quality_weight_5_OR_20140324_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_10_OR_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_50_OR_20140323_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_quality_weight_100_OR_20140324_LOG"

inputFileHandler0 = open(inputFileName,"r")
print "inputFileName:",inputFileName
currentLine = inputFileHandler0.readline()
while currentLine:
    if currentLine.strip().startswith("numOfRelatedDocumentResultReturned:") and currentLine.strip().endswith("returned results up to 10 by system"):
        print currentLine,
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)


# AND
# overlap
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_0_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_1_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_overlap_weight_5_AND_20140324_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_10_AND_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_50_AND_20140323_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_overlap_weight_100_AND_20140324_LOG"

# OR
# overlap
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_0_OR_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_1_OR_20140321"
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_overlap_weight_5_OR_20140324_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_10_OR_20140323_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_50_OR_20140323_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/scripts/src/shellScripts/evaluate_overlap_weight_100_OR_20140324_LOG"

inputFileHandler0 = open(inputFileName,"r")
print "inputFileName:",inputFileName
currentLine = inputFileHandler0.readline()
while currentLine:
    if currentLine.strip().startswith("numOfRelatedDocumentResultReturned:"):
        print currentLine,
    # if currentLine.strip().startswith("numOfPostingPopped:"):
    #     print currentLine,
    if currentLine.strip().startswith("numOfTOP10PostingsRetained:"):
        print currentLine
    
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)

inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_100_WHOLE"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

for i in range(0,1024):
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,combinedProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,combinedProbability,impactScore

inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
exit(1)


def getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,termANDTermIDDict,searchSystemDocumentResultDict,postingWithDocumentResultDict,qIDWithThresholdDict):
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function begins..."
    #tempQueryTermDict = {}
    #tempOutputFileName = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/partOfLexiconTermsWithTermIDForTail5KQueries"
    #tempOutputFileHandler = open(tempOutputFileName,"w")
    
    tempCounter = 0
    tempCounter2 = 0
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    inputFileHandler1 = open(inputFileName1,"r")
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = currentLine.strip().split(":")[1].strip()
            print "currentQID:",currentQID
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            # print nextLine.strip()
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # temp section
                #for queryTermIndex in currentQueryTermIndexDict:
                #    currentQueryTerm = currentQueryTermIndexDict[queryTermIndex]
                #    if currentQueryTerm not in tempQueryTermDict:
                #        tempQueryTermDict[currentQueryTerm] = 1
                #        tempOutputFileHandler.write( currentQueryTerm + "\n")
                
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip one line
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                
                # for IMPORTANT DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 23:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-1]
                    theScore = float(lineElements[-2])
                    documentResultKey = currentQID + "_" + theDocID
                    
                    if currentQID not in searchSystemDocumentResultDict:
                        searchSystemDocumentResultDict[currentQID] = {}
                    
                    if theRank <= 10:
                        if documentResultKey not in searchSystemDocumentResultDict:
                            searchSystemDocumentResultDict[currentQID][documentResultKey] = 0.0
                            qIDWithThresholdDict[currentQID] = theScore; 
                        else:
                            print "duplicated document result."
                            print "documentResultKey:",documentResultKey
                            exit(1)
                        
                        upperBound = 0
                        if len(currentQueryTermIndexDict) >= 10:
                            upperBound = 10
                        else:
                            upperBound = len(currentQueryTermIndexDict)
                        
                        for i in range(0,upperBound):
                            currentTerm = currentQueryTermIndexDict[i]
                            if currentTerm not in termANDTermIDDict:
                                print currentTerm,"is NOT in the dict."
                                exit(1)
                            else:
                                currentTermID = termANDTermIDDict[currentTerm]
                            currentTermScore = float( lineElements[1 + i] )
                            if currentTermScore != 0.0:
                                postingKey = currentTermID + "_" + theDocID
                                if postingKey not in postingWithDocumentResultDict:
                                    postingWithDocumentResultDict[postingKey] = []
                                
                                postingWithDocumentResultDict[postingKey].append(documentResultKey)    
                                
                                tempCounter += 1
                        tempCounter2 += len(currentQueryTermIndexDict)
                    else:
                        pass
                    


                    numOfResultsForTheCurrentQuery += 1
                    
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "tempCounter:",tempCounter
                print "tempCounter2:",tempCounter2
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function ends."




# key: qID_docID
# value: 1
qIDWithThresholdDict = {}
searchSystemDocumentResultDict = {}
postingWithDocumentResultDict = {}

# OR
# for pangolin:
# inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/data/rawResults_50%_TOP10_OR_20140126Night"
# for dodo:
# inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/rawResults_50%_TOP10_OR_20140126Night"
inputFileName1 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/raw_100%_tail5KResults_OR_semantics"

termANDTermIDDict = {}
# for dodo:
ifn = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/partOfLexiconTermsWithTermIDForTail5KQueries"
# for pangolin:
# ifn = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/partOfLexiconTermsWithTermIDForTail5KQueries"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm not in termANDTermIDDict:
        termANDTermIDDict[currentTerm] = currentTermID
    currentLine = inputFileHandler0.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
inputFileHandler0.close()


getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,10,termANDTermIDDict,searchSystemDocumentResultDict,postingWithDocumentResultDict,qIDWithThresholdDict)
print "overall:"

documentResultCounter = 0
for currentQID in searchSystemDocumentResultDict:
    documentResultCounter += len( searchSystemDocumentResultDict[currentQID] )
print "# of top10 document results:",documentResultCounter

postingCounter = 0
for currentPostingKey in postingWithDocumentResultDict:
    postingCounter += len(postingWithDocumentResultDict[currentPostingKey])
print "# of top10 postings:",postingCounter

print "qIDWithThresholdDict['95001'] threshold:",qIDWithThresholdDict['95001']
print "qIDWithThresholdDict['95003'] threshold:",qIDWithThresholdDict['95003']
exit(1)




inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_100_Part3"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

for i in range(0,1024):
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,combinedProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,combinedProbability,impactScore

print "Overall:"
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)


print "simpleLook"
# AND
# overlap
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_0_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_1_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_10_AND_20140322_LOG"

# OR
# overlap
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_0_OR_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_1_OR_20140321"
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_overlap_weight_10_OR_20140322_LOG"


inputFileHandler0 = open(inputFileName,"r")
print "inputFileName:",inputFileName
currentLine = inputFileHandler0.readline()
while currentLine:
    if currentLine.strip().startswith("numOfRelatedDocumentResultReturned:"):
        print currentLine,
    # if currentLine.strip().startswith("numOfPostingPopped:"):
    #     print currentLine,
    if currentLine.strip().startswith("numOfTOP10PostingsRetained:"):
        print currentLine
    
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)


# AND
# quality
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_0_AND_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_1_AND_20140321"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_10_AND_20140322_LOG"

# OR
# quality
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_0_OR_20140322_LOG"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_1_OR_20140321"
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRToolkit_Wei/evaluate_quality_weight_10_OR_20140322_LOG"

inputFileHandler0 = open(inputFileName,"r")
print "inputFileName:",inputFileName
currentLine = inputFileHandler0.readline()
while currentLine:
    if currentLine.strip().startswith("numOfRelatedDocumentResultReturned:") and currentLine.strip().endswith("returned results up to 10 by system"):
        print currentLine,
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)


print "auxFileGeneration() begins..."
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_0_WHOLE"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_1_WHOLE"
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_10_WHOLE"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_50_WHOLE"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

for i in range(0,1024):
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,combinedProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,combinedProbability,impactScore
  
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)

print "auxFileGeneration() begins..."
# NOTE(review): this section is unreachable (the section above ends with
# exit(1)) and, because every assignment below is commented out, it would
# reuse whatever inputFileName was set last -- confirm before re-enabling.
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140321Morning_weight_0_WHOLE"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_1_WHOLE"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/results/allPostingsBeingPopped20140320Afternoon_weight_50_WHOLE"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

# Dump the first 256 16-byte records (2 uint32 + 2 float32 each).
for i in range(0,256):
    byteString = inputFileHandler0.read(4 + 4 + 4 + 4)
    (termID,docID,combinedProbability,impactScore) = unpack( "2I2f", byteString)
    print termID,docID,combinedProbability,impactScore

inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName
print "auxFileGeneration() ends."
exit(1)


# Disabled per-document scan, kept as a bare string literal (never executed).
'''
# options:
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART0_1_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART2_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART3_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo_PART4_DOC.binary"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)

    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
        (termID,staticProbability,dynamicProbability,combinedProbability,impactScore) = unpack( "1I4f", byteString)
        print index,termID,staticProbability,dynamicProbability,combinedProbability,impactScore
    exit(1)
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    
    if docID == 15826653:
        print "find"
        exit(1)
    
    inputFileHandler0.seek(numOfBytesRead)
    

    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)
'''

# Disabled "gold" scan of the WHOLE per-document file, kept as a bare
# string literal (never executed).
'''
# gold
inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/data/LEAVE_selectedDocumentPostingValuesInfo20140215Afternoon_WHOLE_DOC.binary"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize


numOfDocsProcessed = 6000000
numOfBytesRead = 17959918476
inputFileHandler0.seek(numOfBytesRead)

while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)

    
    if docID == 6000000:
        print "find"
        for index in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4)
            (termID,staticProbability,dynamicProbability) = unpack( "1I2f", byteString)
            print str(index),str(termID),str(staticProbability),str(dynamicProbability)
        exit(1)
    
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 3
    

    
    inputFileHandler0.seek(numOfBytesRead)
    

    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)
'''

# options:
# inputFileName = N/A
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize


numOfDocsProcessed = 0
numOfBytesRead = 0
inputFileHandler0.seek(numOfBytesRead)

while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    print "docID:",str(docID),str(score1)
    
    
    if docID == 6000000:
        print "find"
        for index in range(0,score1):
            byteString = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
            (termID,staticProbability,dynamicProbability,_,impactScore) = unpack( "1I4f", byteString)
            print str(index),str(termID),str(staticProbability),str(dynamicProbability),str(impactScore)
        exit(1)
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    

    
    inputFileHandler0.seek(numOfBytesRead)
    

    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."
exit(1)


print "auxFileGeneration() begins..."
# for pangolin:
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/selectedDocumentPostingValuesInfo20140215Afternoon_WHOLE_DOC.binary"
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/selectedDocumentPostingValuesInfo20140306Afternoon_PART0_DOC.binary"
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/LEAVE_selectedDocumentPostingValuesInfo20140306Afternoon_PART1_DOC.binary"
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/LEAVE_selectedDocumentPostingValuesInfo20140310Afternoon_PART0_1_DOC.binary"
# for dodo:
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRIndexer/selectedDocumentPostingValuesInfo20140306Afternoon_PART3_DOC.binary"
# inputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRIndexer/LEAVE_selectedDocumentPostingValuesInfo20140306Afternoon_PART2_DOC.binary"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize

numOfBytesRead = 0
numOfDocsProcessed = 0
# numOfDocsProcessed = 6000000


while numOfBytesRead <= fileSize:
    if numOfDocsProcessed % 100000 == 0:
        print numOfDocsProcessed,numOfBytesRead
    # print "numOfBytesRead:",numOfBytesRead
    byteString = inputFileHandler0.read(4 + 4)
    (docID,score1) = unpack( "2I", byteString)
    # print "docID:",str(docID),str(score1)
    
    numOfBytesRead += 4
    numOfBytesRead += 4
    numOfBytesRead += score1 * 4 * 5
    
    inputFileHandler0.seek(numOfBytesRead)
    
    '''
    for index in range(0,score1):
        byteString = inputFileHandler0.read(4 + 4 + 4)
        (termID,staticProbability,dynamicProbability) = unpack( "1I2f", byteString)
        # print index,termID,staticProbability,dynamicProbability
    '''
    numOfDocsProcessed += 1

print numOfDocsProcessed,numOfBytesRead   
inputFileHandler0.close()
print "Overall:"
print "inputFileName:",inputFileName

inputFileHandler0.close()
print "auxFileGeneration() ends."

exit(1)


fileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/files_to_DELETE_since20140318Afternoon/resultfile0"
inputFileHandler0 = open(fileName1,"rb")
s1 = inputFileHandler0.read(4 + 4 + 4)
(termID,docID,probability) = unpack( "2I1f", s1)
print "termID:",termID,"docID:",docID,"probability:",probability
s1 = inputFileHandler0.read(4 + 4 + 4)
(termID,docID,probability) = unpack( "2I1f", s1)
print "termID:",termID,"docID:",docID,"probability:",probability

fileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/LEAVE_allPostingsBeingPopped20140310Afternoon_weight_0_WHOLE"
inputFileHandler0 = open(fileName2,"rb")
s1 = inputFileHandler0.read(4 + 4 + 4 + 4)
(termID,docID,probability,impactScore) = unpack( "2I2f", s1)
print "termID:",termID,"docID:",docID,"probability:",probability,"impactScore:",impactScore
s1 = inputFileHandler0.read(4 + 4 + 4 + 4)
(termID,docID,probability,impactScore) = unpack( "2I2f", s1)
print "termID:",termID,"docID:",docID,"probability:",probability,"impactScore:",impactScore

exit(1)


# Disabled snippet, kept as a bare string literal (never executed).
'''
numOfPostingsNeededToRead = 100
for i in range(0,numOfPostingsNeededToRead):
    s = inputFileHandler0.read(4 + 4 + 4)
    (termID,docID,finalProbablity) = unpack( "2I1f", s)
    print termID,docID,finalProbablity
'''
# NOTE(review): this section is unreachable (the section above ends with
# exit(1)) and reads from whatever inputFileHandler0 was bound to last --
# confirm the intended input file before re-enabling.
# inputFileHandler0 = open("/home/diaosi/workspace/web-search-engine-wei-2014-March/polyIRIndexer/LEAVE_selectedDocumentPostingValuesInfo20140306Afternoon_PART2_DOC.binary","rb")
# First document: an 8-byte (docID, score1) header ...
s1 = inputFileHandler0.read(4 + 4)
(docID,score1) = unpack( "2I", s1)
print "docID:",docID
print "score1:",score1
# ... followed here by 338 hard-coded 20-byte entries.
for i in range(0,338):
    s1 = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
    (termID,currentStaticProbablity,currentDynamicProbability,currentFinalProbability,partialBM25) = unpack( "1I4f", s1)
    print i,termID,currentStaticProbablity,currentDynamicProbability,currentFinalProbability,partialBM25

# Second document: header plus 97 hard-coded 20-byte entries.
s1 = inputFileHandler0.read(4 + 4)
(docID,score1) = unpack( "2I", s1)
print "docID:",docID
print "score1:",score1
for i in range(0,97):
    s1 = inputFileHandler0.read(4 + 4 + 4 + 4 + 4)
    (termID,currentStaticProbablity,currentDynamicProbability,currentFinalProbability,partialBM25) = unpack( "1I4f", s1)
    print i,termID,currentStaticProbablity,currentDynamicProbability,currentFinalProbability,partialBM25

exit(1)
# Disabled absolute-offset probe, kept as a string literal (never executed).
'''
inputFileHandler0.seek(333316040)
s1 = inputFileHandler0.read(4)
(docID,) = unpack( "1I", s1)
print "docID:",docID    
'''

exit(1)

documentResultsDict = {}
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/tail5KResults_NEW_FORMAT_20140222Afternoon_OR"
inputFileHandler0 = open(inputFileName,"r")
tempCounter = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    documentResultKey = lineElements[0] + "_" + lineElements[1]
    if documentResultKey not in documentResultsDict:
        documentResultsDict[documentResultKey] = 1
    currentScore = float(lineElements[3])
    if currentScore != 0.0:
        tempCounter += 1

print "len(documentResultsDict):",len(documentResultsDict)
print "tempCounter:",tempCounter
inputFileHandler0.close()
exit(1)

outputFileName = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/partOfLexiconTermsWithTermIDForTail5KQueries"
outputFileHandler = open(outputFileName,"w")

queryTermDict = {}
ifn = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/partOfLexiconTermsForTail5KQueries"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip()
    if currentTerm not in queryTermDict:
         queryTermDict[currentTerm] = 1
print "len(queryTermDict):",len(queryTermDict)
inputFileHandler0.close()

# the input format of the file should be the following:
# (termID, term)
# for dodo:
# ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
# for pangolin:
inputFileName1 = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]    
    if currentTerm in queryTermDict:
        outputFileHandler.write(currentTermID + " " + currentTerm + "\n")
    currentLine = inputFileHandler0.readline()
    currentLineNum += 1

inputFileHandler0.close()
outputFileHandler.close()

print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)




searchSystemDocumentResultDict1 = {}
# AND
# inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/AND_top100_100%_tb04-06_final_with_termID_and_score_added"
# OR
inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top100_100%_tb04-06_final_with_termID_and_score_added"

inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentDocID = lineElements[-1]
    currentRank = int(lineElements[3])
    if currentRank <= 10:
        documentResultKey = currentQID + "_" + currentDocID
        if documentResultKey not in searchSystemDocumentResultDict1:
            searchSystemDocumentResultDict1[documentResultKey] = 1
print "len(searchSystemDocumentResultDict1):",len(searchSystemDocumentResultDict1)
inputFileHandler0.close()
exit(1)


def getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,outputFileName,termANDTermIDDict,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,documentResultWithPostingValuesDict,postingWithDocumentResultDict):
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function begins..."
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    inputFileHandler1 = open(inputFileName1,"r")
    outputFileHandler = open(outputFileName,"w")
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = currentLine.strip().split(":")[1].strip()
            print "currentQID:",currentQID
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            print nextLine.strip()
            exit(1)
            
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for IMPORTANT DEBUG ONLY
                print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                print "currentLine:",currentLine
                exit(1)
                
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 14:
                    theRank = int(lineElements[0])
                    theTrecID = lineElements[-1]
                    theDocID = lineElements[-2]
                    theScore = lineElements[-3]
                    documentResultKey = currentQID + "_" + theTrecID
                    if documentResultKey not in documentResultWithPostingValuesDict:
                        documentResultWithPostingValuesDict[documentResultKey] = {}
                    
                    outputLine = str(currentQID) + " " + "Q0" + " " + str(theTrecID) + " " + str(theRank) + " " + str(theScore) + " " + "PolyIRTK" + " "
                    
                    for i in range(0,len(currentQueryTermIndexDict)):
                        currentTerm = currentQueryTermIndexDict[i]
                        currentTermID = termANDTermIDDict[currentTerm]
                        currentTermScore = float(lineElements[1 + i])
                        if currentTermScore != 0.0:
                            postingKey = currentTermID + "_" + theTrecID
                            documentResultWithPostingValuesDict[documentResultKey][postingKey] = currentTermScore
                            outputLine += currentTermID + " " + str(currentTermScore) + " "
                            if postingKey not in postingWithDocumentResultDict:
                                postingWithDocumentResultDict[postingKey] = []
                                postingWithDocumentResultDict[postingKey].append(documentResultKey)
                    
                    outputLine += str(theDocID) + " "
                    outputLine = outputLine.strip() + "\n"
                    outputFileHandler.write(outputLine)
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function ends."

termANDTermIDDict = {}
# the input format of the file should be the following:
# (termID, term)
# for dodo:
# ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
# for pangolin:
ifn = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm not in termANDTermIDDict:
        termANDTermIDDict[currentTerm] = currentTermID
    currentLine = inputFileHandler0.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
print "termANDTermIDDict['0000000000000000']: ",termANDTermIDDict['0000000000000000']
inputFileHandler0.close()

# key: qID_trecID
# value: dict
    # key: termID_trecID
    # value: impact score1
documentResultWithPostingValuesDict = {}

# key: posting
# value: [] a list containing all the document_result keys
postingWithDocumentResultDict = {}

# Driver: parse the raw AND top-100 gold-standard log and write the
# reformatted result file with termIDs and per-term scores appended; the
# two dicts above are filled as a side effect.
inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/humanJudgeRawResults_TOP100_AND"
outputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/AND_top100_100%_tb04-06_final_with_termID_and_score_added"

getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,outputFileName,termANDTermIDDict,100,documentResultWithPostingValuesDict,postingWithDocumentResultDict)
print "overall:"
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)

# NOTE(review): everything below is unreachable (the driver above ends with
# exit(1)).
print "len(documentResultWithPostingValuesDict):",len(documentResultWithPostingValuesDict)
print "len(postingWithDocumentResultDict):",len(postingWithDocumentResultDict)

# Preview the first 10 entries of each dict.
tempCounter = 0
for documentResultKey in documentResultWithPostingValuesDict:
    print documentResultKey,documentResultWithPostingValuesDict[documentResultKey]
    tempCounter += 1
    if tempCounter == 10:
        break

tempCounter = 0
for posting in postingWithDocumentResultDict:
    print posting,postingWithDocumentResultDict[posting]
    tempCounter += 1
    if tempCounter == 10:
        break

# Append termID/score pairs onto each OR top-100 result line.
# NOTE(review): outputFileHandler is not (re)opened in this section --
# verify it is still an open handle here before re-enabling.
inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top100_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    currentQID = lineElements[0]
    currentDocID = lineElements[2]
    documentResultKey = currentQID + "_" + currentDocID
    outputLine = line.strip() + " "
    for postingKey in documentResultWithPostingValuesDict[documentResultKey]:
        termID = postingKey.strip().split("_")[0]
        termScore = documentResultWithPostingValuesDict[documentResultKey][postingKey]
        outputLine += termID + " " + str(termScore) + " "
    outputLine += "\n"
    outputFileHandler.write( outputLine )
exit(1)



# Disabled overlap-evaluation and reformatting sections, kept as a bare
# string literal (never executed).
'''
documentResultDictFromSearchSystem = {}
inputFileName1 = "/data/obukai/gov2ClearYourMindAndDoItAgain/pruningProjectResults/reproduceRomanQueryEffectiveness_20140121Night/AND_Semantics/AND_top10000_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    documentResultKey = currentQID + "_" + currentTrecID
    currentRank = int(lineElements[3])

    if currentRank <= 9:
        if documentResultKey not in documentResultDictFromSearchSystem:
            documentResultDictFromSearchSystem[documentResultKey] = 1
        else:
            print "duplicated."
            exit(1)
print "len(documentResultDictFromSearchSystem):",len(documentResultDictFromSearchSystem)
inputFileHandler0.close()


documentResultDictFromHumanJudgeQueries = {}
inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    relevenceLabel = lineElements[3]
    if relevenceLabel != "0":
        documentResultKey = currentQID + "_" + currentTrecID
        if documentResultKey not in documentResultDictFromHumanJudgeQueries:
            documentResultDictFromHumanJudgeQueries[documentResultKey] = 1
print "len(documentResultDictFromHumanJudgeQueries): ",len(documentResultDictFromHumanJudgeQueries)
inputFileHandler0.close()

numOfTOP10DocumentResultReturnedIntersectionSet = set(documentResultDictFromSearchSystem).intersection( set(documentResultDictFromHumanJudgeQueries) )
numOfTOP10DocumentResultReturnedUnionSet = set(documentResultDictFromSearchSystem).union( set(documentResultDictFromHumanJudgeQueries) )
print "len(numOfTOP10DocumentResultReturnedIntersectionSet):",len(numOfTOP10DocumentResultReturnedIntersectionSet)
print "len(numOfTOP10DocumentResultReturnedUnionSet):",len(numOfTOP10DocumentResultReturnedUnionSet)

exit(1)

outputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top10_100%_tb04-06_final_with_termID_and_score_added_docID_added_REFORMATED"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top10_100%_tb04-06_final_with_termID_and_score_added_docID_added"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    outputFileLine = ""
    for element in lineElements:
        outputFileLine += element + " "
    outputFileLine = outputFileLine.strip()
    outputFileLine += "\n"
    outputFileHandler.write(outputFileLine)
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

trecIDANDDocIDDict = {}
ifn = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentDocID =  currentLineElements[1]
    if currentTrecID not in trecIDANDDocIDDict:
        trecIDANDDocIDDict[currentTrecID] = currentDocID  
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(trecIDANDDocIDDict):",len(trecIDANDDocIDDict)

outputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top10_100%_tb04-06_final_with_termID_and_score_added_docID_added"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top10_100%_tb04-06_final_with_termID_and_score_added"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    currentTrecID = lineElements[2]
    currentDocID = trecIDANDDocIDDict[currentTrecID]
    outputLine = line.strip()
    outputFileHandler.write(outputLine + " " + currentDocID + "\n")
inputFileHandler0.close()
outputFileHandler.close()

print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "outputFileName:",outputFileName
exit(1)
'''

outputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/OR_top10_100%_tb04-06_final"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data/obukai/gov2ClearYourMindAndDoItAgain/pruningProjectResults/reproduceRomanQueryEffectiveness_20140121Night/OR_Semantics/OR_top10000_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split("\t")
    theRank = int(lineElements[3])
    if theRank < 10:
        outputFileHandler.write(line)
inputFileHandler0.close()
outputFileHandler.close()
print "overall:"
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)

numOfBytes = 0
numOfPostingsProcessed = 0
inputFileName3 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/resultfile0"
inputFileHandler0 = open(inputFileName3,"rb")
statinfo = os.stat(inputFileName3)
fileSize = statinfo.st_size
print "file size:",fileSize

while numOfBytes < fileSize:
    '''
    # old version
    # each time, just read the info of ONE posting, too few
    byteString = inputFileHandler0.read(4 + 4 + 4)
    numOfPostingsProcessed += 1
    if numOfPostingsProcessed % 1000000 == 0:
        print "numOfPostingsProcessed:",numOfPostingsProcessed
    # (termID,docID,currentProbability) = unpack( "2I1f", byteString)
    '''
    
    
    # current version
    # each time, read the info of 1M postings
    byteStringBuffer = inputFileHandler0.read( 1000000 * 12)
    byteStringBufferIndexPosition = 0
    for i in range(0,1000000):
        byteString = byteStringBuffer[byteStringBufferIndexPosition:byteStringBufferIndexPosition+12]
        byteStringBufferIndexPosition += 12
        (termID,docID,currentProbability) = unpack( "2I1f", byteString)
        numOfBytes += 12
    numOfPostingsProcessed += 1000000
    print "numOfPostingsProcessed:",numOfPostingsProcessed
    
    
exit(1)


numOfHumanJudgeRelatedDocumentResult = 0
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/qrels.tb04-tb06.top150_with_docID_added_termid_added"
inputFileHandler0 = open(inputFileName,"r")

for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    relevanceLabel = lineElements[3]
    if relevanceLabel != "0":
        numOfHumanJudgeRelatedDocumentResult += 1

print "Overall:"
print "numOfHumanJudgeRelatedDocumentResult:",numOfHumanJudgeRelatedDocumentResult
print "inputFileName:",inputFileName
inputFileHandler0.close()
exit(1)

inputFileName = "/data/obukai/gov2ClearYourMindAndDoItAgain/pruningProjectResults/reproduceRomanQueryEffectiveness_20140121Night/AND_Semantics/AND_top10000_100%_tb04-06_final"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split("\t")
    print "currentLineElements:",currentLineElements
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)

# Spot-check the packed postings file: decode and print the first and the
# last 12-byte record (termID:uint32, docID:uint32, probability:float32).
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/resultfile0"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
# 77423376120
# first posting:
byteString = inputFileHandler0.read(4 + 4 + 4)
(termID,docID,currentProbability) = unpack( "2I1f", byteString)
print termID,docID,currentProbability
# last posting:
# NOTE(review): 77423376108 is hard-coded as 77423376120 - 12 for the exact
# file size printed above; it must be recomputed (fileSize - 12) if the
# input file ever changes -- TODO confirm before reuse.
inputFileHandler0.seek(77423376108)
byteString = inputFileHandler0.read(4 + 4 + 4)
(termID,docID,currentProbability) = unpack( "2I1f", byteString)
print termID,docID,currentProbability
inputFileHandler0.close()
exit(1)


inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/resultfile0"
inputFileHandler0 = open(inputFileName,"rb")
statinfo = os.stat(inputFileName)
fileSize = statinfo.st_size
print "file size:",fileSize
numOfBytes = 0

previousProbablity = 1.0
while numOfBytes < fileSize:
    byteString = inputFileHandler0.read(4 + 4 + 4)
    (termID,docID,currentProbability) = unpack( "2I1f", byteString)
    print termID,docID,currentProbability
    if previousProbablity >= currentProbability:
        # print "previousProbablity:",previousProbablity
        # print "currentProbability:",currentProbability
        previousProbablity = currentProbability
        # exit(1)
    else:
        exit(1)
    numOfBytes += 12

inputFileHandler0.close()
exit(1)

##################
# It is NEEDED when applying to the whole gov2 index
# step-1: load the info for term AND termID converter 
termANDTermIDDict = {}

# the input format of the file should be the following:
# termID
# term
# 1.7M terms in the lexicon
# It takes 3.817s to load
# inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/TermsWithTermIDFor90778ImportantAndFakeDocsFromGov2"
ifn = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
# It takes 2 mins to load
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(ifn,"r")
currentLine = inputFileHandler0.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm not in termANDTermIDDict:
        termANDTermIDDict[currentTerm] = currentTermID
    currentLine = inputFileHandler0.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
print "termANDTermIDDict['0000000000000000']: ",termANDTermIDDict['0000000000000000']
inputFileHandler0.close()
# exit(1)


# Parse the topic file ("<queryID>:<query text>" per line) into two dicts:
# queryIDANDContentDict     : queryID -> cleaned query terms (space-joined)
# queryIDANDTermIDListDict  : queryID -> corresponding termIDs (space-joined)
queryIDANDContentDict = {}
queryIDANDTermIDListDict = {}
inputFileName1 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler0 = open(inputFileName1,"r")

# outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1.txt.input"
# outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    queryID = line.strip().split(":")[0]
    queryContent = ""
    queryContentIDs = ""
    # NOTE(review): split(":")[1] drops any text after a second ':' in the
    # query -- presumably topic lines contain exactly one ':'; confirm.
    data = line.strip().split(":")[1]
    data = data.lower()
    
    # Replace every character that is not [0-9a-zA-Z ] with a space.
    # data is rebuilt in place each iteration; length is unchanged, so the
    # index loop stays valid. NOTE(review): data was lowercased above, so
    # the uppercase range test (65-90) can never match here.
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    # Keep only non-empty tokens that exist in the lexicon; collect both
    # the terms and their termIDs in parallel, space-separated.
    dataElements = data.strip().split(" ")
    for element in dataElements:
        currentTerm = element.strip()
        if currentTerm != "" and currentTerm in termANDTermIDDict:
            queryContent += currentTerm + " "
            queryContentIDs += termANDTermIDDict[currentTerm] + " "
    queryContent = queryContent.strip()
    queryContentIDs = queryContentIDs.strip()
    # First occurrence of a queryID wins.
    if queryID not in queryIDANDContentDict:
        queryIDANDContentDict[queryID] = queryContent
        queryIDANDTermIDListDict[queryID] = queryContentIDs

print "queryIDANDContentDict:",queryIDANDContentDict
print "queryIDANDTermIDListDict:",queryIDANDTermIDListDict
print "len(queryIDANDContentDict):",len(queryIDANDContentDict)
print "len(queryIDANDTermIDListDict):",len(queryIDANDTermIDListDict)
print "queryIDANDContentDict['701']",queryIDANDContentDict['701']
print "queryIDANDContentDict['702']",queryIDANDContentDict['702']
print "queryIDANDTermIDListDict['701']",queryIDANDTermIDListDict['701']
print "queryIDANDTermIDListDict['702']",queryIDANDTermIDListDict['702']
inputFileHandler0.close()

inputFileName2 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night/qrels.tb04-tb06.top150_with_docID_added"
inputFileHandler0 = open(inputFileName2,"r")

outputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night/qrels.tb04-tb06.top150_with_docID_added_termid_added"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    outputLine = line.strip() + " " + queryIDANDTermIDListDict[queryID] + "\n"
    outputFileHandler.write(outputLine)

inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "ifn:",ifn
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
##################

docIDANDTrecIDDict = {}
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentDocID = currentLineElements[1]
    if currentTrecID not in docIDANDTrecIDDict:
        docIDANDTrecIDDict[currentTrecID] = currentDocID 
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(len(docIDANDTrecIDDict)):",len(docIDANDTrecIDDict)



currentDocDict = {}
outputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night/qrels.tb04-tb06.top150_with_docID_added"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/reproduceRomanQueryEffectiveness_20140121Night/qrels.tb04-tb06.top150"
inputFileHandler0 = open(inputFileName2,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    docID = lineElements[2]
    if docID not in currentDocDict:
        currentDocDict[docID] = 1
    else:
        currentDocDict[docID] += 1
    
    internalDocID = "-1"
    if lineElements[2] in docIDANDTrecIDDict:
        internalDocID = docIDANDTrecIDDict[ lineElements[2] ]
    
    outputFileHandler.write(lineElements[0] + " " + lineElements[1] + " " + lineElements[2] + " " + lineElements[3] + " " + internalDocID + "\n")

print "len(currentDocDict):",len(currentDocDict)

inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName1: ",inputFileName1
print "inputFileName2: ",inputFileName2
print "outputFileName: ",outputFileName
exit(1)

# NOTE(review): this loop reads the whole file but its body is empty --
# it does nothing except consume the lines. Presumably an abandoned stub;
# confirm before deleting.
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/allPostingsBeingPoppedAtDifferentLevels_weight_0_20140218Afternoon_DEBUG"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
exit(1)


# inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/allPostingsBeingPoppedAtDifferentLevels_weight_0_20140218Afternoon_DEBUG"
# inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/allPostingsBeingPoppedAtDifferentLevels_weight_0_20140222Afternoon_OR_DEBUG"
inputFileName1 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/allPostingsBeingPoppedAtDifferentLevels_weight_5_20140217Night_DEBUG"
inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/allPostingsBeingPoppedAtDifferentLevels_weight_5_20140218Afternoon_DEBUG"

inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")

currentLineFromFile1 = inputFileHandler1.readline()
currentLineFromFile2 = inputFileHandler2.readline()
numOfQueriesFallingThrough = 1

while currentLineFromFile1:
    if numOfQueriesFallingThrough % 1000000 == 0:
        print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
    
    if currentLineFromFile1.strip() == currentLineFromFile2.strip():
        pass
    else:
        print "currentLineFromFile1:",currentLineFromFile1
        print "currentLineFromFile2:",currentLineFromFile2
        exit(1)
    
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()    
    numOfQueriesFallingThrough += 1
inputFileHandler1.close()
inputFileHandler2.close()
exit(1)

docResultDict = {}
# file1:
# inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/tail5KResults_sortedByQID_NEW_FORMAT_20140210Afternoon"
# file2:
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/tail5KResults_NEW_FORMAT_20140222Afternoon_OR"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentQID = currentLineElements[0]
    currentDocID = currentLineElements[1]
    key = currentQID + "_" + currentDocID 
    if key not in docResultDict:
        docResultDict[key] = 1
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(docResultDict): ",len(docResultDict)
exit(1)

# Sum the posting counts (column 1) over the first 1M docIDs and estimate
# the total index size in bytes (8-byte header + 12 bytes per posting
# per document).
inputFileName = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset_sortedByDocID_head_1000000"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 0

totalNumOfPostings = 0
currentNumOfPostings = 0
totalNumOfBytes = 0
while currentLine:
     
     currentLineElements = currentLine.strip().split(" ")
     currentNumOfPostings = int(currentLineElements[1])
     totalNumOfPostings += currentNumOfPostings
     # Per-doc cost model: 4+4 header bytes plus 3 uint32/float fields
     # per posting.
     totalNumOfBytes += 4 + 4 + currentNumOfPostings * 3 * 4
     
     currentLine = inputFileHandler0.readline()
     numOfQueriesFallingThrough += 1

     # NOTE(review): this compares the variable to itself, so the
     # condition is always False and the break below is dead code.
     # Presumably a line-limit constant was intended on the right-hand
     # side -- confirm the intended cutoff before fixing.
     if numOfQueriesFallingThrough > numOfQueriesFallingThrough:
         print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
         break
     
print "totalNumOfPostings:",totalNumOfPostings
print "totalNumOfBytes:",totalNumOfBytes
inputFileHandler0.close()
exit(1)


# Sum posting counts (column 1) for the line range [12602589, 25128544]
# of the sorted docID mapping table, and compute a byte-size estimate.
inputFileName = "/data/obukai/workspace/web-search-engine-wei-2014-Feb/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset_sortedByDocID"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 0

score1 = 0
numOfBytes = 0
while currentLine:
     # NOTE(review): 12602589 / 25128544 are magic line boundaries --
     # presumably docID cut points for one index shard; confirm.
     if numOfQueriesFallingThrough >= 12602589:
         currentLineElements = currentLine.strip().split(" ")
         score1 += int(currentLineElements[1])
         # NOTE(review): numOfBytes is overwritten (not accumulated) each
         # iteration, so only the final running total survives; the 4+4
         # header is counted once, not per document -- TODO confirm intent.
         numOfBytes = 4 + 4 + score1 * 3 * 4
     
     if numOfQueriesFallingThrough > 25128544:
         print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
         break
     
     currentLine = inputFileHandler0.readline()
     numOfQueriesFallingThrough += 1

print "score1:",score1
print "numOfBytes:",numOfBytes
inputFileHandler0.close()
exit(1)


# It is NEEDED when applying to the whole gov2 index
# step-1: load the info for term AND termID converter 
termANDTermIDDict = {}
# the input format of the file should be the following:
# termID
# term
# 1.7M terms in the lexicon
# It takes 3.817s to load
# inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/TermsWithTermIDFor90778ImportantAndFakeDocsFromGov2"
inputFileName2 = "/data/obukai/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
# It takes 2 mins to load
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(inputFileName2,"r")
currentLine = inputFileHandler0.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm not in termANDTermIDDict:
        termANDTermIDDict[currentTerm] = currentTermID
    currentLine = inputFileHandler0.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
print "termANDTermIDDict['0000000000000000']: ",termANDTermIDDict['0000000000000000']
inputFileHandler0.close()
# exit(1)


outputFileName = "/data/obukai/gov2ClearYourMindAndDoItAgain/tail5KResults_sortedByQID_NEW_FORMAT_20140210Afternoon"
outputFileHandler = open(outputFileName,"w")

# It is NEEDED when applying to the whole gov2 index
# step0: load the info for all the related TOP10 postings of the final 5K queries
# It takes 1s
top10RelatedPostingsDict = {}
# The input format of the file
# queryID
# docID
# term
# (Just guess) rank in the list for this posting
# pieceNumIndex
# inputFileName3 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileName3 = "/data/obukai/gov2ClearYourMindAndDoItAgain/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHandler0 = open(inputFileName3,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[2]
    currentDocID = currentLineElements[1]
    currentTermID = 50000000
    if currentTerm in termANDTermIDDict:
        currentTermID = termANDTermIDDict[currentTerm]
    currentPostingKey = currentTermID + "_" + currentDocID 
    if currentPostingKey not in top10RelatedPostingsDict:
        top10RelatedPostingsDict[currentPostingKey] = 1
        
    outputFileHandler.write(currentLineElements[0] + " " + currentDocID + " " + currentTermID + "\n")
    
    currentLine = inputFileHandler0.readline()
# print "top10RelatedPostingsDict['bronx_21911485']: ",top10RelatedPostingsDict['bronx_21911485']
print "len(top10RelatedPostingsDict):",len(top10RelatedPostingsDict)
# print top10RelatedPostingsDict
inputFileHandler0.close()
outputFileHandler.close()
print "Overall:"
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "outputFileName:",outputFileName
exit(1)


tempCounter = 0
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140209Afternoon_SECOND_HALF_DOC_weight_1"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    tempCounter += int(currentLineElements[1])
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "tempCounter: ",tempCounter
exit(1)



currentDocDict = {}
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocID = currentLineElements[0]
    if currentDocID not in currentDocDict:
        currentDocDict[currentDocID] = 1
    else:
        print "currentDocID: ",currentDocID
        currentDocDict[currentDocID] += 1
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(currentDocDict): ",len(currentDocDict)
exit(1)

inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140208Afternoon_HALF_DOC_weight_1"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
print "length: ",len(currentLineElements)
print "*****"
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
print "length: ",len(currentLineElements)
inputFileHandler0.close()
exit(1)

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/denominator"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfPostings = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    if lineElements == 3:
        totalNumOfPostings += lineElements[2]
print "totalNumOfPostings:",totalNumOfPostings
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131122Afternoon_CORRECT"
inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131121Night_CORRECT"
inputFileName3 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131122Night_CORRECT"

inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler3 = open(inputFileName3,"r")

numOfQueriesFallingThrough = 0

torlenceCounter = 100

currentLineFromFile1 = inputFileHandler1.readline()
currentLineFromFile2 = inputFileHandler2.readline()
currentLineFromFile3 = inputFileHandler3.readline()
numOfQueriesFallingThrough += 1

while currentLineFromFile1:
    if currentLineFromFile1 == currentLineFromFile2 and currentLineFromFile2 == currentLineFromFile3:
        pass
    else:
        print "currentLineFromFile1: ",currentLineFromFile1.strip()
        print "currentLineFromFile2: ",currentLineFromFile2.strip()
        print "currentLineFromFile3: ",currentLineFromFile3.strip()
        print "numOfQueriesFallingThrough: ",numOfQueriesFallingThrough
        print "torlenceCounter: ",torlenceCounter
        torlenceCounter -= 1
        if torlenceCounter == 0:
            exit(1)
    
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()
    currentLineFromFile3 = inputFileHandler3.readline()    
    numOfQueriesFallingThrough += 1
    
inputFileHandler1.close()
inputFileHandler2.close()
inputFileHandler3.close()
exit(1)
'''

'''
# weight = 0
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131122Afternoon_CORRECT"
# weight = 1
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131121Night_CORRECT"
# weight = 1000
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/debug20131122Night_CORRECT"
inputFileHandler0 = open(inputFileName,"r")

inputFileHandler0.close()
'''

'''
trecIDANDLocalBadyDocIDDict = {}
inputFileName1 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/trecIDANDLocalBadyDocIDMappingTable"
inputFileHandler0 = open(inputFileName1,"r")
for currentLine in inputFileHandler0.readlines():
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentLocalBabyDocID = currentLineElements[1]
    if currentTrecID not in trecIDANDLocalBadyDocIDDict:
        trecIDANDLocalBadyDocIDDict[currentTrecID] = currentLocalBabyDocID
print "len(trecIDANDLocalBadyDocIDDict):",len(trecIDANDLocalBadyDocIDDict)
inputFileHandler0.close()

trecIDANDUniversalDocIDDict = {}
inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler0 = open(inputFileName2,"r")
for currentLine in inputFileHandler0.readlines():
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentUniversalDocID = currentLineElements[1]
    if currentTrecID not in trecIDANDUniversalDocIDDict:
        trecIDANDUniversalDocIDDict[currentTrecID] = currentUniversalDocID
print "len(trecIDANDUniversalDocIDDict):",len(trecIDANDUniversalDocIDDict)
inputFileHandler0.close()

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_WholeIndexUniversalDocID_MappingTableForBabyIndex"
outputFileHandler = open(outputFileName,"w")
for currentTrecID in trecIDANDLocalBadyDocIDDict:
    outputFileHandler.write(currentTrecID + " " + trecIDANDLocalBadyDocIDDict[currentTrecID] + " " + trecIDANDUniversalDocIDDict[currentTrecID] + "\n")
outputFileHandler.close()
print "Overall Processing Statistics:"
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_2"
inputFileHandler0 = open(inputFileName,"r")
score1 = 0
currentLine = inputFileHandler0.readline()
while currentLine:
    score1 += int( currentLine.strip().split(" ")[1] )
    currentLine = inputFileHandler0.readline()
print "score1: ",score1
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_2"
inputFileHandler0 = open(inputFileName,"r")
numOfQueriesFallingThrough = 0

currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough += 1

while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    if len(currentLineElements[1]) != len(currentLineElements) - 4:
        pass
    else:
        print "critical error"
        print "len(currentLineElements[1]): ",len(currentLineElements[1])
        print "len(currentLineElements) - 4: ",len(currentLineElements) - 4
        exit(1)
    currentLine = inputFileHandler0.readline()
    print "numOfQueriesFallingThrough: ",numOfQueriesFallingThrough
    numOfQueriesFallingThrough += 1

inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_2"
inputFileHanlder = open(inputFileName,"r")
currentLine = inputFileHanlder.readline()
print len(currentLine.strip().split(" "))
currentLine = inputFileHanlder.readline()
print len(currentLine.strip().split(" "))
inputFileHanlder.close()
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_2_PART_OF_COMPLETED"
inputFileHandler0 = open(inputFileName,"r")
numOfDocs = 0
numOfPostingsForCurrentDocument = 0
xPoints = []
yPoints = []
denomunator = 7.40691e-07
for line in inputFileHandler0.readlines():
    if line.strip().startswith("current_largest_value_of_the_posting_array:"):
        numOfPostingsForCurrentDocument += 1
        yValue = float( line.strip().split(" ")[1] ) / denomunator 
        xPoints.append(numOfPostingsForCurrentDocument)
        yPoints.append(yValue)
    if line.strip().endswith("j: 0"):
        print "docID: ",numOfDocs-1
        print "len(xPoints): ",len(xPoints)
        print "len(yPoints): ",len(yPoints)
        matplotlib.pyplot.scatter(xPoints,yPoints)
        matplotlib.pyplot.show()
        numOfPostingsForCurrentDocument = 0
        xPoints = []
        yPoints = []
        numOfDocs += 1
print "numOfDocs: ",numOfDocs        
inputFileHandler0.close()
exit(1)
'''

'''
# option1:
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_PART_OF"
# option2:
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added_2"
inputFileHandler0 = open(inputFileName,"r")
totalMS = 0
totalNumOfPostings = 0
numOfDocuments = 0
for line in inputFileHandler0.readlines():
    numOfDocuments += 1
    lineElements = line.strip().split(" ")
    totalNumOfPostings += int( lineElements[1] )
    # print "lineElements[2][:-2]: ",lineElements[2][:-2]
    totalMS += float( lineElements[2][:-2] )
print "totalMS: ",totalMS,"ms"
print "totalNumOfPostings: ",totalNumOfPostings
print "numOfDocuments: ",numOfDocuments
print "avgNumOfPostings: ",totalNumOfPostings/numOfDocuments
print "avgMSForEachDocument: ",totalMS/numOfDocuments,"ms"
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/debug"
inputFileHandler0 = open(inputFileName,"r")
sumOfValue = 0.0
sumOfTotalPostings = 0
numOfQueriesFallingThrough = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    sumOfValue += float(lineElements[2][:-2])
    sumOfTotalPostings += int(lineElements[1])
    numOfQueriesFallingThrough += 1
print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
print "sumOfValue/numOfQueriesFallingThrough:",sumOfValue/numOfQueriesFallingThrough
print "sumOfTotalPostings/numOfQueriesFallingThrough:",sumOfTotalPostings/numOfQueriesFallingThrough
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_10_WITH_TIME_COST_Added"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
print "len(currentLineElements): ",len(currentLineElements)
inputFileHandler0.close()
'''



'''
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/indexDocIDANDNumOfPostingsMappingTable"
inputFileHandler0 = open(inputFileName,"r")
currentSumNumOfPostingsInDoc = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfPostingsInDoc = int(lineElements[1])
    currentSumNumOfPostingsInDoc += currentNumOfPostingsInDoc
print "currentSumNumOfPostingsInDoc:",currentSumNumOfPostingsInDoc
inputFileHandler0.close()
exit(1)
'''

'''
# step2:
termDict = {}
# inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For_1_doc_file"
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For90778ImportantAndFakeDocsFromGov2"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip().split(" ")[0]
    if currentTerm not in termDict:
        termDict[currentTerm] = 1
inputFileHandler0.close()
print "len(termDict):",len(termDict)

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/TermsWithTermIDFor90778ImportantAndFakeDocsFromGov2"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(inputFileName2,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentTerm = currentLine.strip().split(" ")[1]
    if currentTerm in termDict:
        outputFileHandler.write(currentLine)
    else:
        pass
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''

'''
# step1:
termDict = {}
# Each line, the file format should be: "traverse the team: 0"
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/TermsWithTermIDFor90778ImportantAndFakeDocsFromGov2"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    currentTerm = lineElements[1].strip()
    if currentTerm not in termDict:
        termDict[currentTerm] = 1
    else:
        print "critical error."
print "len(termDict):",len(termDict)
# print "termDict['00002']:",termDict['00002']
inputFileHandler0.close()

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain//wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For90778ImportantAndFakeDocsFromGov2"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler0 = open(inputFileName2,"r")
numOfQueriesFallingThrough = 0
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough += 1
while currentLine:
    if numOfQueriesFallingThrough % 1000000 == 0:
        print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough,"processed."
    
    currentTerm = currentLine.strip().split(" ")[0]
    if currentTerm not in termDict:
        pass
    else:
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
inputFileHandler0.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''

'''
sizeDistributionDict = {}
# the pre-defined gaps(lower bounds and upper bounds) are as following:
# gaps:
# 0 [0,10)
# 1 [10,50)
# 2 [50,100)
# 3 [100,200)
# 4 [200,500)
# 5 [500,MAX BIG)

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset_head_1092"
inputFileHandler0 = open(inputFileName,"r")
numOfQueriesFallingThrough = 0
sumNumOfPostings = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfPostings = int( lineElements[1] )
    if currentNumOfPostings >= 0 and currentNumOfPostings < 10:
        if 0 not in sizeDistributionDict:
            sizeDistributionDict[0] = 1
        else:
            sizeDistributionDict[0] += 1
    if currentNumOfPostings >= 10 and currentNumOfPostings < 50:
        if 1 not in sizeDistributionDict:
            sizeDistributionDict[1] = 1
        else:
            sizeDistributionDict[1] += 1
    if currentNumOfPostings >= 50 and currentNumOfPostings < 100:
        if 2 not in sizeDistributionDict:
            sizeDistributionDict[2] = 1
        else:
            sizeDistributionDict[2] += 1
    if currentNumOfPostings >= 100 and currentNumOfPostings < 200:
        if 3 not in sizeDistributionDict:
            sizeDistributionDict[3] = 1
        else:
            sizeDistributionDict[3] += 1
    if currentNumOfPostings >= 200 and currentNumOfPostings < 500:
        if 4 not in sizeDistributionDict:
            sizeDistributionDict[4] = 1
        else:
            sizeDistributionDict[4] += 1
    if currentNumOfPostings >= 500:
        if 5 not in sizeDistributionDict:
            sizeDistributionDict[5] = 1
        else:
            sizeDistributionDict[5] += 1    
    sumNumOfPostings += currentNumOfPostings
    numOfQueriesFallingThrough += 1
print "sumNumOfPostingsForALLDocs:",sumNumOfPostings
print "avgNumOfPostingsPerDoc:",sumNumOfPostings/numOfQueriesFallingThrough
print "# 0 [0,10)",sizeDistributionDict[0]
print "# 1 [10,50)",sizeDistributionDict[1]
print "# 2 [50,100)",sizeDistributionDict[2]
print "# 3 [100,200)",sizeDistributionDict[3]
print "# 4 [200,500)",sizeDistributionDict[4]
print "# 5 [500,MAX BIG)",sizeDistributionDict[5]
inputFileHandler0.close()
exit(1)
'''

'''
# code logic currently under construction 2013/09/30 afternoon
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirNumOfPostingsRecordedAndTheDistinctSetOfTerms_20130926Night_xaa"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.close()
'''

'''
randomlySelectedIDDict = {}
totalNumOfPostingsSelected = 1000 # 1K
totalNumOfSamples = 100000 # 100K
# draw unique IDs uniformly from [0, totalNumOfSamples-1] until we have
# totalNumOfPostingsSelected of them
while len(randomlySelectedIDDict) != totalNumOfPostingsSelected:
    # random.randint(a, b) returns N such that a <= N <= b (both ends inclusive)
    selectedID = random.randint(0, totalNumOfSamples-1)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDDict[selectedID] = 1

print "randomlySelectedIDDict:",randomlySelectedIDDict
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsWithStaticANDDynamicPart_20130928Afternoon"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsWithStaticANDDynamicPart_20130928Afternoon_random_1000.csv"
outputFileHandler = open(outputFileName,"w")

for index,line in enumerate( inputFileHandler0.readlines() ):
    if index in randomlySelectedIDDict:
        outputFileHandler.write(line)
    else:
        pass

print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler0.close()
outputFileHandler.close()
exit(1)        
'''



'''
inputFileNameList = []
inputFileName_a = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaa"
inputFileName_b = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xab"
inputFileName_c = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xac"
inputFileName_d = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xad"
inputFileName_e = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xae"
inputFileName_f = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaf"
inputFileName_g = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xag"
inputFileName_h = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xah"
inputFileName_i = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xai"
inputFileName_j = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaj"

inputFileNameList.append(inputFileName_a)
inputFileNameList.append(inputFileName_b)
inputFileNameList.append(inputFileName_c)
inputFileNameList.append(inputFileName_d)
inputFileNameList.append(inputFileName_e)
inputFileNameList.append(inputFileName_f)
inputFileNameList.append(inputFileName_g)
inputFileNameList.append(inputFileName_h)
inputFileNameList.append(inputFileName_i)
inputFileNameList.append(inputFileName_j)

firstPart = "/data/jhe/trecdata/"
lastPart = ".gz"
for currentInputFileName in inputFileNameList:
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/" + "gov2_files_" + currentInputFileName[-3:] + "_with_command_19_added"
    outputFileHandler = open(outputFileName,"w")
    outputFileHandler.write("19" + "\n")
    # key: compressFilePath
    # value: (NO USE currently)
    compressFilePathDict = {}
    compressFilePathList = []
    currentInputFileHanlder = open(currentInputFileName,"r")
    for line in currentInputFileHanlder.readlines():
        currentLineElements = line.strip().split("-")
        currentCompressedFileCompletedPath = firstPart + currentLineElements[0] + "/" + currentLineElements[1] + lastPart
        if currentCompressedFileCompletedPath not in compressFilePathDict:
            compressFilePathDict[currentCompressedFileCompletedPath] = 1
        else:
            pass
    currentInputFileHanlder.close()
    print "len(compressFilePathDict):",len(compressFilePathDict)
    compressFilePathList = compressFilePathDict.keys()
    compressFilePathList.sort(cmp=None, key=None, reverse=False)
    
    for currentCompressedFileCompletedPath in compressFilePathList:
         outputFileHandler.write(currentCompressedFileCompletedPath + "\n")
    print "outputFileName:",outputFileName,"DONE"
    outputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirNumOfPostingsRecordedAndTheDistinctSetOfTermsONLY_For_DEBUG_20130926Night"
inputFileHandler0 = open(inputFileName,"r")

# skip the headline
currentLine = inputFileHandler0.readline()

# the first data line
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
print len(currentLineElements)
inputFileHandler0.close()
exit(1)
'''


# Updated by Wei on 2013/09/26 afternoon at school
# data analysis about the correctness of the output file
# analyze the following files (listed below):
    # /data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges_for_DEBUG_20130926Night
    # /data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_DEBUG_20130926Night
    # /data1/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirCompletedPostingSet_for_DEBUG_20130926Night
    # /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirXdocValues_for_DEBUG_20130926Night (not used)

'''
inputFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_DEBUG_20130926Night"
inputFileHandler0 = open(inputFileName,"r")
# skip the headline
currentLine = inputFileHandler0.readline()
# the first data line
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")

currentTrecID = currentLineElements[0]
currentDocSizeInWords = int( currentLineElements[1] )
currentNumOfPostingsRecorded = int( currentLineElements[2] )
print "currentNumOfPostingsRecorded: ",currentNumOfPostingsRecorded

if currentNumOfPostingsRecorded == len(currentLineElements[3:]):
    print "Pass"
else:
    print "Problem"
    exit(1)


inputFileHandler0.close()
exit(1)
'''

'''
tempCounter = 0
inputFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirCompletedPostingSet_for_DEBUG_20130926Night"
inputFileHandler0 = open(inputFileName,"r")
# skip the headline
currentLine = inputFileHandler0.readline()
# the first data line
currentLine = inputFileHandler0.readline()
currentLineElements = currentLine.strip().split(" ")
currentTrecID = currentLineElements[0]
currentDocSizeInWords = int( currentLineElements[1] )
currentDocSizeInWords2 = int( currentLineElements[2] )
if currentDocSizeInWords2 == len(currentLineElements[3:]):
    for tupleInStringFormat in currentLineElements[3:]:
        term = tupleInStringFormat.strip().split(",")[0].split("(")[1]
        print "term:",term
        if term == "the":
            tempCounter += 1
else:
    print "Problem"
    exit(1)

print "tempCounter:",tempCounter
inputFileHandler0.close()
exit(1)
'''

'''
# Updated by Wei on 2013/09/24 morning at school
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler0 = open(inputFileName,"r")

currentNumOfLines = 0
currentNumOfLines += 1
currentLine = inputFileHandler0.readline()

totalNumOfPostingsRecorded = 0
maxNumOfPostingsRecordedForIndividualDocument = 0
minNumOfPostingsRecordedForIndividualDocument = 99999
averageNumOfPostingsRecordedForIndividualDocument = 0
currentNumOfPostingsRecorded = 0

while currentLine:
    currentNumOfPostingsRecorded = int( currentLine.strip().split(" ")[1] ) 
    totalNumOfPostingsRecorded += currentNumOfPostingsRecorded
    if currentNumOfPostingsRecorded > maxNumOfPostingsRecordedForIndividualDocument:
        maxNumOfPostingsRecordedForIndividualDocument = currentNumOfPostingsRecorded
    
    if currentNumOfPostingsRecorded < minNumOfPostingsRecordedForIndividualDocument:
        minNumOfPostingsRecordedForIndividualDocument = currentNumOfPostingsRecorded 

    currentNumOfLines += 1    
    currentLine = inputFileHandler0.readline()
    
print "totalNumOfPostingsRecorded:",totalNumOfPostingsRecorded
print "maxNumOfPostingsRecordedForIndividualDocument:",maxNumOfPostingsRecordedForIndividualDocument
print "minNumOfPostingsRecordedForIndividualDocument:",minNumOfPostingsRecordedForIndividualDocument
print "averageNumOfPostingsRecordedForIndividualDocument:",totalNumOfPostingsRecorded / currentNumOfLines
print "current Num Of Lines:",currentNumOfLines
inputFileHandler0.close()
exit(1);
'''


'''
# key: doc
# value: no use
docDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[2]
    currentDoc = lineElements[3]
    if currentTerm == "so":
        if currentDoc not in docDict:
            docDict[currentDoc] = 1
print "len(docDict):",len(docDict)
inputFileHandler0.close()
exit(1)
'''

'''
allTermsDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip()
    if currentTerm not in allTermsDict:
        allTermsDict[currentTerm] = 1
    else:
        exit(1)
print "len(allTermsDict):",len(allTermsDict)
inputFileHandler0.close()

head95KTermsDict = {}
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_from_head_95K_queries_ALL"
inputFileHandler0 = open(inputFileName2,"r")
for line in inputFileHandler0.readlines():
    currentTerm = line.strip()
    if currentTerm not in head95KTermsDict:
        head95KTermsDict[currentTerm] = 1
    else:
        exit(1)
print "len(head95KTermsDict):",len(head95KTermsDict)
inputFileHandler0.close()

tail5KTermsDict = {}
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_from_tail_5K_queries_ALL"
outputFileHanlder = open(outputFileName,"w")
for term in allTermsDict:
    if term in head95KTermsDict:
        pass
    else:
        tail5KTermsDict[term] = 1
        
termList = []
termList = tail5KTermsDict.keys()
termList.sort(cmp=None, key=None, reverse=False)
for term in termList:
    outputFileHanlder.write(term + "\n")

outputFileHanlder.close()

print "len(allTermsDict): ",len(allTermsDict)
print "len(head95KTermsDict): ",len(head95KTermsDict)
print "len(tail5KTermsDict): ",len(tail5KTermsDict)

print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''

'''
print "Let's verify some info first"
# key: docID
# value: term
currentDocDict = {}
postingDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHanlder = open(inputFileName,"r")
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentDocID = lineElements[1]
    currentTerm = lineElements[2]
    currentPostingKey = currentTerm + "_" + currentDocID
    
    if currentDocID not in currentDocDict:
        currentDocDict[currentDocID] = 1
    else:
        currentDocDict[currentDocID] += 1
    
    if currentPostingKey not in postingDict:
        postingDict[currentPostingKey] = 1
    else:
        postingDict[currentPostingKey] += 1

print "# of unique docIDs: ",len(currentDocDict)
print "# of document results: ","46772"
print "# of unique postings: ",len(postingDict)
inputFileHanlder.close()
exit(1)
'''



'''
termWithTermIDDict = {}

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler0 = open(inputFileName1,"r")
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermIDInStringFormat = currentLineElements[0]
    currentTermInStringFormat = currentLineElements[1]
    if currentTermInStringFormat not in termWithTermIDDict:
        termWithTermIDDict[currentTermInStringFormat] = currentTermIDInStringFormat
    else:
        print "Duplicate terms"
        exit(1)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "len(termWithTermIDDict):",len(termWithTermIDDict)
print "termWithTermIDDict['000']:",termWithTermIDDict['000']

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler0 = open(inputFileName2,"r")
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithTermID"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    outputFileHandler.write( termWithTermIDDict[currentTerm] + " " + currentTerm + "\n")

inputFileHandler0.close()
outputFileHandler.close()

print "inputFileName1: ",inputFileName1
print "inputFileName2: ",inputFileName2
print "outputFileName: ",outputFileName
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler0.readline()
lineIndex = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    outputFileHandler.write(str(lineIndex) + " " + currentTerm + "\n")
    
    lineIndex += 1
    currentLine = inputFileHandler0.readline()
    
inputFileHandler0.close()
outputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/trecID_docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
totalNumOfPostings = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int( currentLineElements[1] )
    totalNumOfPostings += currentNumOfPostings
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "totalNumOfPostings:",totalNumOfPostings
exit(1)
'''

'''
termDict = {}

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempFile"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    term = line.strip()
    if term not in termDict:
        termDict[term] = 1
    else:
        print "critical error"
        exit(1)
inputFileHandler0.close()
print "len(termDict):",len(termDict)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
outputFileHanlder = open(outputFileName,"w")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentTerm = currentLine.strip().split(" ")[0]
    if currentTerm in termDict:
        outputFileHanlder.write(currentLine)
    else:
        pass
    currentLine = inputFileHandler0.readline()

inputFileHandler0.close()
outputFileHanlder.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempSetOfTermsForDebuggingMultipleExternalIndexAccess"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    print len(line.strip().split(" "))
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_OLD"
inputFileHandler0 = open(inputFileName,"r")

outputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempSetOfTermsForDebuggingMultipleExternalIndexAccess"
outputFileHanlder2 = open(outputFileName2,"w")

totalNumOfPostingsCounted = 0
currentOutputLine = ""
currentDocTermProbabilityDict = {}

currentLine = inputFileHandler0.readline()
while currentLine:
    
    currentLineElements = currentLine.strip().split(" ")
    currentDocTrecID = currentLineElements[0]
    
    print "currentDocTrecID:",currentDocTrecID
    print "# of words in doc:",int( len(currentLineElements[2:])/3 )
    # Updated by Wei 2013/09/09 night
    # This document identifier can be as the trecID external, or the docID internal, OR simply just the docIndex
    currentDocIdentifier = currentDocTrecID
    numOfPostingsRecordedForCurrentDocument = int(currentLineElements[1])
    
    
    
    ##############################################
    
    
    if numOfPostingsRecordedForCurrentDocument != len(currentLineElements[2:])/3:
        print "critical format problem in the input file"
        exit(1)
    else:
        # OK for passing
        pass
    
    baseIndex = 2
    step = 3
    for i in range(0,numOfPostingsRecordedForCurrentDocument):
        currentTerm = currentLineElements[baseIndex + step * i]
        currentTermScore = float( currentLineElements[baseIndex + step * i + 1] )
        currentTermFakePart1Probability = -1.0
        if currentTerm not in currentDocTermProbabilityDict:
            currentTermFakePart1Probability = random.random()
            currentDocTermProbabilityDict[currentTerm] = currentTermFakePart1Probability
        else:
            currentTermFakePart1Probability = currentDocTermProbabilityDict[currentTerm]
        
        
        totalNumOfPostingsCounted += 1
    
    for term in currentDocTermProbabilityDict:
        outputFileHanlder2.write(term + "\n")
    
    print "# of postings in doc:",len(currentDocTermProbabilityDict)
    print
    
    currentLine = inputFileHandler0.readline()
    
    # for DEBUG only
    break

print "Overall Processing Stats:"
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "inputFileName:",inputFileName
print "outputFileName2:",outputFileName2
inputFileHandler0.close()
outputFileHanlder2.close()
exit(1)
'''



'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_OLD"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_NEW"
outputFileHanlder = open(outputFileName,"w")

totalNumOfPostingsCounted = 0
currentOutputLine = ""
currentDocTermProbabilityDict = {}

currentLine = inputFileHandler0.readline()
while currentLine:
    currentOutputLine = ""
    currentLineElements = currentLine.strip().split(" ")
    currentDocTrecID = currentLineElements[0]
    
    print "currentDocTrecID:",currentDocTrecID
    print "# of words in doc:",int( len(currentLineElements[2:])/3 )
    # Updated by Wei 2013/09/09 night
    # This document identifier can be as the trecID external, or the docID internal, OR simply just the docIndex
    currentDocIdentifier = currentDocTrecID
    numOfPostingsRecordedForCurrentDocument = int(currentLineElements[1])
    
    # column0: currentDocTrecID (external)
    # column1: currentDocDicID (internal)
    # column2: numOfPostingsRecordedForCurrentDocument
    currentOutputLine += currentDocTrecID + " " + "N/A" + " " + str(numOfPostingsRecordedForCurrentDocument) + " "
    
    ##############################################
    
    
    if numOfPostingsRecordedForCurrentDocument != len(currentLineElements[2:])/3:
        print "critical format problem in the input file"
        exit(1)
    else:
        # OK for passing
        pass
    
    baseIndex = 2
    step = 3
    for i in range(0,numOfPostingsRecordedForCurrentDocument):
        currentTerm = currentLineElements[baseIndex + step * i]
        currentTermScore = float( currentLineElements[baseIndex + step * i + 1] )
        currentTermFakePart1Probability = -1.0
        if currentTerm not in currentDocTermProbabilityDict:
            currentTermFakePart1Probability = random.random()
            currentDocTermProbabilityDict[currentTerm] = currentTermFakePart1Probability
        else:
            currentTermFakePart1Probability = currentDocTermProbabilityDict[currentTerm]
        
        currentOutputLine += currentTerm + " " + str(currentTermScore) + " " + str(currentTermFakePart1Probability) + " "
        totalNumOfPostingsCounted += 1
    
    currentOutputLine = currentOutputLine.strip() + "\n"
    outputFileHanlder.write(currentOutputLine)
    
    print "# of postings in doc:",len(currentDocTermProbabilityDict)
    print
        
    currentLine = inputFileHandler0.readline()

print "Overall Processing Stats:"
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler0.close()
outputFileHanlder.close()
exit(1)
'''

'''
print "sys.float_info.max:",sys.float_info.max
print "sys.float_info.min:",sys.float_info.min
exit(1)

d = {
     "int": 0,
     "float": 0.0,
     "dict": dict(),
     "set": set(),
     "tuple": tuple(),
     "list": list(),
     "str": "a"
}

for k, v in sorted(d.iteritems()):
    print k, sys.getsizeof(v)

exit(1)
'''


'''
for i in range(0,10):
    print random.random()
exit(1)
'''

'''
for i in range(0,10):
    print i
exit(1)
'''

'''
# logic of finding the largest element in the list
queryTermsIn100KQueriesDict = {}
ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    # ignore lineElements[1]
    if currentTerm not in queryTermsIn100KQueriesDict:
        queryTermsIn100KQueriesDict[currentTerm] = 0
print "len(queryTermsIn100KQueriesDict):",len(queryTermsIn100KQueriesDict)
inputFileHandler0.close()


inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_set_of_probability_added_DEBUG"
inputFileHandler0 = open(inputFileName1,"r")

currentMaxScore = 0.0
currentMaxScoreLine = ""

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm in queryTermsIn100KQueriesDict:
        currentLineScore = float(lineElements[3])
        if currentLineScore > currentMaxScore:
            currentMaxScore = currentLineScore
            currentMaxScoreLine = line
    else:
        pass

print "currentMaxScoreLine:",currentMaxScoreLine.strip()
inputFileHandler0.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_local_index_for_each_term_added"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_local_index_for_each_term_added_for_DEBUG_using_term_gov"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm != "gov":
        pass
    else:
        outputFileHandler.write(line)

print "inputFileName:",inputFileName
print "outputFileName:",outputFileName

inputFileHandler0.close()
outputFileHandler.close()
exit(1)
'''

'''
x = np.random.random(10)
y = np.random.random(10)

slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print "slope:",slope
print "intercept:",intercept
print "r-squared:", r_value**2
print "p_value:",p_value
print "std_err:",std_err
exit(1)
'''


'''
# This program is currently under construction since 2013/08/29 morning by Wei
# There are still some bugs in it; I abandoned it because I want to do the task manually.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/partialBM25ScoreBucketingPrelimanryResultsV2_RAW.csv"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/partialBM25ScoreBucketingPrelimanryResultsV2_Combined.csv"
outputFileHandler = open(outputFileName,"w")

currentAccumulatedDenominator = 0
currentAccumulatedNumerator = 0

combineClassLowerBoundValue = 0
combineClassUpperBoundValue = 0

combineClassLowerBoundLabel = 0
combineClassUpperBoundLabel = 0

newCombineClassFlag = True
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentClassLabel = int(lineElements[0])
    if newCombineClassFlag:
        combineClassLowerBoundLabel = currentClassLabel
        newCombineClassFlag = False
    classLowerBound = float(lineElements[1])
    currentDenominator = int(lineElements[2])
    currentNumerator = int(lineElements[3])  # was lineElements[2], which duplicated the denominator
    currentAccumulatedDenominator += currentDenominator
    currentAccumulatedNumerator += currentNumerator
    if currentAccumulatedNumerator > 200:
        # it is time to combine those lines
        newCombineClassFlag =True
        combineClassUpperBoundLabel = currentClassLabel
        outputLine =  str(combineClassLowerBoundLabel) + "_" + str(combineClassUpperBoundLabel) + " " 
        outputLine += str(combineClassLowerBoundValue) + "_" + str(combineClassUpperBoundValue) + " "
        outputLine += str(currentAccumulatedDenominator) + " "
        outputLine += str(currentAccumulatedNumerator) + " "
        outputLine += "\n"
        outputFileHandler.write(outputLine)
        
        
inputFileHandler0.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/ALLRelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHanlder = open(inputFileName,"r")
currentAccumulatedFreq = 0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentAccumulatedFreq += int( lineElements[1] )
print "currentAccumulatedFreq:",currentAccumulatedFreq
inputFileHanlder.close()
exit(1)
'''

'''
print "Program Begins..."

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/debug"
inputFileHandler0 = open(inputFileName,"r")

totalNumOfPostingsCounted = 0

currentTerm = ""
currentRealFreqOfTermInHead95KQueries = 0
currentTermLengthOfList = 0
tripleCounter = 0

line = inputFileHandler0.readline()

while line:
    if line.strip().startswith("totalNumOfPostingsCounted:"):
        break
    
    if line.strip().startswith("------>term:"):
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[-1] 
        line = inputFileHandler0.readline()
    
    if line.strip().startswith("------>real freq of term in head 95K queries:"):
        lineElements = line.strip().split(" ")
        currentRealFreqOfTermInHead95KQueries = int(lineElements[-1])
        line = inputFileHandler0.readline()
    
    if line.strip().startswith("------>currentTermInvertedIndexCorrectLength:"):
        lineElements = line.strip().split(" ")
        currentTermLengthOfList = int(lineElements[1])
    
    if currentTerm.strip() != "":
        if currentRealFreqOfTermInHead95KQueries != 0:
            if currentTermLengthOfList != 0:
                totalNumOfPostingsCounted += currentRealFreqOfTermInHead95KQueries * currentTermLengthOfList
                tripleCounter += 1
                print "Processing term:",currentTerm
                print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
                print "currentTermLengthOfList:",currentTermLengthOfList
                print
            else:
                line = inputFileHandler0.readline()
                if line.strip().endswith("is NOT in the lexicon."):
                    pass
                else:
                    print "DEBUG mode:"
                    print "currentTerm:",currentTerm
                    print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
                    print "currentTermLengthOfList:",currentTermLengthOfList
                    exit(1)                    
        else:
            print "DEBUG mode:"
            print "currentTerm:",currentTerm
            print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
            print "currentTermLengthOfList:",currentTermLengthOfList
            exit(1)
    else:
        pass
        #print "currentTerm:",currentTerm
        #print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
        #print "currentTermLengthOfList:",currentTermLengthOfList
        #print "some problems happened :)"
        #exit(1)
    
    line = inputFileHandler0.readline()
    
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "tripleCounter:",tripleCounter
print "Program Ends."
exit(1)
'''

# NOTE(review): disabled experiment kept inside a triple-quoted string (used as a
# block comment; the string value is discarded). The Python 2 fragment below builds
# geometric score classes (lower bounds sMin * 1.2^k, capped at 25), buckets ALL and
# TOP10 related postings into those classes from two input files, prints per-class
# counts with the TOP10/ALL ratio, and cross-checks hard-coded totals.
'''
# This part of logic has been copied to the program called: 
# /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/stepX_gov2_partialBM25_hit_list_distribution_analysis.py
print "Program Begins..."

# key: class label in int format
# value: currentLowerBound in float format
classLabelsWithTheirLowerBoundDict = {}

# key: class label in int format
# value: # of ALL related postings belonging to this class
classLabelsWithNumOfALLRelatedPostingsDict = {}

# key: class label in int format
# value: # of TOP10 related postings belonging to this class
classLabelsWithNumOfTOP10RelatedPostingsDict = {}

sMin = 0.001
sMax = 19.746
stepFactor = 1.2

currentLowerBound = sMin
classLabel = 0
while currentLowerBound <= 25:
    # print classLabel,currentLowerBound
    if classLabel not in classLabelsWithTheirLowerBoundDict:
        classLabelsWithTheirLowerBoundDict[classLabel] = currentLowerBound
        classLabelsWithNumOfALLRelatedPostingsDict[classLabel] = 0
        classLabelsWithNumOfTOP10RelatedPostingsDict[classLabel] = 0
    currentLowerBound = currentLowerBound * 1.2
    classLabel += 1

print "len(classLabelsWithTheirLowerBoundDict):",len(classLabelsWithTheirLowerBoundDict)

ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/ALLRelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHandler0 = open(ifn,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentOriginalRoundingClassInFloatFormat = float(lineElements[0])
    numOfPostingsInCurrentRoundingClass = int( lineElements[1] )
    
    if numOfPostingsInCurrentRoundingClass != 0:
        beenCountedFlag = False
        for classLabel in classLabelsWithTheirLowerBoundDict:
            # for DEBUG
            # print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            # print "classLabelsWithTheirLowerBoundDict[classLabel]:",classLabelsWithTheirLowerBoundDict[classLabel]
            if classLabel == 0:
                pass
                # for DEBUG
                # print "classLabelsWithTheirLowerBoundDict[classLabel-1]:",classLabelsWithTheirLowerBoundDict[classLabel-1]
            elif classLabel != 0:
                if currentOriginalRoundingClassInFloatFormat < classLabelsWithTheirLowerBoundDict[classLabel] and currentOriginalRoundingClassInFloatFormat >= classLabelsWithTheirLowerBoundDict[classLabel-1]:
                    classLabelsWithNumOfALLRelatedPostingsDict[classLabel-1] += numOfPostingsInCurrentRoundingClass
                    beenCountedFlag = True
                else:
                    pass
            else:
                pass # just don't care
        if beenCountedFlag:
            pass
        else:
            print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            print "numOfPostingsInCurrentRoundingClass:",numOfPostingsInCurrentRoundingClass
            exit(1)
inputFileHandler0.close()

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP10RelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHandler0 = open(inputFileName1,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentOriginalRoundingClassInFloatFormat = float(lineElements[0])
    numOfPostingsInCurrentRoundingClass = int( lineElements[1] )
    if numOfPostingsInCurrentRoundingClass != 0:
        for classLabel in classLabelsWithTheirLowerBoundDict:
            # for DEBUG
            # print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            # print "classLabelsWithTheirLowerBoundDict[classLabel]:",classLabelsWithTheirLowerBoundDict[classLabel]
            if classLabel != 0:
                pass
                # for DEBUG
                # print "classLabelsWithTheirLowerBoundDict[classLabel-1]:",classLabelsWithTheirLowerBoundDict[classLabel-1]
            
            if classLabel != 0:
                if currentOriginalRoundingClassInFloatFormat < classLabelsWithTheirLowerBoundDict[classLabel] and currentOriginalRoundingClassInFloatFormat >= classLabelsWithTheirLowerBoundDict[classLabel-1]:
                    classLabelsWithNumOfTOP10RelatedPostingsDict[classLabel-1] += numOfPostingsInCurrentRoundingClass
                else:
                    pass
            else:
                pass # just don't care
inputFileHandler0.close()

print "len(classLabelsWithTheirLowerBoundDict):",len(classLabelsWithTheirLowerBoundDict)
print "len(classLabelsWithNumOfALLRelatedPostingsDict):",len(classLabelsWithNumOfALLRelatedPostingsDict)
print "len(classLabelsWithNumOfTOP10RelatedPostingsDict):",len(classLabelsWithNumOfTOP10RelatedPostingsDict)
assert len(classLabelsWithTheirLowerBoundDict) == len(classLabelsWithNumOfALLRelatedPostingsDict)

for i in range( 0,len(classLabelsWithTheirLowerBoundDict) ):
    if classLabelsWithNumOfALLRelatedPostingsDict[i] != 0:
        print i,classLabelsWithTheirLowerBoundDict[i],classLabelsWithNumOfALLRelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i]/classLabelsWithNumOfALLRelatedPostingsDict[i]
    else:
        print i,classLabelsWithTheirLowerBoundDict[i],classLabelsWithNumOfALLRelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i],0

# check
# total Num Of ALL Related Postings Counted: 5054184114
# total Num Of TOP10 Related Postings Counted: 3632263
tempSumValue1 = 0
tempSumValue2 = 0
for i in range( 0,len(classLabelsWithTheirLowerBoundDict) ):
    tempSumValue1 += classLabelsWithNumOfALLRelatedPostingsDict[i]
    tempSumValue2 += classLabelsWithNumOfTOP10RelatedPostingsDict[i]
print "ALLRelatedPostings:",tempSumValue1
print "TOP10RelatedPostings:",tempSumValue2
print "Probability for a random posting to be in the TOP10:",tempSumValue2/tempSumValue1
print "sMin:",sMin
print "sMax:",sMax
print "stepFactor:",stepFactor
assert tempSumValue1 == 5054184114
assert tempSumValue2 == 3632263
print "Program Ends."
exit(1)
'''


# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Sums the
# "currentTermInvertedIndexCorrectLength:" values found in a debug file and prints
# the total posting count.
'''
print "Program Begins..."
# 2 Billion so far, still need to wait for the program to run. The program itself is called:
# /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/govX_gov2_partialBM25_hit_list_distribution_analysis.py
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/debug"
inputFileHandler0 = open(inputFileName,"r")

totalNumOfPostings = 0
for index,line in enumerate( inputFileHandler0.readlines() ):
    if line.strip().startswith("------>currentTermInvertedIndexCorrectLength:"):
        lineElements = line.strip().split(" ")
        currentNumOfPostings = int(lineElements[1])
        totalNumOfPostings += currentNumOfPostings
print "totalNumOfPostings:",totalNumOfPostings
print "Program Ends."
exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Reads
# terms sorted by real frequency in the 100K queries, accumulates frequencies, and
# prints cumulative coverage at geometric TOP-K checkpoints (5 * 2^i).
'''
print "Program Begins..."
# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# This logic is going to answer the question4 by prof
TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES = 413533

ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler0 = open(ifn,"r")
# tempCounter = 0

# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,14):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList
accumulatedTermRealFreqIn100KQueries = 0

for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermRealFreqIn100KQueries = int(lineElements[1])
    accumulatedTermRealFreqIn100KQueries += currentTermRealFreqIn100KQueries
    
    if index+1 in TOPKNumbersOutputingList:
        print index+1,accumulatedTermRealFreqIn100KQueries,accumulatedTermRealFreqIn100KQueries / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES

print index+1,accumulatedTermRealFreqIn100KQueries,accumulatedTermRealFreqIn100KQueries / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES
inputFileHandler0.close()
print "Program Ends."
exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Loads
# per-term real frequencies from the 100K queries into a dict, then walks the
# lexicon ordered by decreasing list length, accumulating the frequency of each
# term seen and printing coverage at TOP-K checkpoints (5 * 2^i).
'''
print "Program Begins..."
TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES = 413533

# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# The file TOP1000 lines: 
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_1000.txt


# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,24):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList

queryTermsWithRealFreqIn100KQueriesDict = {}
# num of term occurrences in this 100K queries: 413533
ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler0 = open(ifn,"r")
# tempCounter = 0
for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermRealFreqIn100KQueries = int(lineElements[1])
    if currentTerm not in queryTermsWithRealFreqIn100KQueriesDict:
        queryTermsWithRealFreqIn100KQueriesDict[currentTerm] = currentTermRealFreqIn100KQueries
    else:
        print "duplicated terms found"
        exit(1)
    # tempCounter += int( lineElements[1] )
# print "tempCounter:",tempCounter
inputFileHandler0.close()
print "len(queryTermsWithRealFreqIn100KQueriesDict):",len(queryTermsWithRealFreqIn100KQueriesDict)
print "queryTermsWithRealFreqIn100KQueriesDict['of']:",queryTermsWithRealFreqIn100KQueriesDict['of']


# for production
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order.txt"
# for debug
# inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order_tail_1000.txt"
inputFileHanlder = open(inputFileName1,"r")

# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP1000TermsInTermsOfLengthOfListForGOV2Index_by_BAGS_OF_TERMS.csv"
# outputFileHandler = open(outputFileName,"w")

currentAccumulatedNumOfOccurencesCounted = 0
currentLine = 0 # init 
numOfQueriesFallingThrough = 0

currentLine = inputFileHanlder.readline() # read the 1st line
numOfQueriesFallingThrough += 1

while currentLine:
    lineElements = currentLine.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm in queryTermsWithRealFreqIn100KQueriesDict:
        currentAccumulatedNumOfOccurencesCounted += queryTermsWithRealFreqIn100KQueriesDict[currentTerm]
    
    if numOfQueriesFallingThrough in TOPKNumbersOutputingList: 
        percentageOfTotalLengthOfListOfALL = currentAccumulatedNumOfOccurencesCounted / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES 
        print numOfQueriesFallingThrough,currentAccumulatedNumOfOccurencesCounted,percentageOfTotalLengthOfListOfALL
    
    currentLine = inputFileHanlder.readline()
    numOfQueriesFallingThrough += 1
print numOfQueriesFallingThrough,currentAccumulatedNumOfOccurencesCounted,percentageOfTotalLengthOfListOfALL

print "ifn:",ifn
print "inputFileName1:",inputFileName1
inputFileHanlder.close()
# outputFileHandler.close()
print "Program Ends."
exit(1)
'''


# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Loads
# per-term list lengths, then walks terms ordered by real query frequency and
# accumulates their list lengths, printing coverage of the whole index's posting
# count at TOP-K checkpoints (5 * 2^i).
'''
TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010 # 100% of the whole index (all the unseen term lists have been added)

# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingIndexList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,14):
    TOPKNumbersOutputingIndexList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
print "TOPKNumbersOutputingIndexList:",TOPKNumbersOutputingIndexList

# key: term in string format
# value: length of the list in int format
queryTermWithLengthOfListDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHanlder = open(inputFileName1,"r")
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermLengthOfList = int(lineElements[1])
    if currentTerm not in queryTermWithLengthOfListDict:
        queryTermWithLengthOfListDict[currentTerm] = currentTermLengthOfList
    else:
        print "duplicated terms."
        exit(1)

# the top 5 query terms which have been searched for many times
# (1) of 11587
# (2) in 8006
# (3) and 6066
# (4) for 5705
# (5) the 4542


print "len(queryTermWithLengthOfListDict):",len(queryTermWithLengthOfListDict)
print "queryTermWithLengthOfListDict['of']:",queryTermWithLengthOfListDict['of']
print "queryTermWithLengthOfListDict['in']:",queryTermWithLengthOfListDict['in']
print "queryTermWithLengthOfListDict['and']:",queryTermWithLengthOfListDict['and']
print "queryTermWithLengthOfListDict['for']:",queryTermWithLengthOfListDict['for']
print "queryTermWithLengthOfListDict['the']:",queryTermWithLengthOfListDict['the']
inputFileHanlder.close()


inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler0 = open(inputFileName2,"r")
accumulatedNumOfPostings = 0
for index,line in enumerate( inputFileHandler0.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    # get the length of list from another dict
    if currentTerm not in queryTermWithLengthOfListDict:
        print "system error."
        exit(1)
    else:
        accumulatedNumOfPostings += queryTermWithLengthOfListDict[currentTerm]

    if index+1 in TOPKNumbersOutputingIndexList:
        print index+1,accumulatedNumOfPostings,accumulatedNumOfPostings/TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX
print index+1,accumulatedNumOfPostings,accumulatedNumOfPostings/TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX
inputFileHandler0.close()
exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Walks
# the lexicon sorted by decreasing list length, accumulating list lengths and
# printing the cumulative fraction of all index postings at TOP-K checkpoints
# (10 * 2^i).
'''
print "Program Begins..."
TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010 # 100% of the whole index (all the unseen term lists have been added)

# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# The file TOP1000 lines: 
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_1000.txt


# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 10
step = 2
for i in range(0,24):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList

# for production
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order.txt"
# for debug
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order_tail_1000.txt"
inputFileHanlder = open(inputFileName,"r")

# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP1000TermsInTermsOfLengthOfListForGOV2Index_by_BAGS_OF_TERMS.csv"
# outputFileHandler = open(outputFileName,"w")

currentAccumulatedNumOfPostingsCounted = 0
currentLine = 0 # init 
numOfQueriesFallingThrough = 0

currentLine = inputFileHanlder.readline() # read the 1st line
numOfQueriesFallingThrough += 1

while currentLine:
    lineElements = currentLine.strip().split(" ")
    currentTermLengthOfList = int(lineElements[1])
    currentAccumulatedNumOfPostingsCounted += currentTermLengthOfList
    
    if numOfQueriesFallingThrough in TOPKNumbersOutputingList: 
        percentageOfTotalLengthOfListOfALL = currentAccumulatedNumOfPostingsCounted / TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX 
        print numOfQueriesFallingThrough,currentAccumulatedNumOfPostingsCounted,percentageOfTotalLengthOfListOfALL
    
    currentLine = inputFileHanlder.readline()
    numOfQueriesFallingThrough += 1

print "inputFileName:",inputFileName
# print "outputFileName:",outputFileName
inputFileHanlder.close()
# outputFileHandler.close()
print "Program Ends."
exit(1)
'''


# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). A tiny
# memory-release experiment: builds a dict, deletes it, and forces gc.collect().
# Note it shadows the builtin name `dict` — acceptable only because it is dead code.
'''
print "Program Begins..."
dict = {}
dict[0] = 0
dict[1] = 1
del dict
dict = None
gc.collect()
print "Program Ends."
'''


# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Sums the
# second column (result counts) of a tail-5K evaluation file and prints the total.
'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KWithNumOfResultsEvaluated"
inputFileHanlder = open(inputFileName,"r")
totalNumOfResults = 0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfResults = int(lineElements[1])
    totalNumOfResults += currentNumOfResults
print "totalNumOfResults:",totalNumOfResults
inputFileHanlder.close()
exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Loads
# term -> class label and term -> {pieceNumber: numOfPostings} maps from a large
# lexicon file, timing progress with time.clock (deprecated; time.perf_counter in
# modern Python). Recorded load times are noted in the leading comments.
'''
# Some numbers:
# Just read all the lines(37M) one by one into main memory, it takes 26 seconds.
# Just split the list into elements(37M lines) and it takes 55 seconds.
# Take 630 seconds to assign some variables in memory
# It takes about 13.2 mins only to load the info to the main memory


startingTime = time.clock()
endingTime = (time.clock() - startingTime)*1000
print "endingTime:",endingTime
   
# code part in test begins.
# key: term in string format
# value: class label in int format
termClassLabelDict = {}

# key: term in string format
# value: a dict
    # key: pieceNumber
    # value: numOfPostingsInThisPiece
termPiecesInfoDict = {}

# This file maybe NOT big enough
# option1: 100K query terms ONLY
# ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option2: 100K query terms + some unseen terms for debug
# ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsPlusSomeUnseenTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option3: whole lexicon terms for production
ifn = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler0 = open(ifn,"r")

numOfQueriesFallingThrough = 0 # init

currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough += 1

while currentLine:
    
    currentLineElements = currentLine.strip().split(" ")
    
    
    currentTerm = currentLineElements[0]
    # ignore currentLineElements[1], cause we will some other sources provide this info about length of the inverted list for this term
    currentTermClassLabelInIntFormat = int( currentLineElements[2] )
    currentTermNumOfPiecesHave = int( currentLineElements[3] )
    
    # fill the variable: termClassLabelDict
    if currentTerm not in termClassLabelDict:
        termClassLabelDict[currentTerm] = currentTermClassLabelInIntFormat 
    
    if currentTerm not in termPiecesInfoDict:
        termPiecesInfoDict[currentTerm] = {}
    
    
    # fill the variable: termPiecesInfoDict
    baseIndex = 4
    for i in range( 0,len( currentLineElements[4:]),2):
        currentPieceNum = int( currentLineElements[4+i] )
        currentNumOfPostingsInThiePiece = int( currentLineElements[4+i+1])
        if currentPieceNum not in termPiecesInfoDict[currentTerm]:
            termPiecesInfoDict[currentTerm][currentPieceNum] = currentNumOfPostingsInThiePiece
        else:
            print "system error, mark1."
    
    
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
    if numOfQueriesFallingThrough % 10000 == 0:
        print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough
        print "len(termClassLabelDict):",len(termClassLabelDict)
        print "len(termPiecesInfoDict):",len(termPiecesInfoDict) 
        endingTime = (time.clock() - startingTime)*1000
        print "endingTime:",endingTime
    

inputFileHandler0.close()
print "len(termClassLabelDict):",len(termClassLabelDict)
print "len(termPiecesInfoDict):",len(termPiecesInfoDict)
print "termClassLabelDict['0']:",termClassLabelDict['0']
print "termPiecesInfoDict['0']:",termPiecesInfoDict['0']
# print "termClassLabelDict['0120j4']:",termClassLabelDict['0120j4']
# print "termPiecesInfoDict['0120j4']:",termPiecesInfoDict['0120j4']
# len(termClassLabelDict): 38871
# len(termPiecesInfoDict): 38871
# termClassLabelDict['0']: 63
# termPiecesInfoDict['0']: {0: 4200166, 1: 2100083, 2: 1050041, 3: 525020, 4: 262510, 5: 131255, 6: 65627, 7: 32813, 8: 16406, 9: 8203, 10: 4101, 11: 2050, 12: 1025, 13: 512, 14: 256, 15: 128, 16: 64, 17: 73}
# termClassLabelDict['0120j4']: -1
# termPiecesInfoDict['0120j4']: {}
# code part in test ends...
exit(1)
'''





# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Counts
# distinct (docID, term) pairs in a results file via a "docID_term" keyed dict.
'''
testDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    docIDInStringFormat = lineElements[1]
    termInStringFormat = lineElements[2]
    key = docIDInStringFormat + "_" + termInStringFormat
    if key not in testDict:
        testDict[key] = 1

print "len(testDict):",len(testDict)
inputFileHandler0.close()
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Maps old
# Good-Turing probability strings to freq-of-freq counts, then to new Good-Turing
# probability strings (both tables truncated after 20 data lines), and rewrites the
# probability column of a query-terms file accordingly, passing unknown rows through.
'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130820"
outputFileHandler = open(outputFileName,"w")

# key: oldGoodTuringProbability In String Format
# value: freqOfFreq in int format
oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_good_turing_output"
inputFileHandler0 = open(inputFileName1,"r")
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
currentLine = inputFileHandler0.readline()
while currentLine:
    lineElements = currentLine.strip().split(" ")
    freqOfFreq = int(lineElements[0])
    oldGoodTuringProbabilityInStringFormat = lineElements[3]
    if oldGoodTuringProbabilityInStringFormat not in oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:
        oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict[oldGoodTuringProbabilityInStringFormat] = freqOfFreq
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
    if numOfQueriesFallingThrough % 21 == 0:
        break
print "len(oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict):",len(oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict)
print "oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:",oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict
inputFileHandler0.close()

# key: freqOfFreq in int format
# value: newGoodTuringProbability In String Format Dict
freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict = {}
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output"
inputFileHandler0 = open(inputFileName2,"r")
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
currentLine = inputFileHandler0.readline()
while currentLine:
    lineElements = currentLine.strip().split(" ")
    freqOfFreq = int(lineElements[0])
    newGoodTuringProbabilityInStringFormat = lineElements[3]
    if freqOfFreq not in freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict:
        freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict[freqOfFreq] = newGoodTuringProbabilityInStringFormat
    currentLine = inputFileHandler0.readline()
    numOfQueriesFallingThrough += 1
    if numOfQueriesFallingThrough % 21 == 0:
        break
print "len(freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict):",len(freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict)
print "freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict:",freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict
inputFileHandler0.close()


inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler0 = open(inputFileName3,"r")

outputFileHandler.write(inputFileHandler0.readline())

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    oldGoodTuringProbabilityInStringFormat = lineElements[4]
    outputLine = ""
    if oldGoodTuringProbabilityInStringFormat in oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:
        outputLine = lineElements[0] + " " + lineElements[1] + " " + lineElements[2] + " " + lineElements[3] + " " + freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict[ oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict[oldGoodTuringProbabilityInStringFormat] ] + "\n"
    else:
        outputLine = line
    outputFileHandler.write(outputLine)
    
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "outputFileName:",outputFileName
    
inputFileHandler0.close()
outputFileHandler.close()
'''


# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Counts
# the distinct 1D and 2D probability strings (columns 2 and 3) in a query-terms file.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

# key: probability1D in string format
# value: counter in int format
probabilityFor1DDict = {}

# key: probability2D in string format
# value: counter in int format
probabilityFor2DDict = {}

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    current1DProbabilityInStringFormat = lineElements[2]
    current2DProbabilityInStringFormat = lineElements[3]
    if current1DProbabilityInStringFormat not in probabilityFor1DDict:
        probabilityFor1DDict[current1DProbabilityInStringFormat] = 1
    else:
        pass
    
    if current2DProbabilityInStringFormat not in probabilityFor2DDict:
        probabilityFor2DDict[current2DProbabilityInStringFormat] = 1
    else:
        pass

print "len(probabilityFor1DDict):",len(probabilityFor1DDict)
print "len(probabilityFor2DDict):",len(probabilityFor2DDict)
inputFileHandler0.close()
exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Prints
# 1 / 2^(i+1) for i in 0..18; relies on `from __future__ import division` (HEAD)
# for float results.
'''
for i in range(0,19):
    print i, 1 / pow(2,i+1)

exit(1)
'''

# NOTE(review): disabled Python 2 fragment (triple-quoted, never executed). Computes
# the distribution of distinct-term query lengths over a (machine-generated) query
# log, flagging empty queries, and prints per-length frequency, fraction, and the
# average query length.
'''
# What is the query length distribution for the fake one, it seems that the fake one is too fake for me
# key: queryLength in int format
# value: freq in int format
queryLengthDistributionDict = {}
# option1
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/100KQueries_head_95K"
# option2
# inputFileName = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
# option3
# inputFileName = "/data/jrodri04/lm/30M.95KQ.ModKN.txt"
# option4
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/95KQueriesMachineGenerated3_20130818_using_new_fake_queryLog"

inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
numOfQueriesFallingThrough = 1
while currentLine:
    # option1
    # data = currentLine.strip().split(":")[1]
    # option2
    # data = currentLine.strip().lower()
    # option3
    data = currentLine.strip().split(":")[2]
    
    
    # processing option1 (the ORIGINAL one)
    # for i in range(0,len(data)):
    #    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
    #        data = data[:i] + " " + data[i+1:]
    # queryContent = data
    
    
    # processing option2 (the simple split one)
    queryContent = data
    
    
    queryContentElements = queryContent.strip().split(" ") 
    
    currentQueryTermDict = {}
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in currentQueryTermDict:
                currentQueryTermDict[element.strip()] = 1
    currentQueryLength = len(currentQueryTermDict)
    if currentQueryLength not in queryLengthDistributionDict:
        if currentQueryLength == 0:
            print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough,"*",currentLine.strip(),"*"
            # print currentQueryTermDict,"*"
            # print "*",currentLine.strip(),"*"
            # print
        queryLengthDistributionDict[currentQueryLength] = 1
    else:
        if currentQueryLength == 0:
            print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough,"*",currentLine.strip(),"*"
            # print currentQueryTermDict,"*"
            # print "*",currentLine.strip(),"*"
            # print
        queryLengthDistributionDict[currentQueryLength] += 1
    currentLine = inputFileHandler0.readline()     
    numOfQueriesFallingThrough += 1
    # print numOfQueriesFallingThrough
    if numOfQueriesFallingThrough % 1000001 == 0:
        print numOfQueriesFallingThrough
        break

queryLengthList = []
queryLengthList = queryLengthDistributionDict.keys()
queryLengthList.sort(cmp=None, key=None, reverse=False)
# option1
TOTAL_NUM_OF_QUERIES = 95000
# option2
# TOTAL_NUM_OF_QUERIES = 1000000
totalQueryLength = 0
averageQueryLength = 0
for queryLength in queryLengthDistributionDict:
    print queryLength,queryLengthDistributionDict[queryLength],queryLengthDistributionDict[queryLength]/TOTAL_NUM_OF_QUERIES
    totalQueryLength += queryLengthDistributionDict[queryLength] * queryLength
averageQueryLength = totalQueryLength / TOTAL_NUM_OF_QUERIES
print "total # of queries counted:",numOfQueriesFallingThrough-1
print "average Query Length:",averageQueryLength
print "inputFileName:",inputFileName
exit(1)
'''

# Disabled scratch snippet (the whole section is a module-level triple-quoted
# string, i.e. dead code that is never executed): scan a simulation-test log
# and print every line reporting an empty result set; the commented "option2"
# variants grep for other log markers instead.
'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/debugForSimulationTest"
inputFileHandler0 = open(inputFileName,"r")
valueCounter = 0
for line in inputFileHandler0.readlines():
    # option1
    if line.strip().startswith("Showing 0 results out of 0."):
        print line.strip()
    
    # option2
    #if line.strip().startswith("qid:"):
    #    print line.strip()
    #if line.strip().startswith("Search:"):
    #    print line.strip()
    #if line.strip().startswith("the term:"):
    #    print line.strip()
    #if line.strip().startswith("This query has query terms"):
    #    print line.strip()
    #    valueCounter += 1
    #    print "numOfOutOfLexiconQueries:",valueCounter
    #    print
    
inputFileHandler0.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): load the
# 95K-query vocabulary (aborting on duplicates), mark which vocabulary terms
# occur in the generated 10M query trace, then list the terms the trace never
# produced.  Aborts immediately if the trace contains an out-of-lexicon term.
'''
termDict = {}
inputFileName1 = "/data/jrodri04/lm/models/95KQ.5gram.ModKN.vocab"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    currentTerm = line.strip()
    if currentTerm not in termDict:
        termDict[currentTerm] = False
    else:
        print "duplicated term:",currentTerm
        exit(1)
print "len(termDict):",len(termDict)
inputFileHandler1.close()

inputFileName2 = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
inputFileHandler2 = open(inputFileName2,"r")
currentLine = inputFileHandler2.readline()
numOfQueriesFallingThrough = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    for element in currentLineElements:
        if element.strip() in termDict:
            termDict[ element.strip() ] = True
            # for DEBUG ONLY
            # print "element.strip():",element.strip()
            pass
        else:
            if element.strip() != "":
                termWithBeginningAndEndingTag = "*" + element.strip() + "*" 
                print "query term:",termWithBeginningAndEndingTag,"does NOT appear in the lexicon"
                exit(1)
            else:
                pass # Just an empty string

    currentLine = inputFileHandler2.readline()
    numOfQueriesFallingThrough += 1
    if numOfQueriesFallingThrough % 1000000 == 0:
        print "numOfQueriesFallingThrough:",numOfQueriesFallingThrough

print "The following terms have NOT been generated in the fake query trace"
for term in termDict:
    if termDict[term] == False:
        print term

inputFileHandler2.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): build
# two term sets from the two input files and print intersection/union sizes.
# NOTE(review): the last print is labeled "symmetric difference" but actually
# computes the Jaccard ratio |intersection| / |union|.
'''
# 37827 query terms from Juan
# 38871 query terms from Wei
set1 = Set([])
set2 = Set([])
intersectionSet = Set([])
unionSet = Set([])

inputFileName1 = "/data/jrodri04/lm/models/95KQ.5gram.ModKN.vocab"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    set1.add(line.strip())
inputFileHandler1.close()
print "len(set1):",len(set1)

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWithTheirLengthsOfInvertedList"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    set2.add(line.strip().split(" ")[0])
inputFileHandler2.close()
print "len(set2):",len(set2)
# print "set1:",set1
# print "set2:",set2
intersectionSet = set1.intersection( set2 )
unionSet = set1.union( set2 )
print "length of intersection Set:",len(intersectionSet)
print "length of union set:",len(unionSet)
print "symmetric difference:",len(intersectionSet)/len(unionSet)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): join
# three per-term result files line-by-line into one upload file.  Only file1
# drives the loop; files 2 and 3 are read in lockstep, and a line is emitted
# only when the key columns (0 and 2) agree across all three files.
'''
# Join the result set together in order to upload them to the google shared doc for prof to see
print "Program Begins..."
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeAndMetaInfoBothFor1HumanAND2MachineQueries"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeAndMetaInfo"
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeFromMachineGeneratedQueriesAndMetaInfo"
inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeFromMachineGeneratedQueriesAndMetaInfo2"

inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler3 = open(inputFileName3,"r")
currentLineFromFile1 = inputFileHandler1.readline()
while currentLineFromFile1:
    currentLineElementsFromFile1 = currentLineFromFile1.strip().split(" ")
    currentLineFromFile2 = inputFileHandler2.readline()
    currentLineFromFile3 = inputFileHandler3.readline()
    currentLineElementsFromFile2 = currentLineFromFile2.strip().split(" ")
    currentLineElementsFromFile3 = currentLineFromFile3.strip().split(" ")
    
    outputLine = ""
    if currentLineElementsFromFile1[0] == currentLineElementsFromFile2[0] and currentLineElementsFromFile1[2] == currentLineElementsFromFile2[2] and currentLineElementsFromFile1[0] == currentLineElementsFromFile3[0] and currentLineElementsFromFile1[2] == currentLineElementsFromFile3[2]:
        outputLine = currentLineElementsFromFile1[0] + " " + currentLineElementsFromFile1[2] + " "
        outputLine += currentLineElementsFromFile1[1] + " " + currentLineElementsFromFile1[3] + " " + currentLineElementsFromFile1[4] + " "
        outputLine += currentLineElementsFromFile2[1] + " " + currentLineElementsFromFile2[3] + " " + currentLineElementsFromFile2[4] + " "
        outputLine += currentLineElementsFromFile3[1] + " " + currentLineElementsFromFile3[3] + " " + currentLineElementsFromFile3[4] + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)
    
    currentLineFromFile1 = inputFileHandler1.readline()

inputFileHandler1.close()
inputFileHandler2.close()
inputFileHandler3.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "outputFileName:",outputFileName
print "Program Ends."
'''


# Disabled scratch snippet (dead code inside a triple-quoted string): count
# how many queries in a (qid, intersectionSize) mapping table have an
# intersection size of zero.  (Note the "inputFileHanlder" typo is consistent
# within the snippet, so it would still run if re-enabled.)
'''
# small logic test of how many zero results returned for a set of queries.
# human generated query log
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/qidANDIntersectionSizeMappingTable"
# machine generated query log
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/machineGeneratedQIDsANDIntersectionSizeMappingTable"
inputFileHanlder = open(inputFileName,"r")
numOfQueriesReturnZeroResults = 0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    numOfResultsForCurrentQuery = int( lineElements[1] )
    if numOfResultsForCurrentQuery == 0:
        numOfQueriesReturnZeroResults += 1
print "numOfQueriesReturnZeroResults:",numOfQueriesReturnZeroResults
inputFileHanlder.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): rewrite
# "x:qid:query" lines into the IRTK-compatible "qid:query" format, dropping
# the first colon-separated field.
'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated2"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated_IRTK_Compatible_Format2"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(":")
    newQueryID = int(lineElements[1])
    queryContent =  lineElements[2]
    outputLine = str(newQueryID) + ":" + queryContent + "\n" 
    outputFileHandler.write(outputLine)
inputFileHandler0.close()
outputFileHandler.close()
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): draw
# 95,000 distinct line numbers uniformly from [1, 10000000] (note the stale
# "25,205,179" comment copied from an earlier variant), then copy exactly
# those lines of the 10M machine-generated query trace to the output file,
# prefixed with the original and the new sequential query IDs.
'''
# step1: random select the queryIDs from the range [1,10000000]
randomlySelectedIDList = []
randomlySelectedIDDict = {}

# in debug
# totalNumOfRandomlySelectedSamples = 10
# in production
totalNumOfRandomlySelectedSamples = 95000
totalNumOfSamples = 10000000
# assume that the selectedID starts from 0 to 25205179 in the total of 25,205,179 (25M)
while len(randomlySelectedIDDict) != totalNumOfRandomlySelectedSamples:
    # Return a random integer N such that a <= N <= b
    selectedID = random.randint(1, totalNumOfSamples)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDList.append(selectedID)
        randomlySelectedIDDict[selectedID] = 1

randomlySelectedIDList.sort(cmp=None, key=None, reverse=False)
# print "randomlySelectedIDList:",randomlySelectedIDList
print "len(randomlySelectedIDList):",len(randomlySelectedIDList)
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)

# step2:
# extract those queries from the machine generated query log and form a machine generated query log compared to the human made query log 
outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/95KQueriesMachineGenerated"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
originalLineCounter = 1
newQueryIDLineCounter = 1
while currentLine:
    if originalLineCounter in randomlySelectedIDDict:
        # for debug ONLY
        # print "currentLine:",currentLine.strip()
        queryContent = currentLine.strip()
        outputFileHandler.write(str(originalLineCounter) + " " + str(newQueryIDLineCounter) + " " + queryContent + "\n")
        newQueryIDLineCounter += 1
    currentLine = inputFileHandler0.readline()
    originalLineCounter += 1
inputFileHandler0.close()
outputFileHandler.close()
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): parse
# "qid:query" lines, replace every non-alphanumeric/non-space character with
# a space, and record each query's length counted in *distinct* terms.
'''
print "Program Begins..."
# key: int
# queryID in int format
# value: int
# query length (duplicate terms NOT counted)
queryIDsWIthTheirLengthDict = {}
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")


for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data
    
    queryContentElements = queryContent.strip().split(" ")
    currentQueryTermDict = {}
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in currentQueryTermDict:
                currentQueryTermDict[element.strip()] = 1
    
    # print "----->",queryID,len(currentQueryTermDict)
    
    if queryID not in queryIDsWIthTheirLengthDict:
        queryIDsWIthTheirLengthDict[queryID] = len(currentQueryTermDict)

print "len(queryIDsWIthTheirLengthDict):",len(queryIDsWIthTheirLengthDict)
print "queryIDsWIthTheirLengthDict[94996]:",queryIDsWIthTheirLengthDict[94996]
inputQueryHandler.close()
print "Program Ends."
'''


# Disabled scratch snippet (dead code inside a triple-quoted string): scan a
# raw intersection-result log, pair each "qid: N" line with the following
# "Showing ..." line (result count parsed from the 6th token minus its
# trailing punctuation), and write one "qid size" line per complete pair.
'''
# small logic of building the (qid,intersectionSizeMappingTable)
print "Program Begins..."
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/intersectionResult_machine_generated_queries_whole2"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/machineGeneratedQIDsANDIntersectionSizeMappingTable2"
outputFileHandler = open(outputFileName,"w")

currentQID = 0
currentQueryIntersectionSize = 0
qIDFoundFlag = False
queryIntersectionSizeFoundFlag = False
for line in inputFileHandler0.readlines():
    if line.startswith("qid:"):
        currentQID = int( line.strip().split(" ")[1] )
        qIDFoundFlag =True
        # for DEBUG
        # print currentQID
        # print line.strip()
    
    if line.startswith("Showing"):
        currentQueryIntersectionSize = int(line.strip().split(" ")[5][:-1])
        queryIntersectionSizeFoundFlag = True
        # for DEBUG
        # print currentQueryIntersectionSize
        # print line.strip()
        # print
    
    if qIDFoundFlag and queryIntersectionSizeFoundFlag:
        outputFileHandler.write(str(currentQID) + " " + str(currentQueryIntersectionSize) + "\n")
        qIDFoundFlag = False
        queryIntersectionSizeFoundFlag = False
    
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler0.close()
outputFileHandler.close()
print "Program Ends."
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): average
# the per-query cost parsed from the parenthesized value on "Showing ..."
# lines of an intersection-result log.
'''
# small logic for computing the average query intersectionSize
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/intersectionResultPart1"
inputFileHandler0 = open(inputFileName,"r")
totalQueryCost = 0.0
currentQueryCost = 0.0
averageQueryCost = 0.0
queryCounter = 0
for line in inputFileHandler0.readlines():
    if line.startswith("Showing"):
        currentQueryCost = float(line.strip().split("(")[1].split(")")[0].split(" ")[0])
        totalQueryCost += currentQueryCost
        queryCounter += 1
averageQueryCost = totalQueryCost/queryCounter
print "average query intersectionSize:",averageQueryCost
inputFileHandler0.close()
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): draw 1M
# distinct line indices from [0, 25205179), then scatter-plot numOfPostings
# (column 2) vs Xdoc value (column 7) for the sampled lines.  Would fail if
# re-enabled as-is: it references `matplotlib.pyplot`, whose import is
# commented out at the top of this file.
'''
# working example in both local computer and remote pangolin server
# step1: select the random samples

randomlySelectedIDList = []
randomlySelectedIDDict = {}

# in debug
# totalNumOfRandomlySelectedSamples = 10
# in production
totalNumOfRandomlySelectedSamples = 1000000
totalNumOfSamples = 25205179
# assume that the selectedID starts from 0 to 25205179 in the total of 25,205,179 (25M)
while len(randomlySelectedIDDict) != totalNumOfRandomlySelectedSamples:
    # Return a random integer N such that a <= N <= b
    selectedID = random.randint(0, totalNumOfSamples-1)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDDict[selectedID] = 1
        randomlySelectedIDList.append(selectedID)
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)
print "len(randomlySelectedIDList):",len(randomlySelectedIDList)
randomlySelectedIDList.sort(cmp=None, key=None, reverse=False)
print "randomlySelectedIDList[-2]:",randomlySelectedIDList[-2]
print "randomlySelectedIDList[-1]:",randomlySelectedIDList[-1]
time.sleep(10) # delays for 10 seconds
# print randomlySelectedIDDict

NUM_OF_LINES_NEEDED = 26000000
x = []
y = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
inputFileHandler0 = open(inputFileName,"r")


currentLine = inputFileHandler0.readline()
currentLineIndex = 0

while currentLine and currentLineIndex < NUM_OF_LINES_NEEDED:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostingsRecorded = int(currentLineElements[2])
    currentXdocValue = float(currentLineElements[7])
    
    if currentLineIndex in randomlySelectedIDDict:
        x.append(currentNumOfPostingsRecorded)
        y.append(currentXdocValue)
        print "len(x):",len(x)
        print "currentLineIndex:",currentLineIndex
        print
        
    
    currentLine = inputFileHandler0.readline() 
    currentLineIndex += 1

matplotlib.pyplot.scatter(x,y)
matplotlib.pyplot.show()
inputFileHandler0.close()
'''


# Disabled scratch snippet (dead code inside a triple-quoted string):
# unfinished skeleton — the read loop only reassigns `currentLineElements`
# and never writes anything to the output file.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/docID_numOfPostingsRecorded_XdocValueUsingGoodTuringMethod"
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine
    currentLine = inputFileHandler0.readline()


outputFileHandler.close()
inputFileHandler0.close()
'''



# Disabled scratch snippet (dead code inside a triple-quoted string): verify
# two files agree on the first space-separated token of every line, printing
# the first mismatch and aborting.
'''
inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_local_index_for_each_term_added"
inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_set_of_probability_added"
inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")

for index,line1 in enumerate(inputFileHandler1.readlines()):
    line1Elements = line1.strip().split(" ")
    line2 = inputFileHandler2.readline()
    line2Elements = line2.strip().split(" ")
    if line1Elements[0] == line2Elements[0]:
        pass
    else:
        print "index:",index
        print "line1:",line1.strip()
        print "line2:",line2.strip()
        exit(1)
inputFileHandler1.close()
inputFileHandler2.close()
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): count
# the distinct first-column terms in the sampled-posting file.
'''
termsDict ={}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_local_index_for_each_term_added"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm not in termsDict:
        termsDict[currentTerm] = 1
    else:
        pass

print "len(termsDict):",len(termsDict)
inputFileHandler0.close()
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): sample
# 10,000 distinct posting IDs uniformly from [0, 6451948009] and write them
# out in ascending order, one per line.
'''
print "Program Begins..."
outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList"
outputFileHanlder = open(outputFileName,"w")

postingIDList = []
postingIDDict = {}

# in debug
# totalNumOfRandomlySampledPostings = 10
# in production
totalNumOfRandomlySampledPostings = 10000
totalNumOfPostingsInIndex = 6451948010
# assume that the posting ID starts from 0 to 6451948009 in the total of 6,451,948,010 (6.5B)
while len(postingIDList) != totalNumOfRandomlySampledPostings:
    # Return a random integer N such that a <= N <= b
    postingID = random.randint(0, totalNumOfPostingsInIndex-1)
    if postingID not in postingIDDict:
        postingIDList.append(postingID)
        postingIDDict[postingID] = 1
    print len(postingIDList)
    
postingIDList.sort(cmp=None, key=None, reverse=False)
for postingID in postingIDList:
    # print postingIDList
    outputFileHanlder.write(str(postingID) + "\n")

print "Program Ends."
outputFileHanlder.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): sum the
# per-term posting counts (column 1) of the whole lexicon to get the total
# number of postings in the inverted index.
'''
print "Program Begins..."
# The following answer ONE question, how many postings in total of our whole inverted index?
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")
currentLine = inputFileHandler0.readline()
totalNumOfPostings = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    currentTermNumOfPostingsInList = int(currentLineElements[1])
    totalNumOfPostings += currentTermNumOfPostingsInList
    currentLine = inputFileHandler0.readline()
print "totalNumOfPostings:",totalNumOfPostings
inputFileHandler0.close()
print "Program Ends."
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): given a
# hard-coded set of ten TREC IDs, print each one's internal docID (column 1)
# from the Xdoc mapping table, skipping the headline.
'''
print "Program Begins..."
docIDsNeededDict = {}
docIDsNeededDict["GX003-61-0529973"] = 1
docIDsNeededDict["GX021-66-11626454"] = 1
docIDsNeededDict["GX023-10-14566633"] = 1
docIDsNeededDict["GX023-98-2547874"] = 1
docIDsNeededDict["GX027-31-12566721"] = 1
docIDsNeededDict["GX057-81-12345226"] = 1
docIDsNeededDict["GX200-48-4132914"] = 1
docIDsNeededDict["GX229-55-7098182"] = 1
docIDsNeededDict["GX251-96-13027981"] = 1
docIDsNeededDict["GX265-40-16483965"] = 1
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"
inputFileHandler0 = open(inputFileName,"r")
# skip this headline
inputFileHandler0.readline()
currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentLineTrecID = currentLineElements[0]
    currentLineDocID = currentLineElements[1]
    if currentLineTrecID not in docIDsNeededDict:
        pass
    else:
        print currentLineTrecID,currentLineDocID
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
print "Program Ends."
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): diff a
# compared file (headline skipped) against a gold-standard file line by line
# on the (trecID, docID) pair and print any mismatch.
'''
print "Program Begins..."
inputGoldStandardFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/debugTrecIDANDDocIDPair"
inputComparedFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"

inputGoldStandardFileHandler = open(inputGoldStandardFileName,"r")
inputComparedFileHandler = open(inputComparedFileName,"r")

# skip the headline
inputComparedFileHandler.readline()

currentLineFromComparedFile = inputComparedFileHandler.readline()
while currentLineFromComparedFile:
    currentLineFromComparedFileElements = currentLineFromComparedFile.strip().split(" ")
    trecIDFromComparedFile = currentLineFromComparedFileElements[0]
    docIDFromComparedFile = currentLineFromComparedFileElements[1]
    
    currentLineFromGoldStandardFileElements = inputGoldStandardFileHandler.readline().strip().split(" ")
    trecIDFromGoldStandardFile = currentLineFromGoldStandardFileElements[0]
    docIDFromGoldStandardFile = currentLineFromGoldStandardFileElements[1]
    
    if trecIDFromComparedFile == trecIDFromGoldStandardFile and docIDFromComparedFile == docIDFromGoldStandardFile:
        pass
    else:
        print "critical error"
        print "trecIDFromComparedFile:",trecIDFromComparedFile
        print "trecIDFromGoldStandardFile:",trecIDFromGoldStandardFile
        print "docIDFromComparedFile:",docIDFromComparedFile
        print "docIDFromGoldStandardFile:",docIDFromGoldStandardFile
    
    currentLineFromComparedFile = inputComparedFileHandler.readline()

inputGoldStandardFileHandler.close()
inputComparedFileHandler.close()
print "Program Ends."
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): copy the
# Xdoc table while inserting a sequential internal docID as the new second
# column (headline copied through unchanged).
'''
print "Program Begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"
inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

# Copy the headline
currentLine = inputFileHandler0.readline()
outputFileHandler.write(currentLine)

currentLine = inputFileHandler0.readline()
docIDCounter = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    newCurrentLine = currentLineElements[0] + " " + str(docIDCounter) + " " + currentLineElements[1] + " " + currentLineElements[2] + " " + currentLineElements[3] + " " + currentLineElements[4] + "\n"
    outputFileHandler.write(newCurrentLine)
    docIDCounter += 1
    currentLine = inputFileHandler0.readline()
    
outputFileHandler.close()
inputFileHandler0.close()
print "Program Ends."
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): average
# the per-query millisecond cost parsed from the parenthesized value on
# "Showing ..." lines under AND semantics.
'''
# This part of logic is just to compute the average time intersectionSize/per query under the AND semantics
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/uniform_pruning_probability_90%Kept_PART_OF"
inputFileHandler0 = open(inputFileName,"r")
numOfQueriesEncountered = 0
totalMilliSeconds = 0
for line in inputFileHandler0.readlines():
    if line.startswith("Showing"):
        numOfQueriesEncountered += 1
        print line.strip()
        lineElements = line.strip().split("(")
        totalMilliSeconds += float( lineElements[1].strip().split(")")[0].split(" ")[0] )
        print

print "totalMilliSeconds:",totalMilliSeconds,"ms"
print "numOfQueriesEncountered:",numOfQueriesEncountered
print "average query intersectionSize:",totalMilliSeconds/numOfQueriesEncountered,"ms"
    
inputFileHandler0.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): copy the
# Xdoc-value lines for ten hard-coded TREC IDs to an output file.
# NOTE(review): if re-enabled as-is it would crash — `inputAuxFileHandler.close()`
# is live, but the matching open() under "option1" is commented out, so the
# name is never defined.
'''
# small scripts for generating the 2ed factor needed documents info
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_with_their_Xdoc_values_for_DEBUG_soalr"
outputFileHanlder = open(outputFileName,"w")

neededTrecIDsDict = {}


# option1
# fill the neededTrecIDsDict via a file
# inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_needed_for_second_factor"
# inputAuxFileHandler = open(inputAuxFileName,"r")
# for line in inputAuxFileHandler.readlines():
#     currentTrecID = line.strip()
#    if currentTrecID not in neededTrecIDsDict:
#        neededTrecIDsDict[currentTrecID] = 1
#    else:
#        pass
        # print "error, mark1"
        # exit(1)
inputAuxFileHandler.close()


# option2
# fill the neededTrecIDsDict manually. :)
# for the term soalr
neededTrecIDsDict["GX003-61-0529973"] = 1
neededTrecIDsDict["GX021-66-11626454"] = 1
neededTrecIDsDict["GX023-10-14566633"] = 1
neededTrecIDsDict["GX023-98-2547874"] = 1
neededTrecIDsDict["GX027-31-12566721"] = 1
neededTrecIDsDict["GX057-81-12345226"] = 1
neededTrecIDsDict["GX200-48-4132914"] = 1
neededTrecIDsDict["GX229-55-7098182"] = 1
neededTrecIDsDict["GX251-96-13027981"] = 1
neededTrecIDsDict["GX265-40-16483965"] = 1

print "len(neededTrecIDsDict):",len(neededTrecIDsDict)


inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value"
inputFileHandler0 = open(inputFileName,"r")

# skip the headline
inputFileHandler0.readline()

currentLine = inputFileHandler0.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    if currentTrecID in neededTrecIDsDict:
        outputFileHanlder.write(currentLine)
    currentLine = inputFileHandler0.readline()
inputFileHandler0.close()
outputFileHanlder.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): sum the
# counting probabilities (column 4) over all query terms; the result comments
# record the derived unseen-word probability mass.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler0 = open(inputFileName,"r")
# skip the headline
inputFileHandler0.readline()
accumulatedProbability = 0.0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentCountingProbability = float( lineElements[4] ) 
    accumulatedProbability += currentCountingProbability
# The result will be: 0.951587450089
# of terms in the lexicon: 37728619
# of query terms in the 100K queries: 38871
# of unseen words: 37689748
# probablity mass for the unseen words: 1 - 0.951587450089 = 0.04841255
# probability for each unseen word: 0.04841255 / 37689748 = 0.0000000012845
print "accumulatedProbability:",accumulatedProbability
inputFileHandler0.close()
exit(1)
'''

# Disabled scratch snippet (dead code inside a triple-quoted string): count
# the distinct TREC IDs (first column) in the prels file.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/prels"
inputFileHandler0 = open(inputFileName,"r")
trecIDDict = {}
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[0]
    if trecID not in trecIDDict:
        trecIDDict[trecID] = 1
    else:
        pass
print "len(trecIDDict):",len(trecIDDict) 
inputFileHandler0.close()
'''

'''
# extract the TOPK results, here is setting the K
# K can be set to 10,100,1000,10000
K = 10

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/TrainingSetForFixingTheMissingTOPKResults20130715"
outputFileHandler = open(outputFileName,"w")

# This simple part of logic is to extract those missing results in order to satisfy the prof
queryIDList = ["1","21","25","27","40","41","53","61","62","64","65","68","97","99","102","111","121","147","161","165","170","172","178","191","196","197","198","200","202","210","217","218","219","227","230","232","242","246","270","273","275","286","300","302","305"]
originalRawResultFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsHead10KANDSemanticsTOP2MResults"
originalRawResultFileHandler = open(originalRawResultFileName,"r")

queryAuxDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsHead10KANDSemanticsTOP2MResultsAccessAuxFile_20130720"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    qidInStringFormat = lineElements[0]
    # num of query results which we don't need. (lineElements[1])
    beginningPositionForThisQuery = int(lineElements[2])
    endingPositionForThisQuery = int(lineElements[3])
    currentTuple = (beginningPositionForThisQuery,endingPositionForThisQuery)
    queryAuxDict[qidInStringFormat] = currentTuple
    
print "----->","len(queryAuxDict):",len(queryAuxDict)
print "queryAuxDict['1']:",queryAuxDict['1']
print "queryAuxDict['20']:",queryAuxDict['20']

for currentQIDInStringFormat in queryIDList:
    (beginningPositionInIntFormatInOriginalRawResultFile,endingPositionInIntFormatInOriginalRawResultFile) = queryAuxDict[currentQIDInStringFormat]
    if beginningPositionInIntFormatInOriginalRawResultFile != -1 and endingPositionInIntFormatInOriginalRawResultFile != -1:
        originalRawResultFileHandler.seek(beginningPositionInIntFormatInOriginalRawResultFile)
        
        for j in range(0,3):
            originalRawResultFileHandler.readline()
        
        # print "(right line for dict):"
        # key: term index
        # value: term
        currentQueryTermIndexDict = {}
        currentLine = originalRawResultFileHandler.readline()
        currentLineElements = currentLine.strip().split(" ")
        for element in currentLineElements:
            term = element.split(":")[0]
            termIndex = int(element.split(":")[1])
            if term not in currentQueryTermIndexDict:
                currentQueryTermIndexDict[termIndex] = term
        # print "currentQueryTermIndexDict:",currentQueryTermIndexDict
        
        for j in range(4,7):
            originalRawResultFileHandler.readline()            
        
        for j in range(0,K):
            currentLineElements = originalRawResultFileHandler.readline().strip().split(" ")
            if len(currentLineElements) == 65:
                base = 1
                for i in range(0,len(currentQueryTermIndexDict)):
                    term = str( currentQueryTermIndexDict[i] )
                    internal_doc_id = str( currentLineElements[63] )
                    external_trec_id = str( currentLineElements[64] )
                    totalBM25_score = float( currentLineElements[62] )
                    # newly updated for each training posting instance
                    partialBM25_score_component_part1 = float( currentLineElements[base + 10 + i] )
                    partialBM25_score_component_part2 = float( currentLineElements[base + 10 + 10 + i] )
                    partialBM25_score = float( currentLineElements[base + 10 + 10 + 10 + i] )
                    freq_in_collection = int( currentLineElements[base + 10 + 10 + 10 + i + 10] )
                    freq_in_doc = int( currentLineElements[base + 10 + 10 + 10 + i + 10 + 10] )
                    doc_words = int( currentLineElements[61] )
                    result_rank_for_this_posting = int( currentLineElements[0] )
                    
                    outputTrainingExample = str(result_rank_for_this_posting-1) + " " + currentQIDInStringFormat + " " + external_trec_id + " " + internal_doc_id + " " + term + " " + str(partialBM25_score_component_part1) + " " + str(partialBM25_score_component_part2) + " " + str(partialBM25_score) + " " + str(freq_in_collection) + " " + str(freq_in_doc) + " " + str(doc_words) + " " + str(totalBM25_score) + " " + str(result_rank_for_this_posting)
                    # for debug
                    print outputTrainingExample
                    outputFileHandler.write(outputTrainingExample + "\n")
            else:
                pass

inputFileHandler0.close()
outputFileHandler.close()
originalRawResultFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

total2DProbability = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    current2DProbability = float(lineElements[3])
    total2DProbability += current2DProbability
print total2DProbability

inputFileHandler0.close()
'''

'''
queryIDDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"
inputFileHandler0 = open(inputFileName,"r")
for i in range(0,26):
    inputFileHandler0.readline()

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[1]
    TOP10ClassLabel = lineElements[-5]
    if TOP10ClassLabel == "True":
        if queryID not in queryIDDict:
            queryIDDict[queryID] = 1

print "len(queryIDDict):",len(queryIDDict)
inputFileHandler0.close()
'''

'''
# key: trecID
# value: none
queryIDDict ={}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Connected_Postings_Training_Set_2013_07_15_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline.train"
inputFileHandler0 = open(inputFileName,"r")
# skip the headline
inputFileHandler0.readline()
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[1]
    if queryID not in queryIDDict:
        queryIDDict[queryID] = 1
    else:
        pass

print "len(queryIDDict):",len(queryIDDict)
inputFileHandler0.close()
exit(1)
'''

'''
# small check on the file /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirCompletedPostingSet_for_debug_ONLY
print "program begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirCompletedPostingSet_for_debug_ONLY"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

for index,line in enumerate(inputFileHandler0.readlines()):
    lineElements = line.strip().split(" ")
    
    # step1: check whether the docSizeInWords has been computed correctly.
    value1InInt = int( lineElements[1] )
    value2InInt = int( lineElements[2] )
    if value1InInt == value2InInt:
        pass
    else:
        print "mark1:",value1InInt,value2InInt
    
    # step2: check whether we have enough postings actually recorded
    if value1InInt == len(lineElements[3:]):
        pass
    else:
        print "mark2:",value1InInt,len(lineElements[3:])
    
    print index,len(lineElements)

print "ALL Passed"
inputFileHandler0.close()
print "program ends."
exit(1)
'''

'''
# This logic is to create the file called: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/threeFeatureValuesForTrainingIn95KQueries
# This file contains the 3 feature values for the machine learned training purposes in head 95K Queries.
# This three features for the head 95K queries are:
# column1: query term
# column2: length_of_the_inverted_index (feature index 2)
# column3: term_freq_in_collection (feature index 3)
# column4: term_freq_in_queries (feature index 6)

queryTermLengthOfTheInvertedIndexDict = {}
queryTermTermFreqInCollectionDict = {}
queryTermTermFreqIn95KQueriesDict = {}
queryTermList = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    lengthOfTheInvertedIndex = int( lineElements[1] )
    termFreqInCollection = int( lineElements[2] )
    if queryTerm not in queryTermLengthOfTheInvertedIndexDict and queryTerm not in queryTermTermFreqInCollectionDict:
        queryTermLengthOfTheInvertedIndexDict[queryTerm] = lengthOfTheInvertedIndex
        queryTermTermFreqInCollectionDict[queryTerm] = termFreqInCollection
inputFileHandler0.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsIn100KQueries_head_95K"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqIn95KQueries = int(lineElements[1])
    if term not in queryTermTermFreqIn95KQueriesDict:
        queryTermTermFreqIn95KQueriesDict[term] = freqIn95KQueries

print "len(queryTermLengthOfTheInvertedIndexDict):",len(queryTermLengthOfTheInvertedIndexDict)
print "len(queryTermTermFreqInCollectionDict):",len(queryTermTermFreqInCollectionDict)
print "len(queryTermTermFreqIn95KQueriesDict):",len(queryTermTermFreqIn95KQueriesDict) 
inputFileHandler0.close()


outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/threeFeatureValuesForTrainingIn95KQueries"
outputFileHandler = open(outputFileName,"w")
queryTermList = queryTermTermFreqIn95KQueriesDict.keys()
queryTermList.sort(cmp=None, key=None, reverse=False)
for term in queryTermList:
    if term in queryTermTermFreqInCollectionDict:
        outputFileHandler.write( term + " " + str( queryTermLengthOfTheInvertedIndexDict[term] ) + " " + str( queryTermTermFreqInCollectionDict[term] ) + " " + str( queryTermTermFreqIn95KQueriesDict[term] ) + "\n")
    else:
        outputFileHandler.write( term + " " + "0" + " " + "0" + " " + str( queryTermTermFreqIn95KQueriesDict[term] ) + "\n")
outputFileHandler.close()
'''

'''
queryTermDict = {}
queryTermList = []

# option1
# Updated by Wei 2013/02/22
# No need to include the gov2 150 human judge queries, but ONLY consider the 100K efficiency task queries will be enough.
# note: (old answer) Now, it is NOT only the gov2 150 queries but also the efficiency task queries as well
# inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"

# option2
# note: Should contain all the queries in the 100K efficiency task query log
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_head_95K"
outputFileHandler = open(outputFileName,"w")

for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data

    queryContentElements = queryContent.strip().split(" ")
    
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in queryTermDict:
                queryTermDict[element.strip()] = 1

print "----->","len(queryTermDict):",len(queryTermDict)

queryTermList = []
queryTermList = list(queryTermDict)
queryTermList.sort(cmp=None, key=None, reverse=False)
for queryTerm in queryTermList:
    outputFileHandler.write(queryTerm + "\n")
     
inputQueryHandler.close()
outputFileHandler.close()
'''


'''
termFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsIn100KQueries_tail_95K"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreqInStringFormat = lineElements[1]
    if queryTerm not in termFreqDict:
        termFreqDict[queryTerm] = queryTermFreqInStringFormat

print "len(termFreqDict):",len(termFreqDict)
inputFileHandler0.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TrainingSet20130609"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TrainingSet20130609_fixFreq"
outputFileHandler = open(outputFileName,"w")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    # length = 16
    # print "len(lineElements):",len(lineElements)
    currentQueryTerm = lineElements[4]
    
    newOutputLine = ""
    for i in range(0,11):
        newOutputLine += lineElements[i] + " "
    newOutputLine += termFreqDict[currentQueryTerm] + " "
    for i in range(12,16):
        newOutputLine += lineElements[i] + " "
    newOutputLine = newOutputLine.strip() + "\n"
    
    outputFileHandler.write(newOutputLine)
outputFileHandler.close()
inputFileHandler0.close()
'''

'''
print "Program Begins..."
# 1st level
# key: trecID
# value: another dict
    # 2nd level
    # key: term
    # value: posting_rank_in_doc
docDictWithTermDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_debug_ONLY"
inputFileHandler0 = open(inputFileName,"r")
# ignore the headerLine
# headerLine = inputFileHandler0.readline()
# print "headerLine:",headerLine

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[0]
    docWords = int( lineElements[1] )
    docPostingRecorded = int( lineElements[2] )
    currentDocPostingsList = []
    
    if externalTrecID not in docDictWithTermDict:
        docDictWithTermDict[externalTrecID] = {}
    else:
        print "mark1"
        exit(1)

    # checking mechanism
    if docPostingRecorded == len(lineElements[3:]):
        pass
    else:
        print "docPostingRecorded:",docPostingRecorded,type(docPostingRecorded)
        print "len(lineElements[2:]):",len(lineElements[3:]),type( len(lineElements[3:]) )
        exit(1)
    
    # loading mechanism
    for posingInfoTupleInStringFormat in lineElements[3:]:
        # print "posingInfoTupleInStringFormat:",posingInfoTupleInStringFormat,type(posingInfoTupleInStringFormat)
        tupleElements = posingInfoTupleInStringFormat.split("(")[1].split(")")[0].split(",")
        term = tupleElements[0]
        partialBM25InFloatFormat = float(tupleElements[1])
        currentDocPostingsList.append( (term,partialBM25InFloatFormat) )
            
    currentDocPostingsList.sort(cmp=None, key=itemgetter(1), reverse=True)
    
    
    # checking mechanism
    # if externalTrecID == "GX000-01-8658041":
    #    print "currentDocPostingsList:",currentDocPostingsList
    
    
    for index,postingTuple in enumerate(currentDocPostingsList):
        (term,_) = postingTuple
        docDictWithTermDict[externalTrecID][term] = index + 1
    
    
    # checking mechanism
    # if externalTrecID == "GX000-01-8658041":
    #    print "docDictWithTermDict[externalTrecID]:",docDictWithTermDict[externalTrecID]
    
    
    
    # check point
    # print "len(docDictWithTermDict[",externalTrecID,"]):",len(docDictWithTermDict[externalTrecID])
    # if len(docDictWithTermDict) % 10000 == 0:
    #    print "len(docDictWithTermDict):",len(docDictWithTermDict)
    #    break
    

print "len(docDictWithTermDict):",len(docDictWithTermDict)
print "docDictWithTermDict['GX235-40-13384592']['dssr']:",docDictWithTermDict['GX235-40-13384592']['dssr']
print "docDictWithTermDict['GX235-40-13384592']['pdf']:",docDictWithTermDict['GX235-40-13384592']['pdf']
inputFileHandler0.close()
print "Program Ends."
'''

'''
docsDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query_sortedByTrecID"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    trecID = line.strip()
    if trecID not in docsDict:
        docsDict[trecID] = 1
    else:
        print "mark1"
        exit(1)

print "len(docsDict):",len(docsDict)

inputFileHandler0.close()

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_without_postings_missing_documents"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    trecID = line.strip().split(" ")[0]
    if trecID not in docsDict:
        outputFileHandler.write(line)
    else:
        pass

inputFileHandler0.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/document_set_needed_to_be_REPARSED_again_for_the_1_term_query_problem"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query"
outputFileHanlder = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[2]
    outputFileHanlder.write(externalTrecID + "\n")

inputFileHandler0.close()
outputFileHanlder.close()
'''

'''
qIDsDict = {}
queryTermsDict = {}
externalTrecIDsDict = {}


inputFileName = "/data3/obukai/workspace/web-search-engine-wei/document_set_needed_to_be_REPARSED_again_for_the_1_term_query_problem"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    qID = lineElements[0]
    queryTerm = lineElements[1]
    externalTrecID = lineElements[2]
    
    if qID not in qIDsDict:
        qIDsDict[qID] = 1
    else:
        pass
    
    if queryTerm not in queryTermsDict:
        queryTermsDict[queryTerm] = 1
    else:
        pass
    
    if externalTrecID not in externalTrecIDsDict:
        externalTrecIDsDict[externalTrecID] = 1
    else:
        pass

print "len(qIDsDict):",len(qIDsDict)
print "qIDsDict:",qIDsDict
print "len(queryTermsDict):",len(queryTermsDict)
print "queryTermsDict:",queryTermsDict
print "len(externalTrecIDsDict):",len(externalTrecIDsDict)
inputFileHandler0.close()
'''

'''
# testing: GX000-32-3662810
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler1 = open(inputFileName,"r")
for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[0]
    if externalTrecID == "GX000-32-3662810":
        print line
        exit(1)

inputFileHandler1.close()
'''

'''
# checking logic to make sure that all the trecIDs have been sorted.
# Sorted
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130613_sortedByTrecID"
# Not sorted
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612"
inputFileHandler0 = open(inputFileName,"r")
previousTrecID = "GX000-00-0000000"

for index,line in enumerate( inputFileHandler0.readlines() ):
    currentTrecID = line.strip().split(" ")[0]
    if currentTrecID >= previousTrecID:
        # that is normal
        previousTrecID = currentTrecID
    else:
        print "NOT good"
        print "index:",index

print "Passed"
inputFileHandler0.close()
'''

'''
outputLineDict = {}

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query_sortedByTrecID"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/gov2_files_with_CAT_command_for_the_missing_posting_documents_for_1_term_query"
outputFileHandler = open(outputFileName,"w")

outputFileHandler.write("19" + "\n")
for line in inputFileHandler0.readlines():
    trecIDElements = line.strip().split("-")
    gov2FolderName = trecIDElements[0]
    gov2SegmentName = trecIDElements[1]
    outputLine = "/data/jhe/trecdata/" + gov2FolderName + "/" + gov2SegmentName + ".gz" + "\n"
    if outputLine not in outputLineDict:
        outputLineDict[outputLine] = 1
        outputFileHandler.write(outputLine)
    else:
        # already write to the file
        pass

print "len(outputLineDict):",len(outputLineDict)
inputFileHandler0.close()
outputFileHandler.close()
'''

'''
print "Program Begins..."
trecIDsDictFromNeeded = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.Combine.train_tfqAdded_labelsAdded_rankInListAdded_sortedByExternalTrecID"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[2]
    if trecID not in trecIDsDictFromNeeded:
        trecIDsDictFromNeeded[trecID] = 1
    else:
        pass

print "len(trecIDsDictFromNeeded):",len(trecIDsDictFromNeeded)
# print "trecIDsDictFromNeeded:",trecIDsDictFromNeeded
inputFileHandler0.close()

trecIDsDictFromActual = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler0 = open(inputFileName,"r")

# ignore the headline
headerLine = inputFileHandler0.readline()

for line in inputFileHandler0.readlines():
    if line.startswith("trecID"):
        print "Encounter the headline"
    else:
        lineElements = line.strip().split(" ")
        trecID = lineElements[0]
        if trecID not in trecIDsDictFromActual:
            trecIDsDictFromActual[trecID] = 1
        else:
            pass

print "len(trecIDsDictFromActual):",len(trecIDsDictFromActual)
# print "trecIDsDictFromActual:",trecIDsDictFromActual
inputFileHandler0.close()


missingDocumentsCount = 0
for trecID in trecIDsDictFromNeeded:
    if trecID not in trecIDsDictFromActual:
        missingDocumentsCount += 1
        print "missing:",trecID
    else:
        # It is in the trecIDsDictFromActual, so that is OK
        pass

print "missingDocumentsCount:",missingDocumentsCount
print "Program Ends."
'''




'''
# This part of logic is to verify the file located and called: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm"
inputFileHanlder = open(inputFileName,"r")
totalProbability = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentProbability = float(lineElements[1])
    totalProbability += currentProbability
print "totalProbability:",totalProbability
inputFileHanlder.close()
exit(1)
'''

'''
# UNDER CONSTRUCTION: not finished — please do NOT use.
# This part of the logic is to fix the ricardo thing
basePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/"
fileName1 = basePath + "CellROW1_0QueryTermsWithMetaInfo"
fileName2 = basePath + "CellROW2_0QueryTermsWithMetaInfo"
fileName3 = basePath + "CellROW3_0QueryTermsWithMetaInfo"
fileName4 = basePath + "CellROW4_0QueryTermsWithMetaInfo"
fileName5 = basePath + "CellROW5_0QueryTermsWithMetaInfo"

processingFileNameList = []
processingFileNameList.append(fileName1)
processingFileNameList.append(fileName2)
processingFileNameList.append(fileName3)
processingFileNameList.append(fileName4)
processingFileNameList.append(fileName5)

for fileName in processingFileNameList:
    inputFileHandler0 = open(fileName,"r")
    newOutputLine = ""
    for line in inputFileHandler0.readlines():
        lineElements = line.strip().split(" ")
        
    inputFileHandler0.close()
'''
    


# find those 36872359 terms with the key ROW1_0 in that cell.
# ROW1_0 [0,4]:36872359
# the length of the inverted list: [1,100)

# find those 688369 terms with the key ROW2_0 in that cell.
# ROW2_0 [5,9]:688369
# the length of the inverted list: [100,665)

# find those 87252 terms with the key ROW3_0 in that cell.
# ROW3_0 [10,25]:87252
# the length of the inverted list: [665,2473)

# find those 32085 terms with the key ROW4_0 in that cell.
# ROW4_0 [26,64]:32085
# the length of the inverted list: [2473,9964)

# find those 13296 terms with the key ROW5_0 in that cell.
# ROW5_0 [65,999]:13296
# the length of the inverted list: [9964,25205179]

'''
lowerUpperBoundTupleList = []
lowerUpperBoundTupleList.append( ("ROW1_0",1,100) )
lowerUpperBoundTupleList.append( ("ROW2_0",100,665) )
lowerUpperBoundTupleList.append( ("ROW3_0",665,2473) )
lowerUpperBoundTupleList.append( ("ROW4_0",2473,9964) )
lowerUpperBoundTupleList.append( ("ROW5_0",9964,25205179) )
print "len(lowerUpperBoundTupleList):",len(lowerUpperBoundTupleList)
'''

'''
# This part of logic is to output the query terms which are in the CellROW1_0, CellROW2_0, CellROW3_0, CellROW4_0, CellROW5_0
queryTermWithTheirRealFreqIn85KQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] = queryTermFreq
    else:
        print "error,Mark3"
        exit(1)
print "len(queryTermWithTheirRealFreqIn85KQueriesDict):",len(queryTermWithTheirRealFreqIn85KQueriesDict)
inputFileHandler0.close()

outputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW1_0QueryTermsWithMetaInfo"
outputFileHandler1 = open(outputFileName1,"w")

outputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW2_0QueryTermsWithMetaInfo"
outputFileHandler2 = open(outputFileName2,"w")

outputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW3_0QueryTermsWithMetaInfo"
outputFileHandler3 = open(outputFileName3,"w")

outputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW4_0QueryTermsWithMetaInfo"
outputFileHandler4 = open(outputFileName4,"w")



# add the file called: 
numOfCurrentQueryTermsInCellROW1_0 = 0
numOfCurrentQueryTermsInCellROW2_0 = 0
numOfCurrentQueryTermsInCellROW3_0 = 0
numOfCurrentQueryTermsInCellROW4_0 = 0
THE_CELL_ROW1_0_PROBABILITY = 2.7962236405996843e-10
THE_CELL_ROW2_0_PROBABILITY = 1.6040397629079132e-08
THE_CELL_ROW3_0_PROBABILITY = 1.1538218688794478e-07
THE_CELL_ROW4_0_PROBABILITY = 3.2578867212223025e-07
VERY_BIG_NUMBER = 99999999

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermListLength = int( lineElements[1] )
    
    #lowerUpperBoundTupleList.append( ("ROW1_0",1,100) )
    #lowerUpperBoundTupleList.append( ("ROW2_0",100,665) )
    #lowerUpperBoundTupleList.append( ("ROW3_0",665,2473) )
    #lowerUpperBoundTupleList.append( ("ROW4_0",2473,9964) )
    
    # for ROW1_0
    if queryTermListLength >= 1 and queryTermListLength < 100 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW1_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler1.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW1_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
    
    # for ROW2_0
    if queryTermListLength >= 100 and queryTermListLength < 665 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW2_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler2.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW2_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
        
    # for ROW3_0
    if queryTermListLength >= 665 and queryTermListLength < 2473 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW3_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler3.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW3_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
    
    # for ROW4_0
    if queryTermListLength >= 2473 and queryTermListLength < 9964 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW4_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler4.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW4_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break 

print "numOfCurrentQueryTermsInCellROW4_0:",numOfCurrentQueryTermsInCellROW4_0
print "numOfCurrentQueryTermsInCellROW3_0:",numOfCurrentQueryTermsInCellROW3_0
print "numOfCurrentQueryTermsInCellROW2_0:",numOfCurrentQueryTermsInCellROW2_0
print "numOfCurrentQueryTermsInCellROW1_0:",numOfCurrentQueryTermsInCellROW1_0

outputFileHandler1.close()
outputFileHandler2.close()
outputFileHandler3.close()
outputFileHandler4.close()    
inputFileHandler0.close()
'''

'''
# This part of logic:
# compute the num Of Query Terms Out Of Lexicon Beside 85K Queries
numOfQueryTermsOutOfLexiconBeside85KQueries = 0

queryTermNOTExistedInLexiconDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWhichNOTExistedInTheCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[-1]
    if queryTerm not in queryTermNOTExistedInLexiconDict:
        queryTermNOTExistedInLexiconDict[queryTerm] = 1

print "len(queryTermNOTExistedInLexiconDict):",len(queryTermNOTExistedInLexiconDict)
inputAuxFileHandler.close()

# check the number 63(out of lexicon terms in the last 15% Queries)
basePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/"
file1Path = basePath + "100KQueries_1_10%"
file2Path = basePath + "100KQueries_2_4%"
file3Path = basePath + "100KQueries_3_1%"
fileNameList = []
fileNameList.append(file1Path)
fileNameList.append(file2Path)
fileNameList.append(file3Path)
for fileName in fileNameList:
    fileHandler = open(fileName,"r")
    for line in fileHandler.readlines():
        
        queryID = line.strip().split(":")[0]
        queryTermList = line.strip().split(":")[1].strip().split(" ")
        # print "queryTermList:",queryTermList
        
        data = ""
        for element in queryTermList:
            data += element + " "
        
        # print "data(old):",data
        # print "original data:",data
        
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]
    
        # print "data(new):",data
        
        currentNewQueryTermList = data.strip().split(" ")
        currentNewQueryTermDict = {}
        
        for queryTerm in currentNewQueryTermList:
            if queryTerm.strip() != "":
                queryTermLower = queryTerm.lower()
                if queryTermLower not in currentNewQueryTermDict:
                    currentNewQueryTermDict[queryTermLower] = 1
    
        for queryTerm in currentNewQueryTermDict:
            # do sth.
            if queryTerm in queryTermNOTExistedInLexiconDict:
                numOfQueryTermsOutOfLexiconBeside85KQueries += 1

    fileHandler.close()

print "numOfQueryTermsOutOfLexiconBeside85KQueries:",numOfQueryTermsOutOfLexiconBeside85KQueries
'''

'''
# This part of the logic computes the number of queries that cannot be
# answered, i.e. that contain at least one term missing from the lexicon.
queryTermWhichNOTExistedInLexiconDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWhichNOTExistedInTheCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")

for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[-1]
    if queryTerm not in queryTermWhichNOTExistedInLexiconDict:
        queryTermWhichNOTExistedInLexiconDict[queryTerm] = 1

print "len(queryTermWhichNOTExistedInLexiconDict):",len(queryTermWhichNOTExistedInLexiconDict)
# print "queryTermWhichNOTExistedInLexiconDict:",queryTermWhichNOTExistedInLexiconDict
inputAuxFileHandler.close()



inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries"
inputFileHandler0 = open(inputFileName,"r")
numOfQueriesCanNOTBeAnswered = 0
queriesCanNOTBeAnsweredList = []
for line in inputFileHandler0.readlines():
    # print "line:",line.strip()
    queryID = line.strip().split(":")[0]
    queryTermList = line.strip().split(":")[1].strip().split(" ")
    # print "queryTermList:",queryTermList
    
    data = ""
    for element in queryTermList:
        data += element + " "
    
    # print "data(old):",data
    # print "original data:",data
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    # print "data(new):",data
    
    currentNewQueryTermList = data.strip().split(" ")
    currentNewQueryTermDict = {}
    
    for queryTerm in currentNewQueryTermList:
        if queryTerm.strip() != "":
            queryTermLower = queryTerm.lower()
            if queryTermLower not in currentNewQueryTermDict:
                currentNewQueryTermDict[queryTermLower] = 1

    for queryTerm in currentNewQueryTermDict:
        # whether it is in the dict or not
        if queryTerm in queryTermWhichNOTExistedInLexiconDict:
            numOfQueriesCanNOTBeAnswered += 1
            queriesCanNOTBeAnsweredList.append( line.strip() )
            break

print "numOfQueriesCanNOTBeAnswered:",numOfQueriesCanNOTBeAnswered
# print "queriesCanNOTBeAnsweredList:",queriesCanNOTBeAnsweredList
inputFileHandler0.close()
'''


'''
# This part of logic is to produce the cellProbabilityTupleList
cellProbabilityTupleList = []
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    if not line.strip().startswith("SUM"):
        lineElements = line.strip().split(" ")
        cellKey = lineElements[0][3:]
        probability = float(lineElements[1])
        cellKeyProbabilityTuple = (cellKey,probability)
        cellProbabilityTupleList.append(cellKeyProbabilityTuple)
    else:
        # the line starts with "SUM", so just ignore it
        pass



cellProbabilityTupleList.sort(cmp=None, key=itemgetter(1), reverse=False)
for tuple in cellProbabilityTupleList:
    print tuple

print "len(cellProbabilityTupleList):",len(cellProbabilityTupleList)
inputFileHandler0.close()
'''




'''
# This part of logic is to count:
# the total number of postings
# the total number of terms in the lexicon
print "program begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")
dataLine = inputFileHandler0.readline()
totalNumPostings = 0
originalLineCounter = 0
while dataLine:
    totalNumPostings += int( dataLine.strip().split(" ")[1] )
    originalLineCounter += 1
    dataLine = inputFileHandler0.readline()
    # if originalLineCounter == 2:
    #    break

print "totalNumPostings:",totalNumPostings
print "originalLineCounter:",originalLineCounter

inputFileHandler0.close()
print "program ends."
'''

'''
# This temporary logic combines the 1D and 2D predicted probabilities into one file.
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D"
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity2D"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D"

inputFileHanlder1 = open(inputFileName1,"r")
inputFileHanlder2 = open(inputFileName2,"r")
outputFileHandler = open(outputFileName,"w")

lineFromFile1 = inputFileHanlder1.readline()
lineFromFile2 = inputFileHanlder2.readline()

outputFileHandler.write("queryTerm goldStandardRealProbability 1D 2D" + "\n")


while lineFromFile1:
    lineElementsFromFile1 = lineFromFile1.strip().split(" ")
    lineElementsFromFile2 = lineFromFile2.strip().split(" ")
    if lineElementsFromFile1[0] == lineElementsFromFile2[0]:
        outputFileHandler.write( lineFromFile1.strip() + " " + lineElementsFromFile2[2] + "\n")
    lineFromFile1 = inputFileHanlder1.readline()
    lineFromFile2 = inputFileHanlder2.readline()
inputFileHanlder1.close()
inputFileHanlder2.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_withProbablityAdded"

inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    goldStandardProbability = int( lineElements[1] ) / 413533
    outputFileHandler.write(line.strip() + " " + str(goldStandardProbability) + "\n")

inputFileHandler0.close()    
outputFileHandler.close()
'''


'''
# easy check of the totalFreq
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_3_1%"
inputFileHandler0 = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    totalFreq += int( lineElements[1] )
print "totalFreq:",totalFreq

inputFileHandler0.close()
'''

'''
# Purpose not recorded; the code below sums (probability * num_of_occurences)
# over a Good-Turing output file — presumably a total-probability sanity check.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_good_turing_output"
inputFileHandler0 = open(inputFileName,"r")

# ignore the file headlines
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()

currentTotalProbability = 0.0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    num_of_occurences = int( lineElements[1] )
    probability = float( lineElements[3] )
    currentTotalProbability += probability * num_of_occurences

print "currentTotalProbability:",currentTotalProbability
inputFileHandler0.close()
exit(1)
'''

'''
# This part of logic is to compute # of UNIQUE TERMS SEEN:
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_sortedByFreqR"
inputFileHandler0 = open(inputFileName,"r")
totalFreq = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int( lineElements[1] )
    totalFreq += currentFreq

print "totalFreq:",totalFreq
inputFileHandler0.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheLatestProbabilitySettings1DProbabilityAdded"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()
lastColumnProbabilityDict = {}
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    # probability = float( lineElements[-1] )
    probability = float( lineElements[2] )
    if probability not in lastColumnProbabilityDict:
        lastColumnProbabilityDict[probability] = 1
    else:
        lastColumnProbabilityDict[probability] += 1

print "len(lastColumnProbabilityDict):",len(lastColumnProbabilityDict)
# print "lastColumnProbabilityDict:",lastColumnProbabilityDict

# print "lastColumnProbabilityDict['1.3870025585078873e-09']:",lastColumnProbabilityDict['1.3870025585078873e-09']
# print "lastColumnProbabilityDict['1.32367943426e-06']:",lastColumnProbabilityDict['1.32367943426e-06']
# print "lastColumnProbabilityDict['4.13057762416e-06']:",lastColumnProbabilityDict['4.13057762416e-06']
# print "lastColumnProbabilityDict['6.56625976833e-06']:",lastColumnProbabilityDict['6.56625976833e-06']
# print "..."
# print "lastColumnProbabilityDict['4.70009360308e-05']:",lastColumnProbabilityDict['4.70009360308e-05']

inputFileHandler0.close()
exit(1)
'''


'''
# This part of the logic checks the correctness of the final probability.
z1 = 4.89033137093e-06
print 1 - math.pow((1 - z1),41221),0.182565789474
exit(1)
'''

'''
DIVIDING_FACTOR (add-one) is: 0.0072427900025
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
inputFileHanlder.readline()
currentTotalProbablity = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentProbability = float(lineElements[4])
    currentTotalProbablity += currentProbability
print "currentTotalProbablity:",currentTotalProbablity
inputFileHanlder.close()
'''

'''
# This part of logic is to check the correctness of the normalized probability for the KL measure.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded_withNormalizedPredictedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
# ignore the head line
inputFileHanlder.readline()
currentTotalProbability = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    normalizedProbability = float(lineElements[5])
    currentTotalProbability += normalizedProbability

print "currentTotalProbability:",currentTotalProbability
inputFileHanlder.close()
'''

'''
# DIVIDING_FACTOR = 0.749559731735
DIVIDING_FACTOR = 0.0072427900025
# This part of logic is to normalize the estimated probability in order to fit into the KL measure.
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded_withNormalizedPredictedProbabilityAdded"
outputFileHanlder = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
oldHeadLine = inputFileHanlder.readline().strip()
newHeadLine = oldHeadLine + " " + "addOneNormalizedProbablity" + "\n"
outputFileHanlder.write(newHeadLine)
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    unNormalizedProbability = float(lineElements[4])
    newNormalizedProbability = unNormalizedProbability / DIVIDING_FACTOR
    outputFileHanlder.write(line.strip() + " " + str(newNormalizedProbability) + "\n")

inputFileHanlder.close()
outputFileHanlder.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_with_query_terms"
inputFileHandler0 = open(inputFileName,"r")
cellKeyWithQueryTermListDict = {}
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0]
    numOfQueryTerms = int( lineElements[1] )
    if cellKey not in cellKeyWithQueryTermListDict:
        cellKeyWithQueryTermListDict[cellKey] = []
        for i in range(0+2,0+2+numOfQueryTerms):
             cellKeyWithQueryTermListDict[cellKey].append( lineElements[i] )
    else:
        print "Unexpected Behaviour."
        exit(1)
inputFileHandler0.close()
print "len(cellKeyWithQueryTermListDict):",len(cellKeyWithQueryTermListDict)
# print "cellKeyWithQueryTermListDict:",cellKeyWithQueryTermListDict

list1 = []
for i in range(0,5):
    cellKey = "0" + "_" + str(i)
    list1 += cellKeyWithQueryTermListDict[cellKey]

list2 = []
for i in range(5,10):
    cellKey = "0" + "_" + str(i)
    list2 += cellKeyWithQueryTermListDict[cellKey]
    
list3 = []
for i in range(10,26):
    cellKey = "0" + "_" + str(i)
    list3 += cellKeyWithQueryTermListDict[cellKey]

list4 = []
for i in range(26,65):
    cellKey = "0" + "_" + str(i)
    list4 += cellKeyWithQueryTermListDict[cellKey]

list5 = []
for i in range(65,1000):
    cellKey = "0" + "_" + str(i)
    list5 += cellKeyWithQueryTermListDict[cellKey]

set1Original = set(list1)
set2Original = set(list2)
set3Original = set(list3)
set4Original = set(list4)
set5Original = set(list5)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/fixingTheMisCountingProblemInfo20130420"
inputFileHandler0 = open(inputFileName,"r")

listsDict = {}
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    number = int(lineElements[1])
    if key not in listsDict:
        listsDict[key] = []
        for i in range(2,2+number):
            listsDict[key].append(lineElements[i])

print "original:"
print "ROW1_0 [0,4]:",len(set1Original)
print "ROW2_0 [5,9]:",len(set2Original)
print "ROW3_0 [10,25]:",len(set3Original)
print "ROW4_0 [26,64]:",len(set4Original)
print "ROW5_0 [65,999]:",len(set5Original)

set1Exclude = set(listsDict["ROW1_0"])
set2Exclude = set(listsDict["ROW2_0"])
set3Exclude = set(listsDict["ROW3_0"])
set4Exclude = set(listsDict["ROW4_0"])
set5Exclude = set(listsDict["ROW5_0"])

print "exclude:"
print "ROW1_0 [0,4]:",len(set1Exclude)
print "ROW2_0 [5,9]:",len(set2Exclude)
print "ROW3_0 [10,25]:",len(set3Exclude)
print "ROW4_0 [26,64]:",len(set4Exclude)
print "ROW5_0 [65,999]:",len(set5Exclude)

print "rest:"
print "ROW1_0 [0,4]:",len(set1Original) - len( set1Original.intersection(set1Exclude) )
print "ROW2_0 [5,9]:",len(set2Original) - len( set2Original.intersection(set2Exclude) )
print "ROW3_0 [10,25]:",len(set3Original) - len( set3Original.intersection(set3Exclude) )
print "ROW4_0 [26,64]:",len(set4Original) - len( set4Original.intersection(set4Exclude) )
print "ROW5_0 [65,999]:",len(set5Original) - len( set5Original.intersection(set5Exclude) )

print "listsDict['ROW3_0']:",listsDict["ROW3_0"]
print "set5Original:",set5Original
print "set5Exclude:",set5Exclude
inputFileHandler0.close()
'''

'''
# Purpose: compute the set of query terms which have been miscounted in the unseen columns.
# NOTE(review): the writer below emits row keys "ROW0_0".."ROW4_0" (i starts at 0),
# while the reader of this file looks up "ROW1_0".."ROW5_0" — confirm this off-by-one.
# The following are the results which have been mis-counted:
# len(queryTermWithFreqInCollectionDict): 38871
# numForCellKeyROW1_0: 0
# numForCellKeyROW2_0: 0
# numForCellKeyROW3_0: 3
# numForCellKeyROW4_0: 28
# numForCellKeyROW5_0: 2475

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/fixingTheMisCountingProblemInfo20130420"
outputFileHandler = open(outputFileName,"w")


queryTermWithFreqInCollectionDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")

for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freqInCollection = int( lineElements[1] )
    if queryTerm not in queryTermWithFreqInCollectionDict:
        queryTermWithFreqInCollectionDict[queryTerm] = freqInCollection

print "len(queryTermWithFreqInCollectionDict):",len(queryTermWithFreqInCollectionDict)
inputAuxFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByRealProbability"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfQueryTermsProcessed = 0

numForCellKeyROW1_0 = 0
numForCellKeyROW2_0 = 0
numForCellKeyROW3_0 = 0
numForCellKeyROW4_0 = 0
numForCellKeyROW5_0 = 0

queryTermListForCellKeyROW1_0 = []
queryTermListForCellKeyROW2_0 = []
queryTermListForCellKeyROW3_0 = []
queryTermListForCellKeyROW4_0 = []
queryTermListForCellKeyROW5_0 = []


for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freq = int( lineElements[1] ) 
    if freq >= 20:
       totalNumOfQueryTermsProcessed += 1
       # ROW1 [0,4] [1,100)
       # ROW2 [5,9] [100,665)
       # ROW3 [10,25] [665,2473)
       # ROW4 [26,64] [2473,9964)
       # ROW5 [65,999] [9964,25205179)
       if queryTermWithFreqInCollectionDict[queryTerm] >= 1 and queryTermWithFreqInCollectionDict[queryTerm] < 100:
           numForCellKeyROW1_0 += 1
           queryTermListForCellKeyROW1_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 100 and queryTermWithFreqInCollectionDict[queryTerm] < 665:
           numForCellKeyROW2_0 += 1
           queryTermListForCellKeyROW2_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 665 and queryTermWithFreqInCollectionDict[queryTerm] < 2473:
           numForCellKeyROW3_0 += 1
           queryTermListForCellKeyROW3_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 2473 and queryTermWithFreqInCollectionDict[queryTerm] < 9964:
           numForCellKeyROW4_0 += 1
           queryTermListForCellKeyROW4_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 9964 and queryTermWithFreqInCollectionDict[queryTerm] <= 25205179:
           numForCellKeyROW5_0 += 1
           queryTermListForCellKeyROW5_0.append(queryTerm)

print "numForCellKeyROW1_0:",numForCellKeyROW1_0
print "numForCellKeyROW2_0:",numForCellKeyROW2_0
print "numForCellKeyROW3_0:",numForCellKeyROW3_0
print "numForCellKeyROW4_0:",numForCellKeyROW4_0
print "numForCellKeyROW5_0:",numForCellKeyROW5_0


# print "queryTermListForCellKeyROW1_0:",queryTermListForCellKeyROW1_0
# print "queryTermListForCellKeyROW2_0:",queryTermListForCellKeyROW2_0
# print "queryTermListForCellKeyROW3_0:",queryTermListForCellKeyROW3_0
# print "queryTermListForCellKeyROW4_0:",queryTermListForCellKeyROW4_0
# print "queryTermListForCellKeyROW5_0:",queryTermListForCellKeyROW5_0


tempList = []
tempList.append(queryTermListForCellKeyROW1_0)
tempList.append(queryTermListForCellKeyROW2_0)
tempList.append(queryTermListForCellKeyROW3_0)
tempList.append(queryTermListForCellKeyROW4_0)
tempList.append(queryTermListForCellKeyROW5_0)

for i in range(0,5):
    outputLine = "ROW" + str(i) + "_0" + " "
    outputLine += str( len( tempList[i] ) ) + " "
    for queryTerm in tempList[i]:
        outputLine += queryTerm + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

inputFileHandler0.close()
outputFileHandler.close()
'''

'''
# TODO: need to subtract the query terms that have been double-counted.
# This part of logic is to compute the final probability mass for the whole predicted area
# This step0 is to add the missing but popular query term probability mass into consideration.
print "step0"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByRealProbability"
inputFileHandler0 = open(inputFileName,"r")
part2TotalProbabilityShouldBeAdded = 0.0
part2NumberOfQueryTerms = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freq = int( lineElements[1] )
    probability = float( lineElements[2] )
    if freq >= 20:
        part2TotalProbabilityShouldBeAdded += probability
        part2NumberOfQueryTerms += 1
print "part2TotalProbabilityShouldBeAdded:",part2TotalProbabilityShouldBeAdded

inputFileHandler0.close()

print "step1"
cellKeyWithItsProbabilityDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0]
    cellProbabilityValue = float(lineElements[1])
    if cellKey not in cellKeyWithItsProbabilityDict:
        cellKeyWithItsProbabilityDict[cellKey] = cellProbabilityValue
    else:
        pass
print "len(cellKeyWithItsProbabilityDict):",len(cellKeyWithItsProbabilityDict)
inputFileHandler0.close()

print "step2"
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5"]
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130420_fixed"
inputFileHandler0 = open(inputFileName,"r")
dataLine = inputFileHandler0.readline()
while not dataLine.strip().startswith("table:denominator:freqOfFreqForTheLexiconTerm"):
    dataLine = inputFileHandler0.readline()
# print "mark1:"
# print dataLine
# print "mark2:"
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()
inputFileHandler0.readline()

cellCorrespondingRangesDict = {}
cellCorrespondingFreqDict = {}

for classLabel in classLabelList:
    rowLineData = inputFileHandler0.readline().strip()
    rowLineDataElements = rowLineData.split(" ")
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        if cellKey not in cellCorrespondingRangesDict:
            cellCorrespondingRangesDict[cellKey] = rowLineDataElements[i+1].split(":")[0]
            cellCorrespondingFreqDict[cellKey] = int( rowLineDataElements[i+1].split(":")[1] )

print "example key:ROW1_6 value:[0,73]"
print "len(cellCorrespondingRangesDict):",len(cellCorrespondingRangesDict)
print 

print "example key:ROW1_6 value:0.38235294117599999,"
print "len(cellCorrespondingFreqDict):",len(cellCorrespondingFreqDict)
print 

# print "cellCorrespondingRangesDict:",cellCorrespondingRangesDict
# print "cellCorrespondingFreqDict:",cellCorrespondingFreqDict

inputFileHandler0.close()

print "step3: the nervous time is coming(Passed)"
part1NumOfQueryTermsCounted = 0
originalCellsTotalProbability = 0.0
for classLabel in classLabelList:
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        currentCellTotalProbability = cellKeyWithItsProbabilityDict[cellKey] * cellCorrespondingFreqDict[cellKey]
        part1NumOfQueryTermsCounted += cellCorrespondingFreqDict[cellKey]
        
        # for debug ONLY section
        # print "cellKey:",cellKey
        # print "cellKeyWithItsProbabilityDict[cellKey]:",cellKeyWithItsProbabilityDict[cellKey]
        # print "cellCorrespondingFreqDict[cellKey]:",cellCorrespondingFreqDict[cellKey]
        # print "currentCellTotalProbability:",currentCellTotalProbability
        
        originalCellsTotalProbability += currentCellTotalProbability

print
print "part1CellsTotalProbability:",originalCellsTotalProbability
print "part2TotalProbabilityShouldBeAdded:",part2TotalProbabilityShouldBeAdded
print "part1CellsTotalProbability+part2TotalProbabilityShouldBeAdded:",originalCellsTotalProbability + part2TotalProbabilityShouldBeAdded
print "part1NumOfQueryTermsCounted:",part1NumOfQueryTermsCounted
print "part2NumOfQueryTermsCounted:",part2NumberOfQueryTerms
print "total number of query terms processed:",part1NumOfQueryTermsCounted + part2NumberOfQueryTerms
'''

'''
# Purpose: compute the total # of unique term positions for a specific set of queries.
# input option1
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTermFreq"

# input option2
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_1_10%_sortedByQueryTermFreq"

# input option3
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_sortedByQueryTermFreq"

inputFileHandler0 = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realFreq = int( lineElements[1] )
    totalFreq += realFreq
print "totalFreq:",totalFreq

inputFileHandler0.close()
'''



'''
# Purpose: write the final probabilities out in a 2D table format.
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5"]
queryTermWithProbabilityDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    cellkey = lineElements[0]
    cellkeyProbablity = float( lineElements[1] )
    if cellkey not in queryTermWithProbabilityDict:
        queryTermWithProbabilityDict[cellkey] = cellkeyProbablity
print "len(queryTermWithProbabilityDict):",len(queryTermWithProbabilityDict)
inputFileHandler0.close()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418_2D_table_format"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write("table:probability(final)" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        outputLine += str( queryTermWithProbabilityDict[cellKey] ) + " "
        
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

outputFileHandler.close()
'''

'''
# Purpose: easily compute the real probability for the query terms whose frequency is greater than or equal to 20.
# I will NOT delete the following statement, because I once got it wrong,
# and I want to keep it here as a reminder NOT to make such mistakes again.
# (This is the wrong statement) totalNumOfQueries = 10000
totalNumberOfQueryTermPositionFor85KQueries = 351734

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByQueryTermFreq"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_sortedByQueryTermFreq"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    freq = int( lineElements[1] )
    if freq >= 20:
        # The following is a wrong statement
        # (This is the wrong statement) newProbability = freq / totalNumOfQueries
        newProbability = freq / totalNumberOfQueryTermPositionFor85KQueries
        outputFileHandler.write(line.strip() + " " + str( newProbability ) + "\n")
    else:
        newProbability = -1.0
        outputFileHandler.write(line.strip() + " " + str( newProbability ) + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): two-round step-halving
# search for z such that (1 - z)^41221 == trueProbability. Round 1 shrinks z by
# half each step until the predicted value crosses the target; round 2 refines
# the bracketing bound with a much smaller step (1/1e7 of the current value).
'''
# Solve[(1 - z)^41221 == 0.85593220339, z]
print "Let's solve this problem here."
# set the init value to z = 0.5
# init value:
z = 0.5
powerNumber = 41221
trueProbability = 0.710526315789
previousValueOfZ = 0.0
previousPredictivedValue = 0.0
currentPredictivedValue = math.pow((1-z),powerNumber)
STEP_PARAMETER_for_LOOP1 = 2

# The most beautiful bound with the STEP_PARAMETER_for_LOOP2 set to be 1000000
STEP_PARAMETER_for_LOOP2 = 10000000

while currentPredictivedValue < trueProbability:
    # backup the old things
    previousValueOfZ = z
    previousPredictivedValue = currentPredictivedValue
    
    # update the new things
    z = z - z / STEP_PARAMETER_for_LOOP1
    currentPredictivedValue = math.pow((1-z),powerNumber)
    # print z,previousPredictivedValue,currentPredictivedValue,trueProbability

print "bounds produced for the round1:"
print previousValueOfZ,previousPredictivedValue,trueProbability
print z,currentPredictivedValue,trueProbability

print "**********"
# let's try the effects of make things more precise
while previousPredictivedValue < trueProbability:
    # backup the old things
    previousValueOfZ2 = previousValueOfZ
    previousPredictivedValue2 = previousPredictivedValue
    
    # update the new things
    previousValueOfZ = previousValueOfZ - previousValueOfZ / STEP_PARAMETER_for_LOOP2
    previousPredictivedValue = math.pow((1 - previousValueOfZ),powerNumber)
    # print previousValueOfZ,previousPredictivedValue2,previousPredictivedValue,trueProbability

print "bounds produced for the round2:"
print previousValueOfZ2,previousPredictivedValue2,trueProbability
print previousValueOfZ,previousPredictivedValue,trueProbability
print "**********"
'''

# Disabled snippet (string literal, never executed): loads a 6x20 probability
# table (rows ROW1..ROW5 + SUM, 20 frequency columns, cells formatted
# "range:probability") and emits one Wolfram-style equation
# "Solve[(1 - z)^41221 == <cell probability>, z]" per cell for external solving.
# Note the author's own warning: the stored probability should have been
# 1 - probability.
'''
# Updated by Wei 2013/04/18,THE PROBABILITY PROVIDED HERE IS NOT RIGHT and SHOULD BE 1 - probability
# This is OBSERVERED again by my dear prof, Torsten !
# This part of logic is to produce equations for Juan to compute.
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5","SUM"]
cellCorrespondingRangesDict = {}
cellCorrespondingProbabilityDict = {}

# This part of logic is to prepare some high order equations with one variable for Juan to compute
# step1: load the probability into the dict first.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130413"
inputFileHandler0 = open(inputFileName,"r")
dataLine = inputFileHandler0.readline()
while not dataLine.strip().startswith("table:probability"):
    dataLine = inputFileHandler0.readline()
# print "mark1:"
# print dataLine
# print "mark2:"
inputFileHandler0.readline()
inputFileHandler0.readline()

for classLabel in classLabelList:
    rowLineData = inputFileHandler0.readline().strip()
    rowLineDataElements = rowLineData.split(" ")
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        if cellKey not in cellCorrespondingRangesDict:
            cellCorrespondingRangesDict[cellKey] = rowLineDataElements[i+1].split(":")[0]
            cellCorrespondingProbabilityDict[cellKey] = float( rowLineDataElements[i+1].split(":")[1] )

print "example key:ROW1_6 value:[0,73]"
print "len(cellCorrespondingRangesDict):",len(cellCorrespondingRangesDict)
print 

print "example key:ROW1_6 value:0.38235294117599999"
print "len(cellCorrespondingProbabilityDict):",len(cellCorrespondingProbabilityDict)
print 
# print "cellCorrespondingRangesDict:",cellCorrespondingRangesDict

inputFileHandler0.close()

# step2: produce the equations for Juan to compute
NUM_OF_QUERY_TERM_POSITIONS = 41221
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/equationsForJuanToSolve"
outputFileHandler = open(outputFileName,"w")
equationInWolframFormatPart1 = "Solve[(1 - z)^" + str(NUM_OF_QUERY_TERM_POSITIONS) + " == "
equationInWolframFormatPart2 = ", z]"
wholeCompleteEquationInWolframFormat = ""
for classLabel in classLabelList:
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        print cellKey
        outputFileHandler.write(cellKey + "\n")
        
        wholeCompleteEquationInWolframFormat = equationInWolframFormatPart1 + str(cellCorrespondingProbabilityDict[cellKey]) + equationInWolframFormatPart2
        print wholeCompleteEquationInWolframFormat
        outputFileHandler.write(wholeCompleteEquationInWolframFormat + "\n")
        
        print
        outputFileHandler.write("\n")
        
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): sums the second column
# (frequency) of a query-term-frequency file and prints the total.
'''
# Note: the # of unique query terms among one query(prof's query term positions) are 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_1_10%"
inputFileHandler0 = open(inputFileName,"r")
totalFreq = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[1])
    totalFreq += currentFreq
print "totalFreq:",totalFreq
inputFileHandler0.close()
'''

# Disabled snippet (string literal, never executed): rewrites the probability
# column of the 4K-query file as freq / 16442, where 16442 is stated to be the
# number of unique query term positions. Relies on the file-level
# `from __future__ import division` for true division.
'''
# Note: the # of unique query terms among one query(prof's query term positions) are 16442
# This part of logic is to fix the real probability distribution for the 4K queries.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm_withProbablityAdded_OLD"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm_withProbablityAdded"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[1])
    newProbability = currentFreq / 16442
    newOutputLine = lineElements[0] + " " + str(currentFreq) + " " + str(newProbability) + "\n"
    outputFileHandler.write(newOutputLine)

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): sums the real and
# predicted probability columns of the normalized probability file and prints
# both totals (a sanity check that each column sums to ~1 after normalization).
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_normalized"
inputFileHandler0 = open(inputFileName,"r")


totalRealProbability = 0
totalPredictedprobability = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realProbability = float( lineElements[1] )
    predictedProbability = float( lineElements[2] )
    
    totalRealProbability += realProbability
    totalPredictedprobability += predictedProbability

print "totalRealProbability:",totalRealProbability
print "totalPredictedprobability:",totalPredictedprobability

inputFileHandler0.close()
'''

# Note kept as a string literal (never executed): records the two dividing
# factors hard-coded in the normalization snippet that follows in this file.
'''
# IMPORTANT INFO for the probability normalization:
dividing factor for Real Probability: 0.82125
dividing factor for Predicted Probability: 794.887275577
'''
# Disabled snippet (string literal, never executed): divides the real and
# predicted probability columns by fixed factors (0.82125 and 794.887275577)
# to produce the normalized file, while also summing the UNnormalized columns
# for the printed totals.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_unnormalized"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_normalized"
outputFileHandler = open(outputFileName,"w")


totalRealProbability = 0
totalPredictedprobability = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realProbability = float( lineElements[1] )
    predictedProbability = float( lineElements[2] )
    
    new_realProbability = realProbability / 0.82125
    new_predictedProbability = predictedProbability / 794.887275577
    
    outputFileHandler.write(queryTerm + " " + str(new_realProbability) + " " + str(new_predictedProbability) + "\n")
    
    totalRealProbability += realProbability
    totalPredictedprobability += predictedProbability

print "totalRealProbability:",totalRealProbability
print "totalPredictedprobability:",totalPredictedprobability

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): appends a probability
# column freq / 4000 to each line of the sorted 4K-query frequency file.
'''
TOTAL_NUM_OF_QUERIES = 4000

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm"
outputFileName = inputFileName + "_withProbablityAdded"

inputFileHandler0 = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    freq = int(lineElements[1])
    probablity = freq / TOTAL_NUM_OF_QUERIES
    outputFileHandler.write(line.strip() + " " + str(probablity) + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): verifies the
# rangesForEachCellWithEvenValue file. First it loads, per frequency, five
# (beginningRangeID, endingRangeID, totalFreq) tuples from the guide file;
# then it streams the sorted cell file, accumulating cell values per big
# range and checking each accumulated total against the guide's expected
# total, printing PASS or exiting with an error marker on mismatch/EOF.
'''
# This part of code logic is to verify the correctness of the output file called: rangesForEachCellWithEvenValueFor100KQueries_1_10%
freqMetaInfoDict = {}
inputGuideFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/rangesForEachCellWithEvenValueFor100KQueries_1_10%"
inputGuideFileHandler = open(inputGuideFileName,"r")
dataLine = inputGuideFileHandler.readline()
while dataLine:
    headInfoLine = dataLine
    headInfoLineElements = headInfoLine.strip().split(" ")
    freq = int(headInfoLineElements[0])
    totalNumOfQueryTerms = int(headInfoLineElements[1])
    # ignore the number of ranges: 5
    # averageGap = int( headInfoLineElements[3] )

    if freq not in freqMetaInfoDict:
        freqMetaInfoDict[freq] = []
            
    for i in range(0,5):
        # do 5 times
        dataInfoLine = inputGuideFileHandler.readline()
        dataInfoLineElements = dataInfoLine.strip().split(" ")
        (beginningRangeID,endingRangeID,totalFreqForThisBigRange) = (int(dataInfoLineElements[0]),int(dataInfoLineElements[1]),int(dataInfoLineElements[2]) )
        freqMetaInfoDict[freq].append( (beginningRangeID,endingRangeID,totalFreqForThisBigRange) )
    
    # ignore the empty line
    dataLine = inputGuideFileHandler.readline()
    # get the header line
    dataLine = inputGuideFileHandler.readline()
inputGuideFileHandler.close()

print "len(freqMetaInfoDict):",len(freqMetaInfoDict)
for i in range(0,20):
    print i,freqMetaInfoDict[i]

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")

smallRangeCellValue = -1
currentFreqAccumulateCounter = 0
smallRangeCellKeyFreq = -1
smallRangeCellKeyRangeID = -1
for i in range(0,20):
    for tuple in freqMetaInfoDict[i]:
        (beginningRangeID,endingRangeID,totalFreqForThisBigRange) = tuple
        
        dataLine = inputFileHandler0.readline()
        dataLineElements = dataLine.strip().split(" ")
        smallRangeCellKey = dataLineElements[0]
        smallRangeCellValue = int(dataLineElements[1])
        smallRangeCellKeyElements = smallRangeCellKey.split("_")
        smallRangeCellKeyFreq = int(smallRangeCellKeyElements[0])
        smallRangeCellKeyRangeID = int(smallRangeCellKeyElements[1])
        
        while smallRangeCellKeyFreq == i and smallRangeCellKeyRangeID >= beginningRangeID and smallRangeCellKeyRangeID <= endingRangeID:
            # for debug ONLY
            # print "smallRangeCellValue:",smallRangeCellValue,"smallRangeCellKeyFreq:",smallRangeCellKeyFreq,"smallRangeCellKeyRangeID:",smallRangeCellKeyRangeID
            currentFreqAccumulateCounter += smallRangeCellValue
            dataLine = inputFileHandler0.readline()
            if dataLine.strip() != "":
                dataLineElements = dataLine.strip().split(" ")
                smallRangeCellKey = dataLineElements[0]
                smallRangeCellValue = int(dataLineElements[1])
                smallRangeCellKeyElements = smallRangeCellKey.split("_")
                smallRangeCellKeyFreq = int(smallRangeCellKeyElements[0])
                smallRangeCellKeyRangeID = int(smallRangeCellKeyElements[1])
            else:
                if currentFreqAccumulateCounter == totalFreqForThisBigRange:
                    print "PASS","Value:",currentFreqAccumulateCounter
                    exit(1)
                else:
                    print "mark3,error"
                    exit(1)
                
                print "EOF"

        if currentFreqAccumulateCounter == totalFreqForThisBigRange:
            print "PASS","Value:",currentFreqAccumulateCounter
            # reset to be 0
            currentFreqAccumulateCounter = 0
            # add the missing smallRangeCellValue
            currentFreqAccumulateCounter += smallRangeCellValue
            # for debug ONLY
            # print "smallRangeCellValue:",smallRangeCellValue,"smallRangeCellKeyFreq:",smallRangeCellKeyFreq,"smallRangeCellKeyRangeID:",smallRangeCellKeyRangeID 
        else:
            print "currentFreqAccumulateCounter:",currentFreqAccumulateCounter
            print "totalFreqForThisBigRange:",totalFreqForThisBigRange
            print "Mark2,Unexpected Behaviour"
            exit(1)

inputFileHandler0.close()
'''

# Disabled snippet (string literal, never executed): loads the fixed
# freq-of-freq cell file into a dict (key "freq_rangeID"), then for each of
# 1000 range labels prints the horizontal sum over the 20 frequency columns.
# Despite the variable name, the range(0,20) loop here includes frequency 0.
'''
classFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in classFreqDict:
        classFreqDict[key] = value
    else:
        print "unexpected behaviour"
        exit(1)

print "len(classFreqDict):",len(classFreqDict)

# modified values
totalFreqExceptZero = 0

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

for classLabel in classLabelList:
    # horizontal computations
    totalFreqExceptZero = 0
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        totalFreqExceptZero += classFreqDict[key]
    
    # for easy application
    print classLabel,totalFreqExceptZero
 
inputFileHandler0.close()
'''

# Disabled snippet (string literal, never executed): sanity-checks the
# 1000SmallRanges file by summing its fourth column and printing the total.
'''
# This part is just check the correctness of the file 1000SmallRangesWithNumOfTermsBelongingTo
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfQueryTerms = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    value = int( lineElements[3] )
    totalNumOfQueryTerms += value

print "totalNumOfQueryTerms:",totalNumOfQueryTerms
inputFileHandler0.close()
'''

# Disabled snippet (string literal, never executed): for each of two table
# files, groups lines by the key prefix before "_" and prints each prefix
# with its summed value (first iteration prints the sentinel "N/A 0" row).
# The same loop is duplicated for the denominator and molecular tables.
'''
# check the correctness of the small buckets for the two table files.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")

print "forDenominatorTable"

currentPrefix = "N/A"
currentFreqTotalSum = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int( lineElements[1] )
    if currentPrefix == key.split("_")[0]:
        currentFreqTotalSum += value
    else:
        print currentPrefix, currentFreqTotalSum
        currentPrefix = key.split("_")[0]
        currentFreqTotalSum = 0
        currentFreqTotalSum += value
print currentPrefix, currentFreqTotalSum
inputFileHandler0.close()

# check the correctness of the small buckets for the two table files.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")

print "forMolecularTable"

currentPrefix = "N/A"
currentFreqTotalSum = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int( lineElements[1] )
    if currentPrefix == key.split("_")[0]:
        currentFreqTotalSum += value
    else:
        print currentPrefix, currentFreqTotalSum
        currentPrefix = key.split("_")[0]
        currentFreqTotalSum = 0
        currentFreqTotalSum += value
print currentPrefix, currentFreqTotalSum
inputFileHandler0.close()
'''

# Disabled snippet (string literal, never executed): copies a file while
# prefixing each line with its 0-based line index as a unique ID.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset_withUniqueIDAdded"
outputFileHandler = open(outputFileName,"w")

for index,line in enumerate( inputFileHandler0.readlines() ):
    outputFileHandler.write( str(index) + " " + line.strip() + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): sums cell values from the
# 2D freq-of-freq file, skipping the five "0_*" zero-frequency keys, to count
# query terms seen with frequency < 20.
'''
# this part of logic is to compute NUM_OF_QUERY_TERMS_SEEN_WHICH_FREQ_LESS_THAN_20
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")
totalNumOfSeenWrods = 0
blackKeyList = ["0_VF","0_F","0_M","0_NF","0_VR"]
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in blackKeyList:
        totalNumOfSeenWrods += value
    else:
        print key,"NOT included."

print "totalNumOfSeenWrods:",totalNumOfSeenWrods
'''

# Disabled snippet (string literal, never executed): buckets lexicon terms by
# inverted-list length into five ranges — VR [1,100), NF [100,5000),
# M [5000,80000), F [80000,600000), VF [600000,inf) — and prints the counts.
'''
# ["VR","NF","M","F","VF"]
# [1 - 100)
# [100 - 5000)
# [5000 - 80,000)
# [80,000 - 600,000)
# [600,000 - ending)

UPPER_BOUND_FOR_RANGE1 = 100
UPPER_BOUND_FOR_RANGE2 = 5000
UPPER_BOUND_FOR_RANGE3 = 80000
UPPER_BOUND_FOR_RANGE4 = 600000

num_terms_in_VR_counter = 0
num_terms_in_NF_counter = 0
num_terms_in_M_counter = 0
num_terms_in_F_counter = 0
num_terms_in_VF_counter = 0



# the purpose of this program is to count how many terms belong to the ranges.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength.txt"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    lengthOfListForLexiconTerm = int(lineElements[1])
    
    if lengthOfListForLexiconTerm >= 1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE1:
        # it is very rare
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_VR_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE2:
        # it is not frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_NF_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE2 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE3:
        # it is medium
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_M_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE3 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE4:
        # it is frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_F_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE4:
        # it is very frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_VF_counter += 1
    
inputFileHandler0.close()

print "Begins..."
print "num of lexicons belonging to each range"
print "num_terms_in_VR_counter:",num_terms_in_VR_counter
print "num_terms_in_NF_counter:",num_terms_in_NF_counter
print "num_terms_in_M_counter:",num_terms_in_M_counter
print "num_terms_in_F_counter:",num_terms_in_F_counter
print "num_terms_in_VF_counter:",num_terms_in_VF_counter
print "Ends."
'''

# Disabled snippet (string literal, never executed) — explicitly marked as
# superseded by gov2_Phase2_make_easy_looking_table_for_prof_to_check.py.
# Builds denominator and molecular dicts from two cell files, smooths
# zero-valued denominator cells by averaging the two neighbouring frequency
# cells, then writes one output file containing three 5x20 tables:
# denominator, molecular, and their element-wise ratio (probability).
'''
# DO NOT USE THIS PART OF LOGIC
# This part of logic has been moved to a seperate program called: gov2_Phase2_make_easy_looking_table_for_prof_to_check.py
# Also note that this part of logic will NOT longer be updated
# make the easy looking table for prof to check
# also with some numbers in the denominator smoothing(average the neighbours)
classLabelList = ["VF","F","M","NF","VR"]
denominatorDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in denominatorDict:
        denominatorDict[key] = value
    else:
        print "mark1"
        exit(1)

print "len(denominatorDict):",len(denominatorDict)
# do a smoothing for the value of 0
# the smoothing is very simple, just average the two sides.
print "smoothing in..."
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        if denominatorDict[key] == 0:
            print "original:",key,denominatorDict[key]
            previousAuxSmoothingKey = str(i-1) + "_" + classLabel
            nextAuxSmoothingKey = str(i+1) + "_" + classLabel
            denominatorDict[key] = (denominatorDict[previousAuxSmoothingKey] + denominatorDict[nextAuxSmoothingKey])/2
            print "     new:",key,denominatorDict[key]
print "smoothing out."        
        
# this is just for checking
for key in denominatorDict:
    if denominatorDict[key] == 0:
        print key,denominatorDict[key]
        print "not expected"
        exit(1)

inputFileHandler0.close()

molecularDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in molecularDict:
        molecularDict[key] = value
    else:
        print "mark2"
        exit(1)

print "len(molecularDict):",len(molecularDict)
inputFileHandler0.close()

# One output file containing 3 tables
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/probabilityTableUsingProfIdea20130410"
outputFileHandler = open(outputFileName,"w")

outputFileHandler.write("table:denominator" + "\n")
outputFileHandler.write("NUM_OF_QUERY_TERMS_SEEN_WHICH_FREQ_LESS_THAN_20:?" + "\n")
outputFileHandler.write("NUM_OF_QUERIS_COUNT:Head_85K" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(denominatorDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

outputFileHandler.write("\n")
outputFileHandler.write("table:molecular" + "\n")
outputFileHandler.write("NUM_OF_QUERY_TERMS_NEW:?" + "\n")
outputFileHandler.write("NUM_OF_QUERIS_COUNT:FROM_85K_to_95K" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(molecularDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)


outputFileHandler.write("\n")
outputFileHandler.write("table:probability" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(molecularDict[key]/denominatorDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)
    
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): builds a frequency-of-
# frequency histogram of inverted-list lengths over the gov2 lexicon and
# writes "length count" pairs (dict iteration order, i.e. unsorted).
'''
# do the histogram thing for the length of the inverted list over the gov2 dataset
freqOfFreqDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/histogramForTheLengthOfTheListOverGov2Dataset"
outputFileHandler = open(outputFileName,"w")


dataLine = inputFileHandler0.readline()
while dataLine:
    dataLineElements = dataLine.strip().split(" ")
    length = int(dataLineElements[1])
    
    if length not in freqOfFreqDict:
        freqOfFreqDict[length] = 1
    else:
        freqOfFreqDict[length] += 1
    
    dataLine = inputFileHandler0.readline()

#output the freqOfFreqDict
for freq in freqOfFreqDict:
    outputFileHandler.write(str(freq) + " " + str(freqOfFreqDict[freq]) + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''

# Disabled snippet (string literal, never executed): walks two cell files in
# lockstep, requiring identical keys per line and value1 >= value2; exits on
# the first violation, otherwise prints every pair and "Final Pass".
'''
# checking the correctness of each cell.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler2 = open(inputFileName2,"r")

lineFromFile1 = inputFileHandler0.readline()
lineFromFile2 = inputFileHandler2.readline()

while lineFromFile1:
    line1Elements = lineFromFile1.strip().split(" ")
    line2Elements = lineFromFile2.strip().split(" ")
    # print line1Elements,line2Elements
    
    key1 = line1Elements[0]
    value1 = int(line1Elements[1])

    key2 = line2Elements[0]
    value2 = int(line2Elements[1])
    
    # print "key1:",key1,"key2:",key2
    # print value1,value2
    
    if key1 != key2:
        print key1,key2
        print "Unexpected Behavior"
        exit(1)
    else:
        if value1 < value2:
            print key1,value1,key2,value2
            print "Unexpected Behavior"
            exit(1)
        else:
            pass
    
    print value1,value2
    
    lineFromFile1 = inputFileHandler0.readline()
    lineFromFile2 = inputFileHandler2.readline()    

print "Final Pass"
inputFileHandler0.close()
inputFileHandler2.close()
'''

# Disabled snippet (string literal, never executed): replaces the value of
# every "0_<rangeID>" cell in the 2D freq-of-freq file with the zero-frequency
# count looked up from the per-small-range aux file; all other lines are
# copied through unchanged.
'''
# The following code part is used for fixing zero freq problem in the file called: freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay
smallRangeIDAndZeroFreqDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/eachSmallRangeInfo20130412_sortedBySmallRangeID"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    smallRangeID = lineElements[0]
    freq = int(lineElements[3])
    if smallRangeID not in smallRangeIDAndZeroFreqDict:
        smallRangeIDAndZeroFreqDict[smallRangeID] = freq
    else:
        print "Unexpected Behaviour"
        exit(1)

print "len(smallRangeIDAndZeroFreqDict):",len(smallRangeIDAndZeroFreqDict)
inputAuxFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    if lineElements[0].startswith("0_"):
        # need to fix the corresponding value
        smallRangeID = lineElements[0].split("_")[1]
        cellKey = "0_" + smallRangeID
        newZeroFreqValue = smallRangeIDAndZeroFreqDict[smallRangeID]
        
        outputFileHandler.write(cellKey + " " + str(newZeroFreqValue) + "\n")
    else:
        outputFileHandler.write(line)

inputFileHandler0.close()
outputFileHandler.close()
'''

# NOTE(review): disabled experiment — the whole block below is a triple-quoted
# string literal (a no-op statement), i.e. commented-out code that never runs.
# What it did: for each of the 1000 small ranges, derive the zero-appearance
# frequency as (total freq recorded for the range) minus (sum of the freqs for
# appearance levels 1..19), then write a per-range summary file.
'''
# We need to extend this part of logic a little bit.
# This part of script just show how many terms left for the zero probability.
# Latest results for the head 85K queries

# extended solution:

# OLD solution: manually change the value for (1)0_VF, (2)0_F, (3)0_M, (4)0_NF, (5)0_VR
#    except zero appearance total      left for zero appearance
#VF  905                    2141       1236
#F   4712                   6837       2125
#M   16787                  36148      19361
#NF  29144                  807526     778382
#VR  32752                  36875967   36843215           

eachSmallRangeIDWithTotalFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    smallRangeID = lineElements[0]
    totalFreqForThisRange = int( lineElements[3] )
    
    if smallRangeID not in eachSmallRangeIDWithTotalFreqDict:
        eachSmallRangeIDWithTotalFreqDict[smallRangeID] = totalFreqForThisRange
    else:
        print "Unexpected Behaviour"
        exit(1)

inputFileHandler0.close()

eachSmallRangeIDWithTotalFreqExceptZeroFreqDict = {}
classFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in classFreqDict:
        classFreqDict[key] = value
    else:
        print "unexpected behaviour"
        exit(1)

print "len(classFreqDict):",len(classFreqDict)

# modified values
totalFreqExceptZero = 0

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

for classLabel in classLabelList:
    # horizontal computations
    totalFreqExceptZero = 0
    for i in range(1,20):
        key = str(i) + "_" + classLabel
        totalFreqExceptZero += classFreqDict[key]
    
    if classLabel not in eachSmallRangeIDWithTotalFreqExceptZeroFreqDict:
        eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[classLabel] = totalFreqExceptZero
    
    # for easy application
    # print classLabel,totalFreqExceptZero
 
inputFileHandler0.close()

print "len( eachSmallRangeIDWithTotalFreqDict ):",len(eachSmallRangeIDWithTotalFreqDict)
print "len( eachSmallRangeIDWithTotalFreqExceptZeroFreqDict ):",len(eachSmallRangeIDWithTotalFreqExceptZeroFreqDict)

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/eachSmallRangeInfo20130412"
outputFileHandler = open(outputFileName,"w")
# Or I can record this info from the my feeling document
# outputFileHandler.write("smallRangeID" + " " + "totalFreqForThisRow" + " " + "totalFreqForThisRowExceptZeroFreq" + " " + "zeroFreqForEachSmallRow")
for key in eachSmallRangeIDWithTotalFreqDict:
    zeroFreqForCurrentSmallRange = eachSmallRangeIDWithTotalFreqDict[key] - eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[key]
    outputFileHandler.write(str(key) + " " + str( eachSmallRangeIDWithTotalFreqDict[key] ) + " " + str( eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[key] ) + " " + str(zeroFreqForCurrentSmallRange) + "\n")
outputFileHandler.close()
'''



# NOTE(review): disabled experiment (dead code inside a triple-quoted string).
# What it did: read 1000 (lower,upper) bucket ranges, then walk the lexicon
# sorted by list length, advancing through the ranges and counting how many
# lexicon terms fall into each range; finally write "index lower upper count".
# The range-advance loop assumes the input really is sorted by list length.
'''
# This part of logic is help to compute # of query terms belonging to each ranges (Both in small ranges or big ranges)
print "program begins..."

tupleRangeWithFreqList = []
tupleRangeWithCounterDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset_withUniqueIDAdded"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentLowerBound = int(lineElements[3])
    currentUpperBound = int(lineElements[4])
    keyTuple = (currentLowerBound,currentUpperBound)
    if keyTuple not in tupleRangeWithFreqList:
        tupleRangeWithFreqList.append(keyTuple)
        tupleRangeWithCounterDict[keyTuple] = 0
    else:
        print "Unexpected Behaviour"
        exit(1)

print "len(tupleRangeWithFreqList):",len(tupleRangeWithFreqList)
inputAuxFileHandler.close()

# in debugging
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_8700.txt"
# in production 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength.txt"
inputFileHandler0 = open(inputFileName,"r")
dataLine = inputFileHandler0.readline()
counterForTest = 0
currentRangeIndex = 0
(currentRangeLowerBound,currentRangeUpperBound) = tupleRangeWithFreqList[currentRangeIndex]

while dataLine:    
    if counterForTest % 10000 == 0:
        print "Process:",counterForTest
        
    counterForTest += 1
    # do some logic here.
    dataLineElements = dataLine.strip().split(" ")
    lengthOfListForLexiconTerm = int( dataLineElements[1] )
    
    if lengthOfListForLexiconTerm >= currentRangeLowerBound and lengthOfListForLexiconTerm < currentRangeUpperBound:
        tupleRangeWithCounterDict[tupleRangeWithFreqList[currentRangeIndex]] += 1
    else:
        while not (lengthOfListForLexiconTerm >= currentRangeLowerBound and lengthOfListForLexiconTerm < currentRangeUpperBound):
            if currentRangeIndex == 999:
                break
            currentRangeIndex += 1
            print "lengthOfListForLexiconTerm:",lengthOfListForLexiconTerm
            print "currentRangeIndex:",currentRangeIndex
            print "currentRangeLowerBound:",currentRangeLowerBound
            print "currentRangeUpperBound:",currentRangeUpperBound
            (currentRangeLowerBound,currentRangeUpperBound) = tupleRangeWithFreqList[currentRangeIndex]
        
        tupleRangeWithCounterDict[tupleRangeWithFreqList[currentRangeIndex]] += 1
        
          
    dataLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
outputFileHandler = open(outputFileName,"w")

for index,tuple in enumerate(tupleRangeWithFreqList):
    (lowerBoundScore,upperBoundScore) = tuple
    outputFileHandler.write(str(index) + " " + str(lowerBoundScore) + " " + str(upperBoundScore) + " " + str( tupleRangeWithCounterDict[tuple] ) + "\n")

outputFileHandler.close()
print "program ends."
'''

# NOTE(review): disabled experiment (dead code inside a triple-quoted string).
# What it did: load two "key value" freq files into dicts, then rewrite each
# file in a fixed key order — for i in 0..19 and class label 0..999, emit the
# key "<i>_<label>" with its stored value (KeyError if any key is missing).
'''
# This part of logic is to order the list in my thinking order
orderJustificationDictForFile1 = {}
orderJustificationDictForFile2 = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in orderJustificationDictForFile1:
        orderJustificationDictForFile1[key] = value
    else:
        print "mark1, unexpected behaviour"
        exit(1)

print "len(orderJustificationDictForFile1):",len(orderJustificationDictForFile1)
inputFileHandler0.close()



inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in orderJustificationDictForFile2:
        orderJustificationDictForFile2[key] = value
    else:
        print "mark2, unexpected behaviour"
        exit(1)
print "len(orderJustificationDictForFile2):",len(orderJustificationDictForFile2)
inputFileHandler2.close()

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

outputFileName = inputFileName + "_sortedByMyOwnWay"
outputFileHandler = open(outputFileName,"w")

for i in range(0,20):
    for freqLevelLabel in classLabelList:
        key = str(i) + "_" + freqLevelLabel
        outputFileHandler.write(key + " " + str( orderJustificationDictForFile1[key] ) + "\n")

outputFileHandler.close()

outputFileName2 = inputFileName2 + "_sortedByMyOwnWay"
outputFileHandler2 = open(outputFileName2,"w")

for i in range(0,20):
    for freqLevelLabel in classLabelList:
        key = str(i) + "_" + freqLevelLabel
        outputFileHandler2.write(key + " " + str( orderJustificationDictForFile2[key] ) + "\n")

outputFileHandler2.close()
'''

##################################################################################################################################
# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# sums column index 1 of the freq-of-freq file and prints the total.
'''
totalFreq = 0
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/freqOfFreqInQueries_head_95K_sortedByFreqR_2D_without_query_terms_sortedByKey.txt"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    totalFreq += int(lineElements[1])
print "totalFreq:",totalFreq

inputFileHandler0.close()
'''

# NOTE(review): disabled snippet (dead code) — skips the first 19 lines of the
# freq-of-freq file (readlines()[19:]) and sums Nr to count the kinds whose
# query frequency is >= 20. Assumes line i holds freqR == i+1 — TODO confirm.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/freqOfFreqInQueries_head_95K_sortedByFreqR.txt"
inputFileHandler0 = open(inputFileName,"r")

numOfKindsWhichHasFreqGreaterOrEqualTo20 = 0

for line in inputFileHandler0.readlines()[19:]:
    lineElements = line.strip().split(" ")
    freqR = int(lineElements[0])
    freqOfFreqNr = int(lineElements[1])
    numOfKindsWhichHasFreqGreaterOrEqualTo20 += freqOfFreqNr

print "numOfKindsWhichHasFreqGreaterOrEqualTo20:",numOfKindsWhichHasFreqGreaterOrEqualTo20
inputFileHandler0.close()
'''


# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# builds per-query-term (freq, freq/100000, freq/100000) tuples for terms with
# real query frequency <= 20; stops at the first term above 20 (file is sorted
# by query-term freq, per its name). Relies on `from __future__ import division`
# at the top of the file for the true division.
'''
# the sum value of column3 is: 413533
# But I am not going to use this value.
# Instead, the DENOMINATOR will be set to be 100K
DENOMINATOR = 100000

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries_extended_sortedByQueryTermFreq.txt"
inputFileHandler0 = open(inputFileName,"r")
queryTermDictWithInfoTuple = {}

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realFreqInQueries = int( lineElements[2] )
    if realFreqInQueries > 20:
        print line.strip()
        break

    if queryTerm not in queryTermDictWithInfoTuple:
        originalProbability = realFreqInQueries/ DENOMINATOR
        modifiedProbability = originalProbability
        valueTuple = (realFreqInQueries, originalProbability,modifiedProbability)
        queryTermDictWithInfoTuple[queryTerm] = valueTuple 

print "len(queryTermDictWithInfoTuple):",len(queryTermDictWithInfoTuple)
 
inputFileHandler0.close()
'''

# NOTE(review): disabled snippet (dead code) — prints every parsed line and
# the sum of column index 2 of the same query-term frequency file.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries_extended_sortedByQueryTermFreq.txt"
inputFileHandler0 = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    print lineElements
    totalFreq += int( lineElements[2] )

print "totalFreq:",totalFreq
inputFileHandler0.close()
'''

# Let's do the simulation here
# step1: load all the necessary tuple into the main memory
# step2: sort the list based on a specific factor. In memory will have the tuple (0,8400333,25,1.63844456821e-05,percentageBelongingTo,threshold)

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# appends a "deciding value" column (col2 * col3) to each lexicon line.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDTermFreqANDProbabilityInQueryTrace.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    decidingValue = int( lineElements[2] ) * float( lineElements[3] )
    outputFileHandler.write(line.strip() + " " + str(decidingValue) + "\n")

outputFileHandler.close()
inputFileHandler0.close()
'''

# NOTE(review): disabled snippet (dead code) — add-one smoothing over the whole
# lexicon: every lexicon term gets freq (queryFreq + 1), unseen terms get 1, and
# probability newFreq/38145935 (the smoothed total recorded in the comments).
'''
# number of terms in lexicon: 37728619
# number of terms in queries: 38871
# increase freq beyond my imagination: 37728619
# the new total freq(including my imagination: 417316 + 37728619 = 38145935)

queryTermWithFreqInQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
inputFileHandler0 = open(inputFileName,"r")
totalFreq = 0
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreqInQueries = int(lineElements[1])
    totalFreq += queryTermFreqInQueries
    if queryTerm not in queryTermWithFreqInQueriesDict:
        queryTermWithFreqInQueriesDict[queryTerm] = queryTermFreqInQueries
print "totalFreq:",totalFreq
inputFileHandler0.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDTermFreqANDProbabilityInQueryTrace.txt"
outputFileHandler = open(outputFileName,"w")

# This is exactly the add one smoothing method, but prof know that is horrible
for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    # I need only the queryTerm
    queryTerm = lineElements[0]
    if queryTerm not in queryTermWithFreqInQueriesDict:
        newFreq = 1
        outputFileHandler.write(line.strip() + " " + str(newFreq) + " " + str( newFreq/38145935 ) + "\n")
    else:
        newFreq = queryTermWithFreqInQueriesDict[queryTerm] + 1
        outputFileHandler.write(line.strip() + " " + str(newFreq) + " " + str( newFreq/38145935 ) + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''



# NOTE(review): disabled experiment (dead code inside a triple-quoted string).
# What it did: load query-term -> collection-frequency, then stream 100K
# queries, sanitize each to [a-z0-9 ] by replacing other chars with spaces,
# and accumulate how many unique postings are "touched" as new terms appear;
# progress is printed every 10000 queries. Finally prints the ten most
# frequently touched terms.
'''
# step1: load all the 100KQueryTerms with their term freq in collection to the main memory using a dict structure
# the size of this dict will be: 38871
# key: term 
# value: term_freq_in_collection

newTermPopUp = 0

queryTermBeingTouchCurrentlyDict = {}
queryTermBeingTouchCurrentlyList = []

numOfUniquePostingsBeingTouchedCurrently = 0

queryTermDictWithFreqInCollection = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

dataLine = inputFileHandler0.readline()

while dataLine:
    dataLineElements = dataLine.strip().split(" ")
    queryTerm = dataLineElements[0]
    queryTermFreqInCollection = int(dataLineElements[1])
    if queryTerm not in queryTermDictWithFreqInCollection:
        queryTermDictWithFreqInCollection[queryTerm] = queryTermFreqInCollection
    else:
        print "error, mark1"
    
    # print "weiIndexPostingCount:",weiIndexPostingCount
    dataLine = inputFileHandler0.readline()

print "len(queryTermDictWithFreqInCollection):",len(queryTermDictWithFreqInCollection)
inputFileHandler0.close()

# step2: load a certain fraction of queries in the 100K to see how many postings have been covered.
queryContentDict = {}
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries"
inputQueryHandler = open(inputQueryFileName,"r")
for index,line in enumerate( inputQueryHandler.readlines() ):
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data

    if queryID not in queryContentDict:
        queryContentDict[queryID] = queryContent
        # Let's do some content analysis on this
        queryContentElements = queryContent.strip().split(" ")
        for term in queryContentElements:
            if term.strip() != "" and term.strip() in queryTermDictWithFreqInCollection:
                if term.strip() not in queryTermBeingTouchCurrentlyDict:
                    queryTermBeingTouchCurrentlyDict[ term.strip() ] = 1
                    numOfUniquePostingsBeingTouchedCurrently += queryTermDictWithFreqInCollection[term.strip()]
                    newTermPopUp += 1
                else:
                    queryTermBeingTouchCurrentlyDict[ term.strip() ] += 1
            else:
                if term.strip() != "":
                    print "the term:",term.strip(),"is NOT in the lexicon"
    
    if (index+1) == 2:
        print index+1,numOfUniquePostingsBeingTouchedCurrently,newTermPopUp
        newTermPopUp = 0
    
    if (index+1) % 10000 == 0:  
        print index+1,numOfUniquePostingsBeingTouchedCurrently,newTermPopUp
        newTermPopUp = 0

    
    #print "queryTermBeingTouchCurrentlyDict:",queryTermBeingTouchCurrentlyDict
    #tempCheckPostingCounter = 0
    #for element in queryTermBeingTouchCurrentlyDict:
    #    tempCheckPostingCounter += queryTermDictWithFreqInCollection[element]
    #    print element," ",queryTermDictWithFreqInCollection[element]
    #print "tempCheckPostingCounter:",tempCheckPostingCounter
    

for queryTerm in queryTermBeingTouchCurrentlyDict:
    queryTermBeingTouchCurrentlyList.append( (queryTerm,queryTermBeingTouchCurrentlyDict[queryTerm]) )

queryTermBeingTouchCurrentlyList.sort(cmp=None, key=itemgetter(1), reverse=True)

for tuple in queryTermBeingTouchCurrentlyList[0:10]:
    print tuple
    
print "len(queryTermBeingTouchCurrentlyDict):",len(queryTermBeingTouchCurrentlyDict)
   
# print "----->","len(queryContentDict):",len(queryContentDict)
inputQueryHandler.close()
'''


# NOTE(review): disabled experiment (dead code inside a triple-quoted string).
# What it did: parse a forward-index debug file into a nested dict
# {trecID: {term: (bm25, pGivenQuery*1e6, pGeneral*1e6)}}, reading
# docPostingsRecorded "(term,a,b,c)" tuples per data line after the header.
'''
# key: trecID value: dict 
    # subKey: queryTerm subValue: a tuple with the following format(4.45178,718986,11.9503)
    # the meaning of the tuple is (4.45178,718986,11.9503)
    # 4.45178: BM25
    # 718986: postingProbabilityGivenTheQueryTimesBigNum1000000(I am NOT sure what is the value of the big number???)
    # 11.9503: postingProbabilityInGeneralTimesBigNum1000000(I am NOT sure what is the value of the big number???)
forwardIndexInMainMemory = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_debug_ONLY"
inputFileHandler0 = open(inputFileName,"r")

# This is the headline info
headLine = inputFileHandler0.readline()

dataLine = inputFileHandler0.readline()
while dataLine:
    data = dataLine.strip()
    dataElements = data.split(" ")
    trecID = dataElements[0]
    docSizeInWords = int( dataElements[1] ) # This field will be ignored
    docPostingsRecorded = int( dataElements[2] )
    
    if trecID not in forwardIndexInMainMemory:
        forwardIndexInMainMemory[trecID] = {}
    else:
        # the word dict for the current document has been build.
        # so there is NO logic for that
        pass
    
    for i in range(0+3,docPostingsRecorded+3):
        tupleElements = dataElements[i].split("(")[1].split(")")[0].split(",")
        # the tuple element will have the following forms:
        # ['www', '0.435614', '262071', '3.73365']
        # subKey: tupleElements[0]
        # subValue: (0.435614,262071,3.73365)
        if tupleElements[0] not in forwardIndexInMainMemory[trecID]:
            valueTuple = (tupleElements[1],tupleElements[2],tupleElements[3])
            forwardIndexInMainMemory[trecID][ tupleElements[0] ] = valueTuple
    # for debug
    # print "forwardIndexInMainMemory:",forwardIndexInMainMemory
    # print "len(forwardIndexInMainMemory):",len(forwardIndexInMainMemory)
    dataLine = inputFileHandler0.readline()
inputFileHandler0.close()
# print "len(forwardIndexInMainMemory):",len(forwardIndexInMainMemory)
# exit(1)
'''

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# builds the freq-of-freq histogram of term-pair frequencies (column index 2)
# and writes it out unsorted.
'''
# build the histogram for X-Axis
freqOfTermPairFreqDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_term_pair_freq"
inputFileHandler0 = open(inputFileName,"r")

# example line: 0 0001_3064 1
# ...
# example line: 401482 of_the 1863

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    # hold
    if termPairFreq not in freqOfTermPairFreqDict:
        freqOfTermPairFreqDict[termPairFreq] = 1
    else:
        freqOfTermPairFreqDict[termPairFreq] += 1

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairIThinkCorrectHistogram20130318_NOT_sorted"
outputFileHandler = open(outputFileName,"w")
for termPairFreq in freqOfTermPairFreqDict:
    outputFileHandler.write(str(termPairFreq) + " " + str( freqOfTermPairFreqDict[termPairFreq] ) + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''

# NOTE(review): disabled snippet (dead code) — sanity-check pass that sums the
# term-pair freq column and the probability column and prints line count/totals.
'''
print "calculate the overall statistics"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID_termPairProbabilityFromQueryTraceAdded"
inputFileHandler0 = open(inputFileName,"r")
originalLineCounter = 0
overallCheckFreq = 0
overallCheckProbability = 0.0
for line in inputFileHandler0.readlines():
    originalLineCounter += 1
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    termPairProbability = float( lineElements[3] )
    
    overallCheckFreq += termPairFreq
    overallCheckProbability += termPairProbability

print "originalLineCounter:",originalLineCounter
print "overallCheckFreq:",overallCheckFreq
print "overallCheckProbability:",overallCheckProbability

inputFileHandler0.close()
'''

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# two passes over the same file: pass 1 sums term-pair frequencies, pass 2
# appends freq/total as a probability column. Relies on the file-level
# `from __future__ import division` for non-truncating division.
'''
# doing this like a 2 passes
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputFileHandler0 = open(inputFileName,"r")
totalTermPairFreq = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    indexNumber = lineElements[0]
    termPairContent = lineElements[1]
    termPairFreq = int( lineElements[2] )
    totalTermPairFreq += termPairFreq

inputFileHandler0.close()

print "totalTermPairFreq:",totalTermPairFreq

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputFileHandler1 = open(inputFileName1,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID_termPairProbabilityFromQueryTraceAdded"
outputFileHandler = open(outputFileName,"w")


for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    termPairProbability = termPairFreq / totalTermPairFreq 
    outputFileHandler.write( line.strip() + " " + str(termPairProbability) + "\n")


inputFileHandler0.close()
outputFileHandler.close()
'''

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# loads uniqueID -> (termPair, freq), then reads one document's edge list and
# resolves each edge's uniqueID against the dict, exiting on any miss.
'''
# step1:
termPairUniqueIDWithTermPairAndFreqDict = {}

inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    uniqueID = lineElements[0]
    termPair = lineElements[1]
    termPairFreq = lineElements[2]
    tuple = (termPair,termPairFreq)
    if uniqueID not in termPairUniqueIDWithTermPairAndFreqDict:
        termPairUniqueIDWithTermPairAndFreqDict[uniqueID] = tuple
inputAuxFileHandler.close()

print "len(termPairUniqueIDWithTermPairAndFreqDict):",len(termPairUniqueIDWithTermPairAndFreqDict)

# step2:
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges_for_debug_ONLY"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

dataLine = inputFileHandler0.readline()
dataLineElements = dataLine.strip().split(" ")
 
docID = dataLineElements[0]
NUM_OF_EDGES = int(dataLineElements[1])

for i in range(2,2 + NUM_OF_EDGES):
    uniqueID = dataLineElements[i]
    if uniqueID not in termPairUniqueIDWithTermPairAndFreqDict:
        print "system error"
        exit(1)
    else:
        (termPair,termPairFreq) = termPairUniqueIDWithTermPairAndFreqDict[uniqueID]
        print uniqueID,termPair,termPairFreq
'''  
    

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# projects the training dataset down to "queryID rank BM25 N/A trecID" lines,
# de-duplicating via a list membership test (note: O(n) per line).
'''
outputLineList = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID_fixed"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_gov2_documents_needed_to_be_extracted_for_training_queries_sorted_by_qid"
outputFileHandler = open(outputFileName,"w")

# ingore the header line
inputFileHandler0.readline()

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    # for debug ONLY
    # print len(lineElements)
    
    # index directory listing
    # queryID 0
    # trecID 1
    # term 2
    # partialBM25 3 
    # length_of_the_inverted_index 4 
    # term_freq_in_doc 5
    # doc_words 6
    # overallBM25Score 7 
    # rank_in_this_results_list_for_this_query 8 
    # term_freq_in_queries 9
    # TOP10Label 10
    # posting_rank_in_list 11 
    # posting_rank_in_doc 12
    # term_freq_in_collection 13
    
    
    queryID = lineElements[0]
    trecID =  lineElements[1]
    overallBM25Score = lineElements[7]
    rank_in_this_results_list_for_this_query = lineElements[8]
    outputLine = queryID + " " + rank_in_this_results_list_for_this_query + " " + overallBM25Score + " " + "N/A" + " " + trecID + "\n"
    if outputLine not in outputLineList:
        outputLineList.append(outputLine)

for line in outputLineList:
    outputFileHandler.write(line)

inputFileHandler0.close()
outputFileHandler.close()
'''

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# maps each trecID "GXxxx-yy-..." to its collection file path
# basePath/GXxxx/yy.gz, de-duplicating with a dict while preserving first-seen
# order in a parallel list, then writes the unique paths.
'''
outputFilePathList = []
outputFilePathDict = {}

# for the machine pangolin
basePath = "/data/jhe/trecdata"

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_gov2_documents_needed_to_be_extracted_for_training_queries_sorted_by_trecID"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_document_collection_files_for_training_queries_needed_to_be_processed"
outputFileHandler = open(outputFileName,"w")

# sample output line
# /data/jhe/trecdata/GX000/00.gz

for line in inputFileHandler0.readlines():
    trecID = line.strip().split(" ")[4]
    trecIDElements = trecID.strip().split("-")
    folderName = trecIDElements[0]
    segmentName = trecIDElements[1]
    finalPath = basePath + "/" + folderName + "/" + segmentName + ".gz"
    if finalPath not in outputFilePathDict:
        outputFilePathDict[finalPath] = 1
        outputFilePathList.append(finalPath)
    else:
        # just do nothing
        pass

print "len(outputFilePathList):",len(outputFilePathList)
print "len(outputFilePathDict):",len(outputFilePathDict)

for filePath in outputFilePathList:
    outputFileHandler.write(filePath + "\n")
    # print filePath 


inputFileHandler0.close()
outputFileHandler.close()
'''


# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# checks one document's edge list: walks the edges two entries at a time and
# verifies each adjacent pair of values is identical, exiting on mismatch.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges"
inputFileHandler0 = open(inputFileName,"r")
# ignore the header line
inputFileHandler0.readline()

dataLine = inputFileHandler0.readline()
dataLineElements = dataLine.strip().split(" ")
docID = dataLineElements[0]
NUM_OF_EDGES = int( dataLineElements[1] )
for i in range(0,NUM_OF_EDGES,2):
    print "i:",i
    if dataLineElements[2+i] == dataLineElements[2 + i + 1]:
        print dataLineElements[2+i],dataLineElements[2 + i + 1]
    else:
        print "result NOT expected"
        print "dataLineElements[2+i]:",i,dataLineElements[2+i]
        print "dataLineElements[2 + i + 1]:",i,dataLineElements[2 + i + 1]
        exit(1)
'''

# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# canonicalizes directed term pairs "a_b"/"b_a" into one undirected key (the
# lexicographically smaller ordering), keeps the first freq/probability seen,
# sorts by the canonical pair, and writes "index pair freq probability".
'''
# Rewrite this part 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries"
inputFileHandler0 = open(inputFileName,"r")
oldHeadLine = inputFileHandler0.readline()

outputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries_NEW_sorted_by_term_pair"
outputFileHandler1 = open(outputFileName1,"w")

# do NOT write the header file because I need to use the command sort
# newHeadLine1 = "term_pair(default_sorted)" + " " + "freq_from_query_trace" + " " + "probability_from_query_trace" + "\n"
# outputFileHandler1.write(newHeadLine1)


indexCounter = 0
newTermPairList = []
newTermPairDict = {}

for oldLine in inputFileHandler0.readlines():
    lineElements = oldLine.strip().split(" ")
    
    termPair = lineElements[0]
    termPairFreq = lineElements[1]
    termPairProbability = lineElements[2]
        
    termPairElements = termPair.split("_")
    term1 = termPairElements[0]
    term2 = termPairElements[1]
    
    testedKey1 = term1 + "_" + term2
    testedKey2 = term2 + "_" + term1
    if testedKey1 not in newTermPairDict and testedKey2 not in newTermPairDict:
        if term1 <= term2:
            newTermPairDict[testedKey1] = True
            tuple = (testedKey1,termPairFreq,termPairProbability)
            newTermPairList.append(tuple)
        else:
            newTermPairDict[testedKey2] = True
            tuple = (testedKey2,termPairFreq,termPairProbability)
            newTermPairList.append(tuple)
    else:
        pass

# write the info from the newTermPairList out.
newTermPairList.sort(cmp=None, key=itemgetter(0), reverse=False)
for index,tuple in enumerate(newTermPairList):
    (termPair, termPairFreq, termPairProbability) = tuple
    outputFileHandler1.write(str(index) + " " + termPair + " " + termPairFreq + " " + termPairProbability + "\n")

inputFileHandler0.close()
outputFileHandler1.close()
'''



# NOTE(review): disabled snippet (dead code inside a triple-quoted string) —
# copies the term-pair file, extending the header and appending a running
# integer as a "popular_term_pair_without_order_unique_id" column.
'''
# Rewrite this part 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries"
inputFileHandler0 = open(inputFileName,"r")
oldHeadLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries_NEW"
outputFileHandler = open(outputFileName,"w")
newHeadLine = oldHeadLine.strip() + " " + "popular_term_pair_without_order_unique_id" + "\n"
outputFileHandler.write(newHeadLine)

indexCounter = 0
for oldLine in inputFileHandler0.readlines():
    outputFileHandler.write(oldLine.strip() + " " + str(indexCounter) + "\n")
    indexCounter += 1

inputFileHandler0.close()
outputFileHandler.close()
'''

'''
# temp fix the problem in the file /home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage.txt. Add another column for the terms
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage.txt"
inputFileHandler0 = open(inputFileName,"r")
oldHeaderLine = inputFileHandler0.readline()
newHeaderLine = oldHeaderLine.strip() + " " + "1.0Kept" + "\n"

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage_NEW.txt"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newHeaderLine) 

for line in inputFileHandler0.readlines():
    outputFileHandler.write(line.strip() + " " + "0" + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''


'''
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm"
inputFileHandler2 = open(inputFileName2,"r")

for line in inputFileHandler1.readlines():
    termFromFile2 = inputFileHandler2.readline().strip().split(" ")[0]
    termFromFile1 = line.strip()
    if termFromFile2 == termFromFile1:
        pass
    else:
        print "NOT good"
        print "termFromFile1:",termFromFile1
        print "termFromFile2:",termFromFile2

print "pass"
'''


'''
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID"
inputFileHandler0 = open(inputFileName,"r")
oldHeadLine = inputFileHandler0.readline()
newHeadLine = oldHeadLine

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID_fixed"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newHeadLine)

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    newLine = ""
    for element in lineElements[0:10]:
        newLine += element + " "
    
    if lineElements[10] == "TOP10":
        newLine += "True" + " "
    elif lineElements[10] == "NotTOP10":
        newLine += "False" + " "
    
    for element in lineElements[11:14]:
        newLine += element + " "
    
    newLine += "\n"
    
    outputFileHandler.write( newLine )


inputFileHandler0.close()
outputFileHandler.close()
'''

'''
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/lexiconTermsONLY.txt"
outputFileHandler = open(outputFileName,"w")

currentInputLine = inputFileHandler0.readline()

while currentInputLine:
    term = currentInputLine.strip().split(" ")[0]
    outputFileHandler.write(term + "\n")
    currentInputLine = inputFileHandler0.readline()

inputFileHandler0.close()
outputFileHandler.close()
'''

'''
prefixLetterExistingDict = {}

basePath = "/home/diaosi/outputDirForTermScores"

print "give you some time to process"
# option1
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"

# option2
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"

inputFileHandler0 = open(inputFileName,"r")

# read the lines line by line incrementally
currentLine = inputFileHandler0.readline()

originalLineCounter = 0
while currentLine:
    originalLineCounter += 1
    prefixLetter = currentLine.strip()[0]
    neededToCheckPath = basePath + "/" + prefixLetter
    if prefixLetter in prefixLetterExistingDict:
        # do NOT need to check anything
        pass
    else:
        if os.path.exists(neededToCheckPath):
            prefixLetterExistingDict[prefixLetter] = True
        else:
            print neededToCheckPath
            exit(1)
    currentLine = inputFileHandler0.readline()


inputFileHandler0.close()
print "originalLineCounter:",originalLineCounter
print "prefixLetterExistingDict:",prefixLetterExistingDict
print "len(prefixLetterExistingDict):",len(prefixLetterExistingDict)
'''

'''
#step0
def makeTheCorrespondingFolders():
    letterList = ["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
    print "step0"
    print "make the corresponding folders."
    for letter in letterList:
        directory = "/home/diaosi/outputDirForTermScores/" + letter 
        print directory
        os.mkdir(directory)

makeTheCorrespondingFolders()
'''

'''
def multiply(a,b):
    print "Will compute", a, "times", b
    c = 0
    for i in range(0, a):
        c = c + b
    return c

def hello():
    print "hello from Python"
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116"
inputFileHandler0 = open(inputFileName,"r")
infoHeadLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116_sorted_by_rank_in_list"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    modifiedCurrentLineElementsList = []
    modifiedCurrentLineElementsList.append( int( currentLineElements[0] ) )
    modifiedCurrentLineElementsList += currentLineElements[1:]
    # print "modifiedCurrentLineElementsList:",modifiedCurrentLineElementsList
    allLinesList.append(modifiedCurrentLineElementsList)

print "len(allLinesList):",len(allLinesList)

# make sure using the same sorting method
allLinesList.sort(cmp=None, key=itemgetter(21), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(0), reverse=False)

for lineElements in allLinesList:
    outputLine = ""
    outputLine += str( lineElements[0] ) + " "
    for element in lineElements[1:]:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler0.close()
outputFileHandler.close()
'''

'''
inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
inputDataSourceFileHandler.seek(115362260628)

currentLine = inputDataSourceFileHandler.readline()
while currentLine.strip().split(" ")[0] != "GX019-35-1502414":
    # print currentLine.strip().split(" ")[0]
    currentLine = inputDataSourceFileHandler.readline()
print "currentLine:",currentLine
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded.OLD"
inputFileHandler0 = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler0.readline()
oldInfoHeadLineElements = oldInfoHeadLine.strip().split(" ")
for index,element in enumerate(oldInfoHeadLineElements):
    print index,element
    
print

newInfoHeadLine = ""
for element in oldInfoHeadLineElements[:20]:
    newInfoHeadLine += element + " "

for element in oldInfoHeadLineElements[21:]:
    newInfoHeadLine += element + " "

newInfoHeadLine += "TOP10Label" + " " + "TOP50Label" + " " + "TOP100Label" + " " + "TOP11To50Label" + " " + "TOP51To100Label" + " " + "\n"

newInfoHeadLine.strip()

newInfoHeadLineElements = newInfoHeadLine.strip().split(" ")
for index,element in enumerate(newInfoHeadLineElements):
    print index,element

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newInfoHeadLine)

for line in inputFileHandler0.readlines():
    newOutputLine = ""
    lineElements = line.strip().split(" ")
    for element in lineElements[:20]:
        newOutputLine += element + " "
    
    for element in lineElements[21:]:
        newOutputLine += element + " "
    
    # NOTE: when sampling, labels other than TOP10/TOP11To50/TOP51To100 may
    # appear; such lines fall through the if/elif chain below and get NO
    # label columns appended.
    if lineElements[20] == "TOP10":
        newOutputLine += "True" + " " + "True" + " " + "True" + " " + "False" + " " + "False"
    elif lineElements[20] == "TOP11To50":
        newOutputLine += "False" + " " + "True" + " " + "True" + " " + "True" + " " + "False"
    elif lineElements[20] == "TOP51To100":
        newOutputLine += "False" + " " + "False" + " " + "True" + " " + "False" + " " + "True"
    
    newOutputLine += "\n"

    outputFileHandler.write(newOutputLine)

inputFileHandler0.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
inputFileHandler0 = open(inputFileName,"r")
infoHeadLine = inputFileHandler0.readline()
infoHeadLineElements = infoHeadLine.strip().split(" ")

dataLine = inputFileHandler0.readline()
dataLineElements = dataLine.strip().split(" ")

for index,element in enumerate( infoHeadLineElements ):
    print index,element,dataLineElements[index] 
''' 
 


'''
for index,line in enumerate( inputFileHandler0.readlines() ):
    if len( line.strip().split(" ") ) == 23:
        pass
    else:
        print "index:",index

print "Test Pass"
'''



'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_rankInInvertedListAdded"
outputFileHandler = open(outputFileName,"w")

inputBasePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/"
for currentPartNumber in range(1,11):
    fileName = "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_part" + "%02d" % currentPartNumber + "_rankInInvertedListAdded"
    absoluteFileName = inputBasePath + fileName
    currentInputFileHandler = open(absoluteFileName,"r")
    currentInputFileHandler.readline()
    
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
    currentInputFileHandler.close()


outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded_OLD"
inputFileHandler0 = open(inputFileName,"r")
infoHeadLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    newRankInDoc = int( lineElements[-1] ) + 1
    newLine = ""
    for element in lineElements[:-1]:
        newLine += element + " "
    newLine += str(newRankInDoc) + " "
    newLine.strip()
    outputFileHandler.write(newLine + "\n")
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_tail_1K_verify_weiHLFeaturesAdded_verify"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    rank_in_the_doc_from_High_Level = lineElements[21]
    rank_in_the_doc_from_polyIRToolkit = lineElements[23]
    print queryID,trecID,term,rank_in_the_doc_from_High_Level,rank_in_the_doc_from_polyIRToolkit
    # print line.strip()
    # print len( line.strip().split(" ") )
'''  


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
inputFileHandler0 = open(inputFileName,"r")
inputFileHandler0.readline()

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler2.readline()

for index,line in enumerate( inputFileHandler2.readlines() ):
    comparedLine = inputFileHandler0.readline()
    lineElements = line.strip().split(" ")
    
    modifyLine = ""
    for element in lineElements[:-1]:
        modifyLine += element + " "
    modifyLine = modifyLine.strip()
    
    if comparedLine.startswith(modifyLine):
        pass
    else:
        print "index:",index
        print "newLine:",comparedLine.strip()
        print "oldLine:",line.strip()
        print
        exit()

print "Test Pass"
'''    


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    outputFileHandler.write(term + "\n")

inputFileHandler0.close()
outputFileHandler.close()
'''


'''
# The purpose of this part of the code: look up each query term's frequency
# in the collection-wide lexicon and write "<term> <freq>" lines (0 when the
# term is missing from the lexicon).
lexiconTermDict = {}

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
outputFileHandler = open(outputFileName,"w")
  
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqInCollection = int( lineElements[1] )
    if term not in lexiconTermDict:
        lexiconTermDict[term] = freqInCollection        
print "len(lexiconTermDict):",len(lexiconTermDict)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
inputFileHandler0 = open(inputFileName,"r")
for line in inputFileHandler0.readlines():
    newLine = ""
    term = line.strip().split(" ")[0]
    if term in lexiconTermDict:
        newLine = term + " " + str( lexiconTermDict[term] ) + "\n"
    else:
        print "lexicon do NOT have the term:",term
        newLine = term + " " + "0" + "\n"
    outputFileHandler.write(newLine)
inputFileHandler0.close()
inputFileHandler2.close()
outputFileHandler.close()
'''




'''
# Split the sorted training file into 10 parts of 50K data lines each,
# re-writing the (annotated) header line at the top of every part.
numOfDatalinesForEachFile = 50000
numberOfFiles = 10

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term"
inputFileHandler0 = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler0.readline()

currentPartNumber = 1

for index,line in enumerate( inputFileHandler0.readlines() ):
    # NOTE(review): the modulus is hardcoded to 50000 instead of using
    # numOfDatalinesForEachFile -- keep the two in sync if either changes.
    if index % 50000 == 0:
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/" + "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term" + "_part" + "%02d" % currentPartNumber
        # NOTE(review): part files are opened but never explicitly closed;
        # the previous handle is only dropped when rebound here.
        currentOutputFileHandler = open(currentOutputFileName,"w")
        currentOutputFileHandler.write( oldInfoHeadLine.strip() + " (Part%02dOutof10)" % currentPartNumber + "\n")
        currentPartNumber += 1
    currentOutputFileHandler.write( line )
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler0 = open(inputFileName,"r")
infoHeadLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K" + "_sorted_by_term"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(2), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler0.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded"
inputFileHandler0 = open(inputFileName,"r")
infoHeadLine = inputFileHandler0.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded" + "_sorted_by_trecID"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler0.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler0.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/20121215Posting_Oriented_Balanced_Training_Dataset.txt.input"
inputFileHandler0 = open(inputFileName,"r")

numTop10Count = 0
numNotTop10Count = 0


for line in inputFileHandler0.readlines():
    if line.strip().split(" ")[9] == "TOP10":
        numTop10Count += 1
    elif line.strip().split(" ")[9] == "NOTTOP10":
        numNotTop10Count += 1

print "numTop10Count:",numTop10Count
print "numNotTop10Count:",numNotTop10Count
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
inputFileHandler0 = open(inputFileName,"r")

postingRankDict = {}
tempCounter = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    if key not in postingRankDict:
        postingRankDict[key] = lineElements[3]
    else:
        print "Duplicated posting, key shown:",key
        tempCounter += 1

print "len(postingRankDict):",len(postingRankDict)
print "tempCounter:",tempCounter

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/trainingIntermediateFile20121205.txt"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    accessKey = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    outputFileHandler.write( line.strip() + " " + postingRankDict[accessKey] + "\n")

outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_beginning_part0_add_rank_in_list"
print inputFileName
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    outputFileHandler.write(line)

for i in range(1,21):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left_part" + "%02d" % i + "_add_rank_in_list"
    # print currentInputFileName
    
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_beginning_part00_rank_in_document"
inputFileHandler0 = open(inputFileName,"r")

for line in inputFileHandler0.readlines():
    outputFileHandler.write(line)

for i in range(1,11):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part" + "%02d" % i + "_rank_in_document"
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)

outputFileHandler.close()
'''


'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10_rank_in_document"
inputFileHandler2 = open(inputFileName2,"r")

file1Lines = inputFileHandler1.readlines()
file2Lines = inputFileHandler2.readlines()

for index,line in enumerate(file1Lines):
    if line.strip().split(" ")[0] == file2Lines[index].strip().split(" ")[0] and line.strip().split(" ")[1] == file2Lines[index].strip().split(" ")[1] and line.strip().split(" ")[2] == file2Lines[index].strip().split(" ")[2]:
        pass
    else:
        print "sth wrong."
        print "len(file1Lines):",len(file1Lines)
        print "len(file2Lines):",len(file2Lines)
        print "index:",index
        print "line from file1:",line.strip()
        print "line from file2:",file2Lines[index].strip()
        exit(1)
        
print "all pass"
'''



'''
# The purpose of this script is to split the input file into the number of
# parts specified on the command line.

print "len(sys.argv):",len(sys.argv)

if len(sys.argv) != 2:
    print "Illegal # of arguments"
    print "Usage: python programName.py #ofFilesWantToSplit"
    exit(1)
   
numberOfFilesWantToSplit = int( sys.argv[1] )

#option1
#inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left"

#option2
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left"

inputFileHandler1 = open(inputFileName1,"r")

totalNumberOfLines = len( inputFileHandler1.readlines() )
print "totalNumberOfLines:",totalNumberOfLines
numberOfLinesPerFile = math.ceil( totalNumberOfLines/numberOfFilesWantToSplit )
print "numberOfLinesPerFile:",numberOfLinesPerFile

inputFileHandler1.close()

inputFileHandler1 = open(inputFileName1,"r")

i = 0

for index,line in enumerate( inputFileHandler1.readlines() ):
    if index % numberOfLinesPerFile == 0:
        i += 1
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left" + "_part%02d" % i
        currentOutputFileHandler = open(currentOutputFileName,"w")        
    
    currentOutputFileHandler.write(line)

inputFileHandler1.close()
'''

'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_part0"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input_part0"
inputFileHandler2 = open(inputFileName2,"r")

lineNumber = 0

currentLineForFile1 = inputFileHandler1.readline()
currentLineForFile2 = inputFileHandler2.readline()
lineNumber += 1

while currentLineForFile1.strip().split(" ")[0] == currentLineForFile2.strip().split(" ")[0] and currentLineForFile1.strip().split(" ")[1] == currentLineForFile2.strip().split(" ")[1] and currentLineForFile1.strip().split(" ")[2] == currentLineForFile2.strip().split(" ")[2]:
    currentLineForFile1 = inputFileHandler1.readline()
    currentLineForFile2 = inputFileHandler2.readline()
    
    if currentLineForFile1.strip() == "" and currentLineForFile2.strip() == "":
        break

    lineNumber += 1

print "lineNumber:",lineNumber
print "currentLineForFile1:",currentLineForFile1
print "currentLineForFile2:",currentLineForFile2


inputFileHandler1.close()
inputFileHandler2.close()
'''



'''
duplicatedTupleCheckDict = {}

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler0 = open(inputFileName,"r")

counter = 0

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    tuple = (queryID, trecID, term)
    if tuple not in duplicatedTupleCheckDict:
        duplicatedTupleCheckDict[tuple] = 1
    else:
        duplicatedTupleCheckDict[tuple] += 1
        print "shit, this is duplicate:",tuple
        counter += 1

print "counter:",counter
inputFileHandler0.close()
'''


'''
traingTupleList = []

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler0 = open(inputFileName,"r")


# This is for generating the features of rank_in_inverted_index
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    simplifiedTuple = (lineElements[0],lineElements[1],lineElements[2])
    traingTupleList.append(simplifiedTuple)

print "len(traingTupleList):",len(traingTupleList)

traingTupleList_sorted = sorted(traingTupleList, key=itemgetter(2))

for tuple in traingTupleList_sorted:
    # print tuple
    (queryID, trecID, term) = tuple
    # print queryID,trecID,term
    outputFileHandler.write(queryID + " " + trecID + " " + term + "\n")

inputFileHandler0.close()
outputFileHandler.close()

print "DONE"
'''


'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask_NEW20121128.txt"
outputFileHandler = open(outputFileName,"w")

totalNumberOfLine = 147

for line in inputFileHandler0.readlines():
    if totalNumberOfLine > 0:
        lineElements = line.strip().split(":")
        
        originalQueryID = int( lineElements[0] )
        newQueryID = originalQueryID + 100000
        
        print originalQueryID,lineElements[1]
        print newQueryID,lineElements[1]
        
        totalNumberOfLine -= 1
        outputFileHandler.write( str( newQueryID ) + ":" + lineElements[1] + "\n")
        
    else:
        outputFileHandler.write( line.strip() + "\n" )

inputFileHandler0.close()
outputFileHandler.close()
'''



'''
# Make the query-related lexicon term frequencies real (no longer fake).
# The involved file is: "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt"

lexiconTermFreqInCollectionDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler0 = open(inputFileName,"r")

currentLine = inputFileHandler0.readline()

while currentLine:
    lineElements = currentLine.strip().split(" ")
    
    if len( lineElements ) == 2:
        if lineElements[0].strip() not in lexiconTermFreqInCollectionDict:
            lexiconTermFreqInCollectionDict[lineElements[0].strip()] = int( lineElements[1].strip() )
    else:
        exit(1)
    
    currentLine = inputFileHandler0.readline()

print "All passed"
print "lexiconTermFreqInCollectionDict['0']:",lexiconTermFreqInCollectionDict['0']
print "len( lexiconTermFreqInCollectionDict ):",len( lexiconTermFreqInCollectionDict )
'''

'''
print "make the corresponding folders."
for i in range(0,273):
    directory = "/data3/obukai/human_judge_web_pages_gov2_ALL/" + "GX%03d" % i
    print directory
    os.mkdir(directory)
'''


'''
allRelevenceDocs = {}
allRelevenceDocsList = []

inputFileNameList = []
inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/priorityTrecDocumentsIDs_all_sorted_with_index_number_BM25_oriented.txt"
inputFileName2 = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"

outputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_include_approximating_BM25_AND_human_judged.txt"
outputFileHandler = open(outputFileName,"w")

inputFileNameList.append(inputFileName1)
inputFileNameList.append(inputFileName2)

for name in inputFileNameList:
    inputFileHandler0 = open(name,"r")
    for line in inputFileHandler0.readlines():
        lineElements = line.strip().split(" ")
        if lineElements[1] not in allRelevenceDocs:
            allRelevenceDocs[lineElements[1]] = 1
        else:
            allRelevenceDocs[lineElements[1]] += 1
            
print "len(allRelevenceDocs):",len(allRelevenceDocs)
'''



'''
for docID in allRelevenceDocs:
    if docID.startswith("GX"):
        pass
    else:
        print "NOT passed:",docID
'''


'''
allRelevenceDocsList = allRelevenceDocs.keys()
allRelevenceDocsList.sort(cmp=None, key=None, reverse=False)

for index,docID in enumerate( allRelevenceDocsList ):
    outputFileHandler.write( str( index ) + " " + docID + " " + "-1" + "\n")


inputFileHandler0.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented.txt.output"
inputFileHandler0 = open(inputFileName,"r")

inputFileName2 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
outputFileHandler = open(outputFileName,"w")

judgeDict = {}

for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[2]
    relevenceScore = lineElements[3]
    
    key = queryID + "_" + trecID
    if key not in judgeDict:
        judgeDict[key] = relevenceScore

print "len(judgeDict):",len(judgeDict)
# print "judgeDict['850_GX265-48-6314208']:",judgeDict['850_GX265-48-6314208']
    
for line in inputFileHandler0.readlines():
    outputLine = ""
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    key = queryID + "_" + trecID 
    outputLine = line.strip() + " " + judgeDict[key]
    outputFileHandler.write(outputLine + "\n")
    # print "outputLine:",outputLine

inputFileHandler0.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
inputFileHandler0 = open(inputFileName,"r")

line = inputFileHandler0.readline()
lineElements = line.strip().split(" ")
print "len(lineElements):",len(lineElements)
for index,element in enumerate(lineElements):
    if index == 0:
        print index,"queryID:",element
    elif index == 1:
        print index,"trecID:",element
    elif index == 2:
        print index,"postingTerm:",element
    elif index == 3:
        print index,"doc_words:",element
    elif index == 4:
        print index,"doc_distinct_words:",element
    elif index == 5:
        print index,"text_size:",element
    elif index == 6:
        print index,"script_size:",element
    elif index == 7:
        print index,"script_text_ratio:",element
    elif index == 8:
        print index,"doc_outlinks:",element
    elif index == 9:
        print index,"current_term_col_freq:",element
    elif index == 10:
        print index,"whether_current_term_in_header:",element
    elif index == 11:
        print index,"whether_current_term_in_title:",element
    elif index == 12:
        print index,"whether_current_term_in_bold:",element
    elif index == 13:
        print index,"whether_current_term_in_url:",element
    elif index == 14:
        print index,"whether_current_term_in_italic:",element
    elif index == 15:
        print index,"current_term_freq_in_doc:",element
    elif index == 16:
        print index,"current_term_rel_freq_in_doc:",element   
    elif index == 17:
        print index,"current_term_BM25(BM25 Score for this term):",element
    elif index == 18:
        print index,"current_term_QL(Language Model Score for this term):",element
    elif index == 19:
        print index,"current_term_distribution(Term Distribution Score for this term):",element
    elif index == 20:
        print index,"relevence score1 label:",element
    else:
        print index,element
'''


'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage_need_to_handled.txt"
inputFileHandler0 = open(inputFileName,"r")

while True:
    indexLine = inputFileHandler0.readline()
    documentSourceLine = inputFileHandler0.readline()
    parsedStateLine = inputFileHandler0.readline()
    docLengthLine = inputFileHandler0.readline()
    emptyLine = inputFileHandler0.readline()
    
    print "doc length:",docLengthLine
'''    



'''
import random
import math
import os
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError

###########################################################################################################################class begin...
class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []               

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        
        print "-----> completed_parsed:",status
        
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks
        
        #outputFileFeaturePart1Handler.write(str( len(self.doc_words) ) + " ")
        #outputFileFeaturePart1Handler.write(str( len(self.doc_distinct_words) )  + " ")
        #outputFileFeaturePart1Handler.write(str(self.text_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.script_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(script_text_ratio)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.doc_outlinks)  + " ")
        
        
        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################class end.

inputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage.txt"
outputFileErrorHandler = open(outputFileName, "w")

human_judge_query_location_base_path = "/data5/team/obukai/human_judge_web_pages_gov2/"

for index,line in enumerate( inputFileHandler0.readlines() ):
    print "index:",index
    outputFileErrorHandler.write("index:" + str(index) + "\n")
    #if index == 2:
    #    break
    
    trecID = line.strip().split(" ")[1]
    
    trecIDElements = trecID.split("-")
    
    segmentNumber = trecIDElements[0]
    
    fileNamePrefixLookingFor = trecID
    
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    
    foundTag = False
    
    for dirname, dirnames, filenames in os.walk(pathLookFor):
        for filename in filenames:
            if filename.startswith(fileNamePrefixLookingFor):
                foundTag = True
                # print filename
                absolutePathForWebPageFileName = os.path.join(dirname, filename)
                print "-----> ",absolutePathForWebPageFileName
                outputFileErrorHandler.write("-----> " + absolutePathForWebPageFileName + "\n")
                
                absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                file_content = absolutePathForWebPageFileHandler.read()
                absolutePathForWebPageFileHandler.close()
                
                web_page_content = file_content
                parser = MyHTMLParser()
                
                try:
                    parser.feed(web_page_content)
                    parser.generate_statistics_report(1)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 1" + "\n") 
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n") 
                except HTMLParseError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                    outputFileErrorHandler.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                except UnicodeDecodeError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",e
                    outputFileErrorHandler.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                
                print
                outputFileErrorHandler.write("\n")
                
                
    

inputFileHandler0.close()
outputFileErrorHandler.close()
'''



    

# for line in inputFileHandler0.readlines():
#    print len(line.strip().split(" "))

'''
queriesDict = {}

# The whole purpose of this script is to build the wholeTrainingFileTempleteGov2V1.txt.input file

# the file for me to compare:
# /data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_phase0.txt.input

inputFileName1 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler2.readlines():
    queryID = int( line.strip().split(":")[0] )
    
    data = line.strip().split(":")[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data
    
    if queryID not in queriesDict:
        queriesDict[queryID] = queryContent    
    # print 

for line in inputFileHandler1.readlines():
    # example line: 850 0 GX272-67-14117174 0
    queryID = int(line.strip().split(" ")[0])
    trecID = line.strip().split(" ")[2]
    relevenceScore = line.strip().split(" ")[3]
    outputFileHandler.write( str(queryID) + " " + trecID + " '" + queriesDict[queryID] + "' " + relevenceScore + "\n")

print "len(queriesDict):",len(queriesDict)
print "Done."

inputFileHandler1.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt"
outputFileHandler = open(outputFileName,"w")

queryTermList = []

for line in inputFileHandler0.readlines():
    data = line.strip().split(":")[1]
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    queryTerms = data.split(" ")
    
    for queryTerm in queryTerms:
        if queryTerm.lower() != "" and queryTerm.lower() not in queryTermList:
            queryTermList.append(queryTerm.lower())

queryTermList.sort(cmp=None, key=None, reverse=False)

for queryTerm in queryTermList:
    outputFileHandler.write(queryTerm + " " + str(random.randint(1, 100000000)) + "\n")


inputFileHandler0.close()
outputFileHandler.close()
'''


'''
for dirname, dirnames, filenames in os.walk('/data5/team/obukai/human_judge_web_pages_gov2'):
    # for subdirname in dirnames:
    #     print os.path.join(dirname, subdirname)
    for filename in filenames:
        print os.path.join(dirname, filename)
'''

'''
basedPath = "/data5/team/obukai/human_judge_web_pages_gov2/GX"

for i in range(0,273):
    directory = basedPath + "%03d" % i
    #print directory
    os.mkdir(directory)
'''


# This is the place where old Python code is backed up.
# Old and backup code;
# it may still be useful, but I am not sure.

'''
# Currently NOT used; please use step2-method2 instead.
# And I have stopped updating this.
# OLD but still-working method1
# step2-method1: compute the freq of freq for this distribution
# Updated by Wei 2013/04/11
# This method works OK for the fixed ranges, and I do not want to modify it because it is working OK.
# Instead, I will develop a new algorithm which can read the ranges from a plaintext file. :)
print "do step2"
UPPER_BOUND_FOR_RANGE1 = 100
UPPER_BOUND_FOR_RANGE2 = 5000
UPPER_BOUND_FOR_RANGE3 = 80000
UPPER_BOUND_FOR_RANGE4 = 600000
currentTwoDClass = "N/A"

freqOfFreqDict = {}
freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution = {}
freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution = {}

classLabelList = ["VR","NF","M","F","VF"]
for i in range(0,20):
    for classLabel in classLabelList:
        key = str(i) + "_" + classLabel
        freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[key] = 0

queryTermsWithFreqInCollectionDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freqInCollection = int(lineElements[1])
    if queryTerm not in queryTermsWithFreqInCollectionDict:
        queryTermsWithFreqInCollectionDict[queryTerm] = freqInCollection
    else:
        print "Unexpected Behaviour"

print "len(queryTermsWithFreqInCollectionDict):",len(queryTermsWithFreqInCollectionDict)
inputAuxFileHandler.close()


inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_sortedByQueryTermFreq"
inputFileHandler0 = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_sortedByFreqR.txt"
outputFileHandler = open(outputFileName,"w")

outputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_2D_with_query_terms.txt"
outputFileHandler2 = open(outputFileName2,"w")

for i in range(1,11473):
    freqOfFreqDict[i] = 0

print "len(freqOfFreqDict):",len(freqOfFreqDict)

for line in inputFileHandler0.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freq = int(lineElements[1])
    if freq < 20:
        if queryTerm in queryTermsWithFreqInCollectionDict:
            lengthOfListForLexiconTerm = queryTermsWithFreqInCollectionDict[queryTerm]
            if lengthOfListForLexiconTerm < 1:
                print "The query term:",queryTerm,"does NOT appear in the lexicon."
            else:
                if lengthOfListForLexiconTerm >= 1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE1:
                    # it is very rare
                    # example:
                    # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
                    currentTwoDClass = "VR"
        
                elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE2:
                    # it is not frequent
                    # example:
                    # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
                    currentTwoDClass = "NF"
        
                elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE2 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE3:
                    # it is medium
                    # example:
                    # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
                    currentTwoDClass = "M"
        
                elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE3 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE4:
                    # it is frequent
                    # example:
                    # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
                    currentTwoDClass = "F"
        
                elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE4:
                    # it is very frequent
                    # example:
                    # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
                    currentTwoDClass = "VF"
                
                currentTwoDClass = str(freq) + "_" + currentTwoDClass
                
                print queryTerm,freq,lengthOfListForLexiconTerm,currentTwoDClass
                
                freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass] += 1
                if currentTwoDClass not in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution:
                    freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass] = []
                    freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass].append(queryTerm)
                else:
                    freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass].append(queryTerm)
        else:
            print "The query term:",queryTerm,"does NOT appear in the lexicon."
    freqOfFreqDict[freq] += 1

# output the freq of freq
for i in range(1,11473):
    outputFileHandler.write(str(i) + " " + str( freqOfFreqDict[i] ) + "\n")


print "freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution:",freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution
print "len(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution):",len(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution)

for key in freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution:
    outputLine = key + " " + str(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[key]) + " "
    
    if key in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution:
        for queryTerm in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[key]:
            outputLine += queryTerm + " "
    
    outputLine += "\n"
    outputFileHandler2.write(outputLine)


inputFileHandler0.close()
outputFileHandler.close()
outputFileHandler2.close()
'''
