from __future__ import division
from operator import itemgetter, attrgetter

import gc
import math
import matplotlib
import os
import pylab
import random
import sys
import time
from sets import Set
from scipy import stats
import numpy as np

print "Program Begins..."
print "a list of files as input:"
# step1:
# key: classLabel in int format
# value: a dict containing pieces info
    # key: the pieceID in int format
    # value: the probability that the next posting will hit this piece area in float format
classLabelWithPiecesMetaInfoDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED"

# step2:
# key: term in string format
# value: Currently NO USE
selectedTermsDict = {}
inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsAndDocIDs_20130927Morning"

# step3:
# key: term in string format
# value: class label in int format
termClassLabelDict = {}
# key: term in string format
# value: a dict
    # key: pieceNumber
    # value: numOfPostingsInThisPiece
termPiecesInfoDict = {}
inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"

# step4:
# key: freq in int format
# value: probability in float format
freq_first_factor_probability_dict = {}
# key: term in string format
# value: freq in int format
terms_with_corresponding_species_belonging_to_dict = {}
inputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
inputFileName5 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"

# step5:
# key: trecID in string format
# value: all XDocs in float format
trecIDsWithTheirAllXdocsDict = {}
inputFileName6 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/priorityDocumentSets20130926Morning"
inputFileName7 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2_Docs_with_TheirXdocValues_renamed_20130926"

# step6:
inputFileName8 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsAndDocIDs_20130927Morning"
outputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsWithStaticANDDynamicPart_20130928Afternoon"
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "inputFileName4:",inputFileName4
print "inputFileName5:",inputFileName5
print "inputFileName6:",inputFileName6
print "inputFileName7:",inputFileName7
print "inputFileName8:",inputFileName8
print "the file as output:"
print "outputFileName1:",outputFileName1

# TODO (pending):
# list_length_and_relatative_rank_and_probabilities_file_name:
# ---> /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED
# terms_with_length_of_list_and_class_label_and_num_of_postings_in_each_pieces_file_name:
# where are the whole lexicon terms?
# ---> /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819


# Fill classLabelWithPiecesMetaInfoDict from inputFileName1.
# Line format (tab separated):
#   classLabel <TAB> numOfPieces <TAB> prob_piece0 <TAB> prob_piece1 ...
# A duplicate class label (or duplicate piece index) means the input file
# is corrupt, so the script aborts immediately.
# `with` guarantees the handle is closed even if a parse step raises;
# iterating the file directly avoids loading it all at once via readlines().
with open(inputFileName1, "r") as inputFileHandler:
    for line in inputFileHandler:
        lineElements = line.strip().split("\t")
        # print "lineElements:",lineElements
        currentClassLabel = int(lineElements[0])
        if currentClassLabel in classLabelWithPiecesMetaInfoDict:
            print "mark1,error"
            sys.exit(1)
        classLabelWithPiecesMetaInfoDict[currentClassLabel] = {}
        numOfPiecesInCurrentClass = int(lineElements[1])
        probabilityFields = lineElements[2:]
        # the declared piece count must match the number of probability columns
        assert numOfPiecesInCurrentClass == len(probabilityFields)
        for pieceIndex, probabilityString in enumerate(probabilityFields):
            if pieceIndex in classLabelWithPiecesMetaInfoDict[currentClassLabel]:
                print "mark2,error"
                sys.exit(1)
            classLabelWithPiecesMetaInfoDict[currentClassLabel][pieceIndex] = float(probabilityString)

print "len(classLabelWithPiecesMetaInfoDict):",len(classLabelWithPiecesMetaInfoDict)
print "classLabelWithPiecesMetaInfoDict[0]:",classLabelWithPiecesMetaInfoDict[0]
print "classLabelWithPiecesMetaInfoDict[4]:",classLabelWithPiecesMetaInfoDict[4]
print "classLabelWithPiecesMetaInfoDict[70]:",classLabelWithPiecesMetaInfoDict[70]
# sample output from a previous run:
# classLabelWithPiecesMetaInfoDict[0]: {0: 1.0}
# classLabelWithPiecesMetaInfoDict[4]: {0: 0.37657715949530901, 1: 0.62342284050469099}


# fill the variable: selectedTermsDict 
inputFileHandler = open(inputFileName2,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    if term not in selectedTermsDict:
        selectedTermsDict[term] = 1
    else:
        pass # duplicated terms
inputFileHandler.close()
# NOte: for this turn: 24262 terms have been selected
print "len(selectedTermsDict):",len(selectedTermsDict)


# The format of the following file is:
# There are 5 parts:
# column0: term
# column1: length of the list
# column2: class label determined based on the length of the list (There are specific lower bound and upper bound)
# column3: # of pieces followed
# column(4) - column(END): piece# numOfPostingsInCurrentPiece piece# numOfPostingsInCurrentPiece piece# numOfPostingsInCurrentPiece ...
inputFileHandler = open(inputFileName3,"r")
lineCounter = 0 # init

currentLine = inputFileHandler.readline()
lineCounter += 1

while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    
    if currentTerm in selectedTermsDict:
        # ignore currentLineElements[1], cause we will some other sources provide this info about length of the inverted list for this term
        currentTermClassLabelInIntFormat = int( currentLineElements[2] )
        currentTermNumOfPiecesHave = int( currentLineElements[3] )
        
        # CURRENT version
        # fill the variable: termClassLabelDict
        termClassLabelDict[currentTerm] = currentTermClassLabelInIntFormat 
        # key: term
        # value: another dict
            # key: piece num
            # value: # of postings in this piece
        termPiecesInfoDict[currentTerm] = {}
        
        # fill the variable: termPiecesInfoDict
        baseIndex = 4
        for i in range( 0,len( currentLineElements[4:]),2):
            currentPieceNum = int( currentLineElements[4+i] )
            currentNumOfPostingsInThiePiece = int( currentLineElements[4+i+1])
            termPiecesInfoDict[currentTerm][currentPieceNum] = currentNumOfPostingsInThiePiece
    else:
        pass # This term is NOT in the random selected terms in this term

    currentLine = inputFileHandler.readline()
    lineCounter += 1
    
    if lineCounter % 1000000 == 0:
        print lineCounter,"terms processed."
    
inputFileHandler.close()
print "len(termClassLabelDict):",len(termClassLabelDict)
print "len(termPiecesInfoDict):",len(termPiecesInfoDict)
print "termClassLabelDict['0']:",termClassLabelDict['0']
print "termPiecesInfoDict['0']:",termPiecesInfoDict['0']
# print "termClassLabelDict['0120j4']:",termClassLabelDict['0120j4']
# print "termPiecesInfoDict['0120j4']:",termPiecesInfoDict['0120j4']
# len(termClassLabelDict): 38871
# len(termPiecesInfoDict): 38871
# termClassLabelDict['0']: 63
# termPiecesInfoDict['0']: {0: 4200166, 1: 2100083, 2: 1050041, 3: 525020, 4: 262510, 5: 131255, 6: 65627, 7: 32813, 8: 16406, 9: 8203, 10: 4101, 11: 2050, 12: 1025, 13: 512, 14: 256, 15: 128, 16: 64, 17: 73}
# termClassLabelDict['0120j4']: -1
# termPiecesInfoDict['0120j4']: {}
inputFileHandler.close()


# ignore this part for DEBUGGING
# fill the variable: freq_first_factor_probability_dict and terms_with_corresponding_species_belonging_to_dict
inputFileHandler = open(inputFileName4,"r")
# skip the 4 headlines:
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()

currentFreq = 0
currentProbabilityOfATermAppearedInTheNextQuerySlot = 0.0;

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int( lineElements[0] )
    currentProbabilityOfATermAppearedInTheNextQuerySlot = float( lineElements[4] )
    freq_first_factor_probability_dict[currentFreq] = currentProbabilityOfATermAppearedInTheNextQuerySlot
print "len(freq_first_factor_probability_dict):",len(freq_first_factor_probability_dict)
inputFileHandler.close()

inputFileHandler = open(inputFileName5,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int( lineElements[0 ] )
    numOfTermsAssociated = int( lineElements[1] )
    # print "len(lineElements):",len(lineElements)
    if numOfTermsAssociated == 0:
        pass
    else:
        for term in lineElements[2:]:
            terms_with_corresponding_species_belonging_to_dict[term] = currentFreq
print "len(terms_with_corresponding_species_belonging_to_dict):",len(terms_with_corresponding_species_belonging_to_dict) 
inputFileHandler.close()

print "P['soalr']:",freq_first_factor_probability_dict[ terms_with_corresponding_species_belonging_to_dict["soalr"] ]
print "P['so']:",freq_first_factor_probability_dict[ terms_with_corresponding_species_belonging_to_dict["so"] ] 
print "terms_with_corresponding_species_belonging_to_dict['0']:",terms_with_corresponding_species_belonging_to_dict["0"]
print "terms_with_corresponding_species_belonging_to_dict['00']:",terms_with_corresponding_species_belonging_to_dict["00"]
# print "terms_with_corresponding_species_belonging_to_dict['0000']:",terms_with_corresponding_species_belonging_to_dict["0000"]


inputFileHandler = open(inputFileName6,"r")
for line in inputFileHandler.readlines():
    currentTrecID = line.strip()
    if currentTrecID not in trecIDsWithTheirAllXdocsDict:
        trecIDsWithTheirAllXdocsDict[currentTrecID] = 0.0;
    else:
        print "problem"
        exit(1)
print "len(trecIDsWithTheirAllXdocsDict):",len(trecIDsWithTheirAllXdocsDict)
inputFileHandler.close()

# fill the All_Xdoc values

inputFileHandler = open(inputFileName7,"r")
# skip the headline
currentLine = inputFileHandler.readline()

# the first data line
currentLine = inputFileHandler.readline()
dataLineCounter = 1

currentDocXdocValueBasedOnGoodTurning = 0.0;
while currentLine:
    if dataLineCounter % 1000000 == 0:
        print "dataLineCounter:",dataLineCounter,"processed."
    dataLineCounter += 1
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    if currentTrecID not in trecIDsWithTheirAllXdocsDict:
        pass
    else:
        currentDocXdocValueBasedOnGoodTurning = float( currentLineElements[5] )
        # print "currentTrecID: ",currentTrecID
        # print "currentDocXdocValueBasedOnGoodTurning: ",currentDocXdocValueBasedOnGoodTurning
        trecIDsWithTheirAllXdocsDict[currentTrecID] = currentDocXdocValueBasedOnGoodTurning
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
print "len(trecIDsWithTheirAllXdocsDict):",len(trecIDsWithTheirAllXdocsDict)



# output the maximum potential usefulness of a posting
outputFileHandler = open(outputFileName1,"w")

# The file randomlySelectedWithLocalPostingIDsAndDocIDs_20130927Morning has been opened for the SECOND time
inputFileHandler = open(inputFileName8,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    currentPostingLocalIndex = int( currentLineElements[1] )
    currentPostingLocalRank = currentPostingLocalIndex + 1
    
    
    # key: term in string format
    # value: class label in int format
    # termClassLabelDict = {}
    currentTermBelongingClassLabel = termClassLabelDict[currentTerm]
    currentTermPostingBelongingPieceNum = -1
    
    # key: term in string format
    # value: a dict
        # key: pieceNumber
        # value: numOfPostingsInThisPiece
    # termPiecesInfoDict = {}
    currentListRankAccumulatedUpperBound = 0
    i = len(termPiecesInfoDict[currentTerm])
    while currentPostingLocalRank > currentListRankAccumulatedUpperBound:
        i -= 1
        currentListRankAccumulatedUpperBound += termPiecesInfoDict[currentTerm][i]
    currentTermPostingBelongingPieceNum = i
    
    # key: classLabel in int format
    # value: a dict containing pieces info
        # key: the pieceID in int format
        # value: the probability that the next posting will hit this piece area in float format
    # classLabelWithPiecesMetaInfoDict = {}
    # numerator = piece probability 
    # denominator = # of postings in that specific piece
    # currentPostingStaticProbability = numerator / denominator
    currentPostingStaticProbability = classLabelWithPiecesMetaInfoDict[currentTermBelongingClassLabel][currentTermPostingBelongingPieceNum] / termPiecesInfoDict[currentTerm][i]
    
    # for debug
    # print "currentTerm:",currentTerm
    # print "currentPostingLocalIndex:",currentPostingLocalIndex
    # print "currentPostingLocalRank:",currentPostingLocalRank
    # print "currentTermBelongingClassLabel:",currentTermBelongingClassLabel
    # print "currentTermPostingBelongingPieceNum:",currentTermPostingBelongingPieceNum
    # print "currentPostingStaticProbability:",currentPostingStaticProbability
    # print
    
    
    
    # Part 2 comment out for DEBUGGING
    currentTermFirstFactorProbability = 0.0
    if currentTerm not in terms_with_corresponding_species_belonging_to_dict:
        currentTermFirstFactorProbability = freq_first_factor_probability_dict[ 0 ]
    else:
        currentTermFirstFactorProbability = freq_first_factor_probability_dict[ terms_with_corresponding_species_belonging_to_dict[currentTerm] ]
    
    # for debug
    # print "currentTerm:",currentTerm
    # print "terms_with_corresponding_species_belonging_to_dict[currentTerm]:",terms_with_corresponding_species_belonging_to_dict[currentTerm]
    # print "currentTermFirstFactorProbability:",freq_first_factor_probability_dict[ terms_with_corresponding_species_belonging_to_dict[currentTerm] ]
    
    currentPostingBelongingTrecID = currentLineElements[3]
    
    currentPostingBelongingTrecIDAllXDoc = trecIDsWithTheirAllXdocsDict[currentPostingBelongingTrecID]
    # column0: the term itself
    # column1: local index of the sorted list for the term
    # column2: docID
    # column3: trecID
    # column4: currentTermFirstFactorProbability
    # column5: currentPostingStaticProbability (static part) 
    # column6: currentPostingBelongingTrecIDAllXDoc (dynamic part)

    outputFileHandler.write(currentLine.strip() + " " + str(currentTermFirstFactorProbability) + " " + str(currentPostingStaticProbability) + " " + str(currentPostingBelongingTrecIDAllXDoc) + "\n")
    
    currentLine = inputFileHandler.readline()
    
inputFileHandler.close()
outputFileHandler.close()

print "Program Ends."


