# Updated on 2013/08/20 night by Wei at school

# Updated on 2013/08/18 afternoon by Wei at school

# Updated on 2013/08/16 night by Wei at school
# Make the program also suitable for the set of TOP10 postings for the list hit distribution analysis (It naturally does, right?)
 
# Updated on 2013/07/27 night by Wei

# Updated by Wei 2013/07/22 night

# Updated by Wei 2013/07/20 night
# deal with a relatively large training file called:
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19_tfqAdded_tfcAdded.train

# Updated by Wei 2013/07/13 night
# Fix a bug of sorting the list by the posting score in string format. Actually, it should sort the list by the posting score in float format

# Program inputs:
# (1) inputTrainingFileName = (a training file)

# main source from getting the rank
# (2) inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"

# the aux file to help getting the rank
# (3) inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"

# Program outputs:
# output the file with the _rankInInvertedListAdded at the end
# (1) outputTrainingFileName = (a training file with postingRankInList added)

# Output File Analysis:
# new feature added: posting rank in list

from __future__ import division
from operator import itemgetter, attrgetter
import sys

print "program begins..."

# in production mode (new version)
# the requirement for the input file:
# (1) ordered by the query term for easy access to the inverted index
# (2) simplify the input cause it ONLY needs some fields to identify one record.

# for production:
# inputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_13_tfqAdded.train"
# The file should start from xaa to xaj
# inputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/distributedWorkForFeatureGenerationPostingRankInList20130721ForConnectedPostings/xae"
# inputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/TrainingSetForFixingTheMissingTOPKResults20130715_tfqAdded_tfcAdded_WITHOUT_HEADLINE_sorted_by_term.train"
# inputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInList20130727/xaj"
inputTrainingFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_sorted_by_term"

# for debug
# inputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_DEBUG.train"
inputTrainingFileHandler = open(inputTrainingFileName,"r")
print "inputTrainingFileName:",inputTrainingFileName

# Note, the following input file can be retrieved by the following commands:
# sort --key=3 Posting_Oriented_Training_Dataset_2013_02_22.train_tfqAdded_labelsAdded > Posting_Oriented_Training_Dataset_2013_02_22.train_tfqAdded_labelsAdded_sortedByTerm
# option2: feed the input directly from the program
# inputTrainingFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.Combine.train_tfqAdded_labelsAdded_sortedByTerm"
# inputTrainingFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.train_tfqAdded_labelsAdded_sortedByTerm"

# Using
# outputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_13_tfqAdded_postingRankInListAdded.train"
outputTrainingFileName = inputTrainingFileName + "_postingRankInListAdded"
# outputTrainingFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified_add_rank_in_list.txt.input"
print "outputTrainingFileName:",outputTrainingFileName
outputTrainingFileHandler = open(outputTrainingFileName,"w")

'''
# switch ON/OFF for headline
# some files have the header lines but the others don't.
# need to change it accordingly.
oldInfoHeadLine = inputTrainingFileHandler.readline()
newInfoHeadLine = oldInfoHeadLine.strip() + " " + "posting_rank_in_inverted_list" + "\n"
outputTrainingFileHandler.write(newInfoHeadLine)
'''

# Used (online computing or just offline computing everything?)
# my decision is: online compute, NOT offline computing everything

# option1 for the machine pangolin
inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
# option2 for the machine dodo
# inputDataSourceFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
inputDataSourceFileHandler = open(inputDataSourceFileName,"r")

# option1 for the machine pangolin
inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
# option2 for the machine dodo
# inputAuxSourceFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
inputAuxSourceFileHandler = open(inputAuxSourceFileName,"r")

# key: term
# value: a list has the following info in sequence:[termIndexNumber,invertedIndexLength,invertedIndexBeginningPosition,invertedIndexEndingPosition] 
queryTermInvertedIndexInfo = {}

for line in inputAuxSourceFileHandler.readlines():
    lineElements = line.strip().split(" ")
    
    termIndexNumber = int(lineElements[0])
    term = lineElements[1]
    invertedIndexLength = int(lineElements[2])
    invertedIndexBeginningPosition = int(lineElements[3])
    invertedIndexEndingPosition = int(lineElements[4])
    
    if term not in queryTermInvertedIndexInfo:
        queryTermInvertedIndexInfo[term] = []
        # index 0
        queryTermInvertedIndexInfo[term].append(termIndexNumber)
        # index 1
        queryTermInvertedIndexInfo[term].append(invertedIndexLength)
        # index 2
        queryTermInvertedIndexInfo[term].append(invertedIndexBeginningPosition)
        # index 3
        queryTermInvertedIndexInfo[term].append(invertedIndexEndingPosition)   
    else:
        print "unnormal, mark1"
        exit(1)

print "len(queryTermInvertedIndexInfo):",len(queryTermInvertedIndexInfo)

currentExploringTerm = ""
currentTermTrecIDScorePairList = []
# currentTermTrecIDRankInListDict = {}

currentTermTrecIDScorePair_sorted = []
currentTermTrecID_sorted_based_on_score = []


for index,line in enumerate( inputTrainingFileHandler.readlines() ):
    
    lineElements = line.strip().split(" ")
    queryID = lineElements[1]
    trecID = lineElements[2]
    # internalID = lineElements[3]
    term = lineElements[4]
    
    print "queryID:",queryID
    print "trecID:",trecID
    print "term:",term
    # print "currentExploringTerm:",currentExploringTerm
    
    if term in queryTermInvertedIndexInfo:
        if term != currentExploringTerm:
            print "------>Load the corresponding info..."
            ############################################################################################################################
            
            # the alg is as the following: access that info, maintain a counter, larger +1 smaller, remain the same
            currentTermInvertedIndexCorrectLength = queryTermInvertedIndexInfo[term][1]
            currentTermInvertedIndexBeginningPosition = queryTermInvertedIndexInfo[term][2]
            currentTermInvertedIndexEndingPosition = queryTermInvertedIndexInfo[term][3]
            
            print "------>currentTermInvertedIndexCorrectLength:",currentTermInvertedIndexCorrectLength
            print "------>currentTermInvertedIndexBeginningPosition:",currentTermInvertedIndexBeginningPosition
            print "------>currentTermInvertedIndexEndingPosition:",currentTermInvertedIndexEndingPosition
            
            
            ###################################################
            # Clean things up
            # current version
            outputTrainingFileHandler.flush()
            del currentTermTrecIDScorePairList[:]
            del currentTermTrecIDScorePair_sorted[:]
            del currentTermTrecID_sorted_based_on_score[:]
            
            # currentTermTrecIDRankInListDict.clear()
            ###################################################
            
    
            
            # print "currentTermInvertedIndexBeginningPosition:",currentTermInvertedIndexBeginningPosition
            
            inputDataSourceFileHandler.seek(currentTermInvertedIndexBeginningPosition)
            
            currentDataLine = inputDataSourceFileHandler.readline()
            # print "first line:",currentDataLine
            
            while inputDataSourceFileHandler.tell() <= currentTermInvertedIndexEndingPosition:
                
                currentDataLineElements = currentDataLine.strip().split(" ")
                
                # put it into dict
                currentTrecID = currentDataLineElements[0]
                
                # ingore
                docIDInIRSystem = currentDataLineElements[1]
                
                # put it into dict
                postingScoreInStringFormat = currentDataLineElements[2]
                postingScoreInFloatFormat = float(postingScoreInStringFormat)
                trecIDPostingScoreTuple = (currentTrecID,postingScoreInFloatFormat)
                
                # currentTermTrecIDScorePairDict[term].append(trecIDPostingScoreTuple)
                
                currentTermTrecIDScorePairList.append(trecIDPostingScoreTuple)
                
                currentDataLine = inputDataSourceFileHandler.readline()
            
            # dealing with the last line
            # print "last line:",currentDataLine
            currentDataLineElements = currentDataLine.strip().split(" ")
            
            # put it into dict
            currentTrecID = currentDataLineElements[0]
            
            # ingore
            docIDInIRSystem = currentDataLineElements[1]
            
            # put it into dict
            postingScoreInStringFormat = currentDataLineElements[2]
            postingScoreInFloatFormat = float(postingScoreInStringFormat)
            trecIDPostingScoreTuple = (currentTrecID,postingScoreInFloatFormat)
            
            # currentTermTrecIDScorePairDict[term].append(trecIDPostingScoreTuple)
            currentTermTrecIDScorePairList.append(trecIDPostingScoreTuple)
            
            currentDataLine = inputDataSourceFileHandler.readline()
            
            # print "len(currentTermTrecIDScorePairDict[term]):",len( currentTermTrecIDScorePairDict[term] )
            # print "currentTermTrecIDScorePairDict[term]:",currentTermTrecIDScorePairDict[term]
            
            # It is time to sort the list currentTermTrecIDScorePairDict[term]
            print "------>mark1: finish loading the list into main memory. len(currentTermTrecIDScorePairList):",len(currentTermTrecIDScorePairList)
            # old version
            #currentTermTrecIDScorePair_sorted = sorted(currentTermTrecIDScorePairDict[term], key=itemgetter(1), reverse=True)
            
            # new version
            currentTermTrecIDScorePair_sorted = sorted(currentTermTrecIDScorePairList, key=itemgetter(1), reverse=True)
            for pairTuple in currentTermTrecIDScorePair_sorted:
                (currentTrecID, score) = pairTuple
                currentTermTrecID_sorted_based_on_score.append(currentTrecID)
            
            print "------>mark2: finish sorting the list"
            
            '''
            print "debug begins..."
            print "currentTermTrecIDScorePair_sorted:",currentTermTrecIDScorePair_sorted
            print
            print "currentTermTrecID_sorted_based_on_score:",currentTermTrecID_sorted_based_on_score
            print
            print "debug ends."
            '''
            
            # print "len(currentTermTrecIDRankInListDict[term]):",len( currentTermTrecIDRankInListDict[term] )
            currentExploringTerm = term    
            ############################################################################################################################
        
        else:
            print "------>Direct assign the rank..."
            print "len(currentTermTrecIDScorePairList):",len(currentTermTrecIDScorePairList)
            # print "len(currentTermTrecIDRankInListDict):",len(currentTermTrecIDRankInListDict)
        
        # Now it is time to assign the exciting rank to the training instances
        # currentPostingOutputRankInList = currentTermTrecIDRankInListDict[term][trecID]
        
        # old version
        # currentPostingOutputRankInList = currentTermTrecIDRankInListDict[trecID]
        
        # new version
        currentPostingOutputRankInList = currentTermTrecID_sorted_based_on_score.index(trecID) + 1
        print "------>mark4: finish getting the posting rank in the inverted list"
        # current version
        outputTrainingFileHandler.write(line.strip() + " " + str(currentPostingOutputRankInList) + "\n")
        
        # old version
        # outputTrainingFileHandler.write(queryID + " " + trecID + " " + term + " " + str(currentPostingOutputRankInList) + "\n")
        print 
    else:
        # for the terms which have NOT find the aux info and the exact rank in the list, just assign them a -1
        outputTrainingFileHandler.write(line.strip() + " " + str(-1) + "\n")
        
print "pass"

inputTrainingFileHandler.close()
inputDataSourceFileHandler.close()
inputAuxSourceFileHandler.close()
outputTrainingFileHandler.close()

print "Overall Processing Statistics:"
print "inputTrainingFileName:",inputTrainingFileName
print "inputDataSourceFileName:",inputDataSourceFileName
print "inputAuxSourceFileName:",inputAuxSourceFileName
print "outputTrainingFileName:",outputTrainingFileName
print "program ends."