# Updated by Wei 2013/07/18 (night): this bucket method shows how well my predictor is doing,
# both in the preliminary evaluation and in the end-to-end application evaluation.

from __future__ import division
import random
import math
import os

print "Program Begins..."
# globalTempCounter = 0
# Updated by Wei 2013/07/01
# init the variable buckets100ArrayCounterDict
# key: the lower bound of the probability starts from 0, 0.01, 0.02, 0.03, 0.04 ... till 0.99
# value:
buckets100ArrayCounterDict = {}
baseBeginningPoint = 0.0
stepGapInFloat = 0.01
keyInFloat = baseBeginningPoint
tempCounter = 0
# the meaning of this tuple pair
# first column value: how many postings are in this bucket
# second column value: how many postings are label 1 (top100)
tupleValuePair = (0,0)
for i in range(0,100):
    tempCounter += 1
    keyInString = "{0:.2f}".format(keyInFloat)
    if keyInString not in buckets100ArrayCounterDict:
        buckets100ArrayCounterDict[keyInString] = tupleValuePair
    keyInFloat += stepGapInFloat

print "tempCounter:",tempCounter
print "len(buckets100ArrayCounterDict):",len(buckets100ArrayCounterDict)
print "buckets100ArrayCounterDict:",buckets100ArrayCounterDict

baseValue = 0
keyInFloat = baseValue
for i in range(0,100):
    keyInString = str("{0:.2f}".format(keyInFloat))
    print keyInString,buckets100ArrayCounterDict[keyInString]
    keyInFloat += 0.01

# --- input-scan configuration and per-run counters ---
START_FROM_WHAT_ID_NUM = -1  # process only queries with qid >= this value (-1 means: process all)
numOfQueriesHavingQID = 0
MAX_NUM_QUERY_LENGTH = 10  # queries longer than this are skipped (the result rows only carry 10 per-term slots)
numOfQueriesHavingSearchContent = 0
numOfQueriesHavingSearchResults = 0

# Raw results file produced by the indexer; read sequentially below.
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsTail1KANDSemanticsTOP1M"
inputFileHandler = open(inputFileName,"r")

currentLine = inputFileHandler.readline()
nextLine = ""
currentQID = ""
numOfResultsForTheAllQueries = 0

# True while the current query should be processed (qid passed the threshold
# and the query length check); reset after each query.
currentQueryProcessFlag = False
# Main scan: walk the results file line by line. A "qid:" line opens a query,
# a "Search:" line carries the query terms, then (after a few skipped header
# lines) a run of 45-field result rows follows. Each non-zero partial-BM25
# posting in a result row is counted into the probability bucket matching the
# floor of its predicted probability.
while currentLine:
    # sampling parsing line:
    # qid: 701
    if currentLine.startswith("qid:"):
        currentQID = currentLine.strip().split(":")[1].strip()
        print "currentQID:",currentQID
        currentQIDInInt = int(currentQID)
        # Toggle the process flag based on the qid threshold; everything below
        # a "Search:" branch is guarded by this flag.
        if currentQIDInInt >= START_FROM_WHAT_ID_NUM:
            currentQueryProcessFlag = True
            print "PROCESS"
        else:
            currentQueryProcessFlag = False
            print "SKIP"
        numOfQueriesHavingQID += 1

    # sample parsing line:
    # Search: u s  oil industry history
    if currentLine.startswith("Search:") and currentQueryProcessFlag:
        elementList = currentLine.strip().split(" ")

        currentSearchContent = ""

        # Collect the non-empty tokens after the "Search:" marker.
        queryTermList = []
        for element in elementList[1:]:
            if element.strip() != "":
                queryTermList.append( element.strip() )
        currentQueryLength = len(queryTermList)
        print "currentQueryLength:",currentQueryLength

        if currentQueryLength > MAX_NUM_QUERY_LENGTH:
            print "Exceed The Query Max Length, SKIP"
            currentQueryProcessFlag = False
            # NOTE: this `continue` re-enters the loop with the SAME line; the
            # flag is now False so the "Search:" branch is skipped and the
            # readline at the bottom of the loop advances past it.
            continue

        for element in queryTermList:
            currentSearchContent += element + " "

        print currentSearchContent
        numOfQueriesHavingSearchContent += 1


        # Maps term position (int, as assigned by the engine) -> term string.
        currentQueryTermIndexDict = {}
        nextLine = inputFileHandler.readline()
        #print nextLine.strip()
        if nextLine.strip() != "":
            nextLine = inputFileHandler.readline()
            # print "nextLine(debug):",nextLine
            nextLineElements = nextLine.strip().split(" ")
            # print "nextLineElements:",nextLineElements
            # sample parsing line: 
            # oil:0 industry:1 history:2 u:3 s:4
            for element in nextLineElements:
                # NOTE(review): this guard tests the TERM string against a dict
                # whose keys are int positions, so it is always true; harmless
                # here because each position appears once, but the dedup it
                # appears to do never happens — confirm intent.
                if element.split(":")[0] not in currentQueryTermIndexDict:
                    currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]

            print "currentQueryTermIndexDict:",currentQueryTermIndexDict
            # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)

            currentLine = inputFileHandler.readline() # skip the line: currentLine(debug): threshold_socre_of_posting:0
            currentLine = inputFileHandler.readline() # skip the line: computation_method:0
            currentLine = inputFileHandler.readline() # skip the line: NEW Logistic Regression probability logic implementation by Wei 2013/06/19

            currentLine = inputFileHandler.readline() # Now, it is time to construct the training examples
            numOfResultsForTheCurrentQuery = 0
            currentResultLines = []



            # for debug ONLY
            # print "currentLine(debug):",currentLine
            # print 'len( currentLine.strip().split(" ") )(debug):',len( currentLine.strip().split(" ") )
            # exit(1)

            currentLineElements = currentLine.strip().split(" ")
            base = 1
            # A valid result row has exactly 45 space-separated fields; the row
            # run ends at the first line with a different field count.
            while len( currentLineElements ) == 45:
                numOfResultsForTheCurrentQuery += 1

                # One iteration per query term: fields are laid out in blocks
                # of 10 slots per statistic, offset by the term index i.
                for i in range(0,len(currentQueryTermIndexDict)):
                    # postingTuple format:
                    # (1) term, str( currentQueryTermIndexDict[i] )
                    # (2) internal_doc_id, str( currentLineElements[33] )
                    # (3) external_trec_id, str( currentLineElements[34] )
                    # (4) partialBM25_score, float( currentLineElements[base + i] )
                    # (5) freq_in_collection, int( currentLineElements[base + i + 10] )
                    # (6) freq_in_doc, int( currentLineElements[base + i + 10 + 10] )
                    # (7) doc_words(NOT distinct words), int( currentLineElements[31] )
                    # (8) result_rank_for_this_posting, int( currentLineElements[0] )
                    term = str( currentQueryTermIndexDict[i] )
                    internal_doc_id = str( currentLineElements[43] )
                    external_trec_id = str( currentLineElements[44] )
                    # do NOT need this info
                    # totalBM25_score = float( currentLineElements[42] )
                    probability_value = float( currentLineElements[base + i] )
                    # partialBM25 is used for checking whether this posting is existing or not
                    partialBM25_score = float( currentLineElements[base + i + 10] )
                    # do NOT need this info
                    # freq_in_collection = int( currentLineElements[base + i + 10 + 10] )
                    # do NOT need this info
                    # freq_in_doc = int( currentLineElements[base + i + 10 + 10 + 10] )
                    # do NOT need this info
                    # doc_words = int( currentLineElements[41] )
                    result_rank_for_this_posting = int( currentLineElements[0] )
                    # Label: "1" if the result ranks in the top 100, else "0".
                    postingLabel = "NA"
                    if result_rank_for_this_posting <= 100:
                        postingLabel = "1"
                    else:
                        postingLabel = "0"

                    # A zero partial BM25 means the posting does not exist for
                    # this term/doc pair — skip it.
                    if partialBM25_score != 0.0:
                        # 2 generative info should be enough
                        # They are: probability_value,postingLabel
                        # meta info includes:
                        # term,internal_doc_id,external_trec_id,probability_value,result_rank_for_this_posting

                        # step1:
                        # print "probability_value:",probability_value
                        # print "probability_value_left_ONLY_one_digit_in_string_format:","{0:.2f}".format(probability_value)
                        probability_value_left_ONLY_two_digit_in_float_format = float( "{0:.2f}".format(probability_value) )
                        # print "probability_value_left_ONLY_two_digit_in_float_format:",probability_value_left_ONLY_two_digit_in_float_format

                        # deal with the rounding problem
                        # format() rounds to NEAREST 2 decimals; if it rounded
                        # up past the true value, step one bucket back so the
                        # key is the FLOOR (the bucket's lower bound).
                        # NOTE(review): assumes probability_value < 1.0 — a
                        # value of exactly 1.0 would produce key "1.00", which
                        # is not in the bucket dict (KeyError). TODO confirm
                        # the upstream probability range.
                        if probability_value_left_ONLY_two_digit_in_float_format > probability_value:
                            current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format - 0.01
                        else:
                            current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format

                        # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
                        current_posting_key_in_string_format = "{0:.2f}".format(current_posting_key_in_float_format)
                        # Tuples are immutable: read the pair, bump the counts,
                        # store a fresh tuple back.
                        (currentBucketCounter,currentBucketTop100LabelCounter) = buckets100ArrayCounterDict[current_posting_key_in_string_format]
                        currentBucketCounter += 1
                        if postingLabel == "1":
                            currentBucketTop100LabelCounter += 1
                        else:
                            # just do nothing if postingLabel != 1
                            pass

                        newPairValueTuple = (currentBucketCounter,currentBucketTop100LabelCounter)
                        buckets100ArrayCounterDict[current_posting_key_in_string_format] = newPairValueTuple

                        # for debug ONLY
                        '''
                        if current_posting_key_in_string_format == "0.85" and postingLabel == "1":
                            print "-->:",newPairValueTuple,term,probability_value,result_rank_for_this_posting
                        elif current_posting_key_in_string_format == "0.85" and postingLabel != "1":
                            globalTempCounter += 1
                        '''

                        pass

                currentResultLines.append(currentLine.strip())
                currentLine = inputFileHandler.readline()
                currentLineElements = currentLine.strip().split(" ")

            numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery

            # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
            # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
            # This sampling method has been developed on 2013/01/09

            print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
            print "len(currentResultLines):",len(currentResultLines)

            # for debug ONLY
            # the following is the selection process
            # print "len(queryTermList):",len(queryTermList)
            if len(queryTermList) > 10:
                # The result list returned only have 10 holds so it only support the query which has the maximun length of 10
                pass
            else:

                if numOfResultsForTheCurrentQuery == 0:
                    pass
                    # print "currentQID:",currentQID
                    # print "currentSearchContent:",currentSearchContent
                    # print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
                    # print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                else:
                    numOfQueriesHavingSearchResults += 1
            print

            # for debug ONLY
            # if numOfQueriesHavingSearchResults == 3:
            #    break

        # Reset so the next query must pass its own qid check.
        currentQueryProcessFlag = False
    currentLine = inputFileHandler.readline()


print "final check begins..."
baseValue = 0
keyInFloat = baseValue
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    (value1,value2) = buckets100ArrayCounterDict[keyInString]
    print keyInString,value1,value2
    keyInFloat += 0.01
print "final check ends." 

# print "globalTempCounter:",globalTempCounter
print "numOfQueriesHavingQID:",numOfQueriesHavingQID
print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
print "Program Ends."




