# This program has been updated by Wei 2013/07/19 afternoon on pangolin
# Note: For this time, I need to sample the total of 5M training posting instances
# Note: Let's break it into 2 steps, 
# step 1: generating 4M training posting instances
# step 2: generating 1M training posting instances
# Note: they should all come from different queries, especially from the training queries and the testing queries.

# This program has been updated by Wei 2013/07/15 afternoon on pangolin
# For the sampling method again, what we are using as the input source has been changed again since 2013/07/10
# Q: what variables I need to adjust when using the SAMPLING_METHOD = 3
# A: ONLY 4. They are: (1)TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED (2)inputFileName (3)outputFileName (4)outputAuxFileName

# This program has been updated by Wei 2013/06/07 morning
# Program inputs:
# option1 (These are just examples)
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/resultsNeededToAnalyze.txt"

# option2 (These are just examples)
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/smallResultSetNeededToBeAnalyzed.txt"

# Program outputs:
# option1 (These are just examples)
# (1) outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_09.txt"
# (2) outputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_09.aux"

# option2 (These are just examples)
# (1) outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_21.train"
# (2) outputAuxFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.aux"

from __future__ import division
import random
import math
import os

# Announce the start of the sampling run on stdout (Python 2 print statement).
print "Program Begins..."
# Updated by Wei: 2012/12/15
# human judged queries are NOT very related here cause the purpose of pruning is to mimic the behavior of the BM25 function applying to the unpruned index

# Many questions with answers:
# Q: How many training instances do you want?
# Current Answer by Wei 2013/06/07
# A: About 100 postings from the head 1000 queries
# OLD Answer by Wei 2013/01/09
# A: The # of training examples are NOT important cause we have a fast ML learner.

# Q: How does the sampling method work?
# Current Answer by Wei 2013/06/07
# A: In general, two ways. I can either select 50% from the top100 list and the rest 50% select from the tail list OR
#     I can select half of the topK from the beginning the list and half of the topK from the end of the list
# OLD Answer by Wei 2013/02/21
# A: 4 levels, Top10, Top100, Top1000, Top10000
# OLD Answer by Wei 2013/01/09
# A: 3 levels, TOP10, TOP50, TOP100
# select that document and then all the postings associated with that selected document (prof's advice)
# Note(Updated by Wei 2013/06/07): NOW, he actually think this is NOT the case. Cause in our phase1 machine learning, it should be independent of each posting

# OLD Answer by Wei 2012/12/15
# the first posting oriented training example, the relation name is called learning_to_prune_20121205 and has 382791 training instances. Now I want to have roughly the same amount of training instances, 
# but with a balance dataset which means that the # of positive training examples and the # of negative training examples are roughly the SAME. My goal is 400K training examples.

# OLD Answer by Wei weeks ago (The basetime is 2012/12/15)
# Previous Answer: From the previous training files I submitted to prof, there are 422853 training instances. So this time, let's make the number of training examples 450K 

# Some statistics about this smallResultSetNeededToBeAnalyzed.txt document:
# This file is located in /home/diaosi/web-search-engine-wei/polyIRIndexer/smallResultSetNeededToBeAnalyzed.txt
# Number of results(data lines) in the file smallResultSetNeededToBeAnalyzed.txt: UNKNOWN 
# Number of queries issued: 10
# Number of queries executed: 10
# Number of queries having results returned: 10
# Number of single term queries: 1
# Total querying time: 2.434235 seconds

# Some statistics about this resultsNeededToAnalyze.txt document:
# This file is located in /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/resultsNeededToAnalyze.txt
# Number of results(datalines) in the file resultsNeededToAnalyze.txt: 8353225
# Number of queries issued: 100000(efficiency task)+127(human judged) = 100127
# Number of queries executed: 99715
# Number of queries having results returned: 97243
# Number of single term queries: 1789
# Total querying time: 11434.219647 seconds

# Known issues?
# The average length of the queries are 3.4. Here, we neglect all the queries which length >= 10, is that OK?

# Updated by Wei 2013/07/11
# total number of posting examples needed
# note: this variable should be firstly used by the sampling method = 3
# the value = 10 is for debugging purposes
# the value = 100000 is for production mode

# Updated by Wei 2013/07/19 afternoon. Let's do 6M this time
# --- Global sampling configuration --------------------------------------
# Target number of training posting instances to emit for this run
# (raised to 6M on 2013/07/19; use a small value such as 10 for debugging,
# 100000 for production trials).
TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED = 6000000
# Running totals maintained while sampling: postings already written out
# versus postings merely selected so far.
TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED = 0
TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_PICKED = 0

# Guard for the main read loop: processing continues while this stays True.
NEED_TO_CONTINUE_FLAG = True

# Sampling strategy selector; -1 means "not chosen yet" (assigned properly
# further down, right before the strategy description is printed).
SAMPLING_METHOD = -1

# Queries with a qid below this threshold are skipped; -1 processes every
# query (set to e.g. 205 to resume a partially completed run).
START_FROM_WHAT_ID_NUM = -1

# Queries longer than this many terms are ignored entirely.
MAX_NUM_QUERY_LENGTH = 10


# values:
# SAMPLING_METHOD = 1 # OLD sampling method
# SAMPLING_METHOD = 2 # CURRENT sampling method
# Per-term posting budget used by SAMPLING_METHOD = 2: half of the picks
# come from the TOP100 portion of the result list, the remainder from
# outside the TOP100.
NUM_POSTINGS_PICKED_FOR_EACH_TERM = 100
# Use floor division instead of int(x / 2): with "from __future__ import
# division" the old form performed float division and then truncated,
# which is unidiomatic and lossy for very large budgets; // stays exact.
NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_TOP_100 = NUM_POSTINGS_PICKED_FOR_EACH_TERM // 2
# int() wrapper dropped: the difference of two ints is already an int.
NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_OUT_OF_TOP_100 = NUM_POSTINGS_PICKED_FOR_EACH_TERM - NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_TOP_100

# Echo the key run parameter so the console log records this run's configuration.
print "Program meta info display begins..."
print "TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:",TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED
print "Program meta info display ends."

# Choose the sampling method for this run and print a description of it.
# Method 3 (uniform random selection over the whole result range) is the
# one in use since 2013/07; methods 1 and 2 are kept for reference.
SAMPLING_METHOD = 3
if SAMPLING_METHOD == 1:
    print "OLD sampling method which I have NOT remembered(DO NOT use this sampling method since 2013/07/13)"
elif SAMPLING_METHOD == 2:
    print "Individually select for each query term by the following: [TOP100: a fix amount NOT_TOP100: a fix amount]"
elif SAMPLING_METHOD == 3:
    print "Random select either a fixed amount/a fixed percentage from the whole result range without any TOP100 and NOT TOP100 differences"

# Per-tier document quotas consumed only by the legacy SAMPLING_METHOD = 1:
# how many documents to draw at random from each rank bucket of the result
# list (top-10, ranks 11-50, 51-100, 101-1000, 1001-10000).
NumOfDocsSelectedFromTop10 = 4
# NOTE: the Top50 quota is skipped by the top-10000 branch of method 1.
NumOfDocsSelectedFromTop50 = 2
NumOfDocsSelectedFromTop100 = 4
NumOfDocsSelectedFromTop1000 = 4
NumOfDocsSelectedFromTop10000 = 4


# Per-run counters: number of queries that had a "qid:" line, a "Search:"
# line, and at least one result row, respectively.
numOfQueriesHavingQID = 0
numOfQueriesHavingSearchContent = 0
numOfQueriesHavingSearchResults = 0

# Input file: the raw query/result dump to sample training postings from.
# Earlier input choices are kept commented out below for reference.
# option1
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/resultsNeededToAnalyze.txt"
# option2
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/smallResultSetNeededToBeAnalyzed.txt"
# option3
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/20QueriesSmallResultSetNeededToBeAnalyzed.txt"
# option4 (for debug ONLY)
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/resultsNeededToAnalyze.txt_head_1000"
# option5 (updated by Wei 2013/06/07, production)
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/rawORSemanticsResultsForHead1000ResultsForTraining20130607"
# debug-sized input
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/babyRawResultsForTrainingInstanceDevelopment"
# current production input
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsHead10KANDSemanticsTOP2MResults"
# debug-mode alternative
# inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/rawORSemanticsResultsForHead1000ResultsForTraining_for_debug"
inputFileHandler = open(inputFileName,"r")

# Main output: one line per sampled training posting instance (.train).
# Earlier output choices kept for reference:
# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train"
# outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.smallTest.train"
# outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.train"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19.train"
outputFileHandler = open(outputFileName,"w")

# Auxiliary output: one line per query summarizing what was selected (.aux).
# Earlier output choices kept for reference:
# outputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.aux"
# outputAuxFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.aux"
outputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19.aux"
outputAuxFileHandler = open(outputAuxFileName,"w")

# Write the column headers for both output files.
# NOTE(review): "querryID" in the aux header looks like a typo, but it is
# preserved as-is because downstream parsers may match the exact string.
outputFileHandler.write("SelectedRankIndex queryID externalTrecID internalDocID term partialBM25ScoreComponentPart1_IDF partialBM25ScoreComponentPart2_TF partialBM25 length_of_inverted_index term_freq_in_doc doc_words overallBM25Score rank_in_result_list" + "\n")
outputAuxFileHandler.write("querryID LengthOfQuery NumberOfDocsSelected SelectedRankIndexList" + "\n")

# Prime the line-by-line parse of the input file.
currentLine = inputFileHandler.readline()
nextLine = ""
currentQID = ""
numOfResultsForTheAllQueries = 0

# True while the lines belonging to the current query should be processed
# (decided per query from START_FROM_WHAT_ID_NUM in the main loop).
currentQueryProcessFlag = False
# NOTE(review): this flag was already initialized to True earlier in the
# file; the re-assignment here is redundant but harmless.
NEED_TO_CONTINUE_FLAG = True

while currentLine and NEED_TO_CONTINUE_FLAG:
    # sampling parsing line:
    # qid: 701
    if currentLine.startswith("qid:"):
        currentQID = currentLine.strip().split(":")[1].strip()
        print "currentQID:",currentQID
        currentQIDInInt = int(currentQID)
        if currentQIDInInt >= START_FROM_WHAT_ID_NUM:
            currentQueryProcessFlag = True
            print "STATUS:PROCESS"
        else:
            currentQueryProcessFlag = False
            print "STATUS:SKIP"
        numOfQueriesHavingQID += 1
    
    # sample parsing line:
    # Search: u s  oil industry history
    if currentLine.startswith("Search:") and currentQueryProcessFlag:
        elementList = currentLine.strip().split(" ")
        
        currentSearchContent = ""
        
        queryTermList = []
        for element in elementList[1:]:
            if element.strip() != "":
                queryTermList.append( element.strip() )        
        currentQueryLength = len(queryTermList)
        print "currentQueryLength:",currentQueryLength
        
        if currentQueryLength > MAX_NUM_QUERY_LENGTH:
            print "Exceed The Query Max Length, SKIP"
            currentQueryProcessFlag = False
            continue

        for element in queryTermList:
            currentSearchContent += element + " "
        
        print currentSearchContent
        numOfQueriesHavingSearchContent += 1
        
        
        currentQueryTermIndexDict = {}
        nextLine = inputFileHandler.readline()
        #print nextLine.strip()
        if nextLine.strip() != "":
            nextLine = inputFileHandler.readline()
            # print "nextLine(debug):",nextLine
            nextLineElements = nextLine.strip().split(" ")
            # print "nextLineElements:",nextLineElements
            # sample parsing line: 
            # oil:0 industry:1 history:2 u:3 s:4
            for element in nextLineElements:
                if element.split(":")[0] not in currentQueryTermIndexDict:
                    currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
            
            print "currentQueryTermIndexDict:",currentQueryTermIndexDict
            # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
            
            currentLine = inputFileHandler.readline() # skip the line: currentLine(debug): threshold_socre_of_posting:0
            currentLine = inputFileHandler.readline() # skip the line: computation_method:x
            currentLine = inputFileHandler.readline() # skip the line: NEW Logistic Regression probability logic implementation by Wei 2013/06/19
            
            currentLine = inputFileHandler.readline() # Now, it is time to construct the training examples
            numOfResultsForTheCurrentQuery = 0
            currentResultLines = []
            
            # init process
            # This list will contain many tuples which has the term meta info
            # The num of list I prepared is 10
            currentQueryTerm0ResultList = []
            currentQueryTerm1ResultList = []
            currentQueryTerm2ResultList = []
            currentQueryTerm3ResultList = []
            currentQueryTerm4ResultList = []
            currentQueryTerm5ResultList = []
            currentQueryTerm6ResultList = []
            currentQueryTerm7ResultList = []
            currentQueryTerm8ResultList = []
            currentQueryTerm9ResultList = []
            currentQueryTermsWholeListOfLists = []
            
            currentQueryTermsWholeListOfLists.append(currentQueryTerm0ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm1ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm2ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm3ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm4ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm5ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm6ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm7ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm8ResultList)
            currentQueryTermsWholeListOfLists.append(currentQueryTerm9ResultList)
            
            # for debug ONLY begins...
            # print "currentLine(debug):",currentLine
            # print 'len( currentLine.strip().split(" ") )(debug):',len( currentLine.strip().split(" ") )
            # exit(1)
            # for debug ONLY ends.
            
            currentLineElements = currentLine.strip().split(" ")
            base = 1
            while len( currentLineElements ) == 65:
                numOfResultsForTheCurrentQuery += 1
                
                for i in range(0,len(currentQueryTermIndexDict)):
                    # training posting tuple instance format:
                    # (1) term, str( currentQueryTermIndexDict[i] )
                    # (2) internal_doc_id, str( currentLineElements[33] )
                    # (3) external_trec_id, str( currentLineElements[34] )
                    # (4) partialBM25_score, float( currentLineElements[base + i] )
                    # (5) freq_in_collection, int( currentLineElements[base + i + 10] )
                    # (6) freq_in_doc, int( currentLineElements[base + i + 10 + 10] )
                    # (7) doc_words(NOT distinct words), int( currentLineElements[31] )
                    # (8) result_rank_for_this_posting, int( currentLineElements[0] )
                    term = str( currentQueryTermIndexDict[i] )
                    internal_doc_id = str( currentLineElements[63] )
                    external_trec_id = str( currentLineElements[64] )
                    totalBM25_score = float( currentLineElements[62] )
                    # newly updated for each training posting instance
                    partialBM25_score_component_part1 = float( currentLineElements[base + 10 + i] )
                    partialBM25_score_component_part2 = float( currentLineElements[base + 10 + 10 + i] )
                    partialBM25_score = float( currentLineElements[base + 10 + 10 + 10 + i] )
                    freq_in_collection = int( currentLineElements[base + 10 + 10 + 10 + i + 10] )
                    freq_in_doc = int( currentLineElements[base + 10 + 10 + 10 + i + 10 + 10] )
                    doc_words = int( currentLineElements[61] )
                    result_rank_for_this_posting = int( currentLineElements[0] )
                    
                    if partialBM25_score != 0.0:
                        postingTuple = (term, internal_doc_id, external_trec_id, totalBM25_score, partialBM25_score_component_part1, partialBM25_score_component_part2, partialBM25_score, freq_in_collection, freq_in_doc, doc_words, result_rank_for_this_posting)
                        currentQueryTermsWholeListOfLists[i].append(postingTuple)
                currentResultLines.append(currentLine.strip())
                currentLine = inputFileHandler.readline()
                currentLineElements = currentLine.strip().split(" ")
            
            numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
            
            # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
            # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
            # This sampling method has been developed on 2013/01/09
            
            print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
            # for debug purpose
            # print "len(currentResultLines):",len(currentResultLines)
            for i in range(0,len(currentQueryTermsWholeListOfLists)):
                print "currentQueryTerm",str(i),"ResultList:",len(currentQueryTermsWholeListOfLists[i])
                if len(currentQueryTermsWholeListOfLists[i]) == 0:
                    continue
                '''
                if len(currentQueryTermsWholeListOfLists[i]) >= 2:
                    print currentQueryTermsWholeListOfLists[i][0],currentQueryTermsWholeListOfLists[i][1]
                '''

            # for debug ONLY
            # the following is the selection process
            # print "len(queryTermList):",len(queryTermList)
            if len(queryTermList) > 10:
                # The result list returned only have 10 holds so it only support the query which has the maximun length of 10
                pass
            else:
                
                if numOfResultsForTheCurrentQuery == 0:
                    pass
                    # print "currentQID:",currentQID
                    # print "currentSearchContent:",currentSearchContent
                    # print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
                    # print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                else:
                    numOfQueriesHavingSearchResults += 1
                
                
                selectedDocsIndexList = []
                
                # init process
                currentQueryTerm0SelectRankList = []
                currentQueryTerm1SelectRankList = []
                currentQueryTerm2SelectRankList = []
                currentQueryTerm3SelectRankList = []
                currentQueryTerm4SelectRankList = []
                currentQueryTerm5SelectRankList = []
                currentQueryTerm6SelectRankList = []
                currentQueryTerm7SelectRankList = []
                currentQueryTerm8SelectRankList = []
                currentQueryTerm9SelectRankList = []
                currentQueryTermsWholeSelectRankListOfLists = []
                
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm0SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm1SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm2SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm3SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm4SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm5SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm6SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm7SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm8SelectRankList)
                currentQueryTermsWholeSelectRankListOfLists.append(currentQueryTerm9SelectRankList)
                
                
                # OLD sampling method
                if SAMPLING_METHOD == 1:
                    # This is truly an very naive way of selecting the documents
                    # But currently, just stick to it.
                    
                    if numOfResultsForTheCurrentQuery <= 10 and numOfResultsForTheCurrentQuery > 0:
                        #print "select docs from TOP10"
                        
                        # step1
                        if numOfResultsForTheCurrentQuery >= NumOfDocsSelectedFromTop10:
                            for i in range(0,NumOfDocsSelectedFromTop10):
                                currentSelectedDocIndex = random.randint(0, numOfResultsForTheCurrentQuery-1)
                                while currentSelectedDocIndex in selectedDocsIndexList:
                                    currentSelectedDocIndex = random.randint(0, numOfResultsForTheCurrentQuery-1)
                                selectedDocsIndexList.append(currentSelectedDocIndex)
                        else:
                            print "# of results do NOT satisfy the # of documents requried.(Just pass)"
                            pass
                        
                    elif numOfResultsForTheCurrentQuery <= 50 and numOfResultsForTheCurrentQuery > 10:
                        #print "select docs from TOP50"
        
                        #step1
                        for i in range(0,NumOfDocsSelectedFromTop10):
                            currentSelectedDocIndex = random.randint(0, 9)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(0, 9)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
        
                        #step2
                        if (numOfResultsForTheCurrentQuery-10) >= NumOfDocsSelectedFromTop50:
                            for i in range(0,NumOfDocsSelectedFromTop50):
                                currentSelectedDocIndex = random.randint(10, numOfResultsForTheCurrentQuery-1)
                                while currentSelectedDocIndex in selectedDocsIndexList:
                                    currentSelectedDocIndex = random.randint(10, numOfResultsForTheCurrentQuery-1)
                                selectedDocsIndexList.append(currentSelectedDocIndex)
                        else:
                            print "# of results do NOT satisfy the # of documents requried.(Just pass)"
                            pass                    
                                        
                    elif numOfResultsForTheCurrentQuery <= 100 and numOfResultsForTheCurrentQuery > 50:
                        #print "select docs from TOP100"
        
                        #step1
                        for i in range(0,NumOfDocsSelectedFromTop10):
                            currentSelectedDocIndex = random.randint(0, 9)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(0, 9)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
        
                        #step2
                        for i in range(0,NumOfDocsSelectedFromTop50):
                            currentSelectedDocIndex = random.randint(10, 49)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(10, 49)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
                        
                        #step3
                        if (numOfResultsForTheCurrentQuery-50) >= NumOfDocsSelectedFromTop100:
                            for i in range(0,NumOfDocsSelectedFromTop100):
                                currentSelectedDocIndex = random.randint(50, numOfResultsForTheCurrentQuery-1)
                                while currentSelectedDocIndex in selectedDocsIndexList:
                                    currentSelectedDocIndex = random.randint(50, numOfResultsForTheCurrentQuery-1)
                                selectedDocsIndexList.append(currentSelectedDocIndex)
                        else:
                            print "# of results do NOT satisfy the # of documents requried.(Just pass)"
                            pass
                    # updated by Wei 2013/02/21
                    # I know that this sampling alg. is NOT feasible, but stick to it for the current.
                    # NEW support for the 10000 documents 
                    elif numOfResultsForTheCurrentQuery <= 10000 and numOfResultsForTheCurrentQuery > 1000:
                        #step1
                        for i in range(0,NumOfDocsSelectedFromTop10):
                            currentSelectedDocIndex = random.randint(0, 9)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(0, 9)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
                        
                        # neglect the NumOfDocsSelectedFromTop50 from now.
                        '''
                        #step2
                        for i in range(0,NumOfDocsSelectedFromTop50):
                            currentSelectedDocIndex = random.randint(10, 49)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(10, 49)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
                        '''
                        
                        #step3
                        for i in range(0,NumOfDocsSelectedFromTop100):
                            currentSelectedDocIndex = random.randint(50, 99)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(50, 99)
                            selectedDocsIndexList.append(currentSelectedDocIndex)
                            
                        #step4
                        for i in range(0,NumOfDocsSelectedFromTop1000):
                            currentSelectedDocIndex = random.randint(100, 999)
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint(100, 999)
                            selectedDocsIndexList.append(currentSelectedDocIndex)                    
                                            
                        #step5
                        if (numOfResultsForTheCurrentQuery-1000) >= NumOfDocsSelectedFromTop100:
                            for i in range(0,NumOfDocsSelectedFromTop100):
                                currentSelectedDocIndex = random.randint(1000, numOfResultsForTheCurrentQuery-1)
                                while currentSelectedDocIndex in selectedDocsIndexList:
                                    currentSelectedDocIndex = random.randint(1000, numOfResultsForTheCurrentQuery-1)
                                selectedDocsIndexList.append(currentSelectedDocIndex)
                        else:
                            print "# of results do NOT satisfy the # of documents requried.(Just pass)"
                            pass
                
                # current Sampling method, updated by Wei 2013/06/07
                elif SAMPLING_METHOD == 2:
                    print "in sampling method 2:"
                    
                    
                    # Stop for a moment cause I am just feeling that I am suitable for the programming work today.
                    # Note: Updated by Wei 2013/06/08 night 2013/06/08
                    # The method I am currently doing is: fix number, say 100. But select randomly among the whole range
                    # I think even prof has some concerns about this
                    # This is NOT the type of Wei Jiang doing, let's do our business, show the power of Wei
                    '''
                    for i in range(0,len(currentQueryTermsWholeListOfLists)):
                        print "currentQueryTerm",str(i),"ResultList:",len(currentQueryTermsWholeListOfLists[i])
                        # select the proper postings happened here
                        NUM_OF_POSTINGS_CAN_PICK = -1
                        if NUM_POSTINGS_PICKED_FOR_EACH_TERM > len(currentQueryTermsWholeListOfLists[i]):
                            NUM_OF_POSTINGS_CAN_PICK = len(currentQueryTermsWholeListOfLists[i])
                        else:
                            NUM_OF_POSTINGS_CAN_PICK = NUM_POSTINGS_PICKED_FOR_EACH_TERM
                        
                        for j in range(0,NUM_OF_POSTINGS_CAN_PICK):
                            currentSelectedDocIndex = random.randint(theTOP100BoundaryIndexForTheFollowingList, len(currentQueryTermsWholeListOfLists[i]) )
                            while currentSelectedDocIndex in selectedDocsIndexList:
                                currentSelectedDocIndex = random.randint( theTOP100BoundaryIndexForTheFollowingList, len(currentQueryTermsWholeListOfLists[i]) )
                            selectedDocsIndexList.append(currentSelectedDocIndex)
                    '''
                    
                    # Note: Updated by Wei 2013/06/08
                    # It is under construction and has bugs. And also has theoretical problems:
                    # Why I need to pick 50 postings in the top100 but JUST NOT uniformly randomly select from the whole range.
                    
                    for i in range(0,len(currentQueryTermsWholeListOfLists)):
                        # find the boundary index
                        numOfPostingsInAndIncludeTOP100 = 0
                        numOfPostingsOutOfTOP100 = 0
                        
                        for index,tuple in enumerate( currentQueryTermsWholeListOfLists[i] ):
                            # (term, 
                            # internal_doc_id, 
                            # external_trec_id, 
                            # totalBM25_score, 
                            # partialBM25_score, 
                            # freq_in_collection, 
                            # freq_in_doc, 
                            # doc_words, 
                            # result_rank_for_this_posting)
                            (_, _, _, _, _, _, _, _, result_rank_for_this_posting) = tuple
                            if result_rank_for_this_posting > 100:
                                break
                            else:
                                numOfPostingsInAndIncludeTOP100 += 1
                        
                        numOfPostingsOutOfTOP100 = len(currentQueryTermsWholeListOfLists[i]) - numOfPostingsInAndIncludeTOP100 
                        print
                        print "currentQueryTerm",str(i),"ResultList:",len(currentQueryTermsWholeListOfLists[i])
                         
                        print "numOfPostingsInAndIncludeTOP100:",numOfPostingsInAndIncludeTOP100
                        print "numOfPostingsOutOfTOP100:",numOfPostingsOutOfTOP100
                        
                        # decide # of postings being picked from the TOP100
                        NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100 = -1
                        if NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_TOP_100 > numOfPostingsInAndIncludeTOP100:
                            NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100 = numOfPostingsInAndIncludeTOP100
                        else:
                            NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100 = NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_TOP_100
                        print "NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100:",NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100
                        
                        # decide # of postings being picked from outside the TOP100
                        NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100 = -1
                        if NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_OUT_OF_TOP_100 > numOfPostingsOutOfTOP100:
                            '''
                            # debug ONLY
                            print "mark1"
                            print "len(currentQueryTermsWholeListOfLists[i]):",len(currentQueryTermsWholeListOfLists[i])
                            '''
                            NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100 = numOfPostingsOutOfTOP100
                        else:
                            '''
                            print "mark2"
                            '''
                            NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100 = NUM_POSTINGS_PICKED_FOR_EACH_TERM_FROM_OUT_OF_TOP_100
                        print "NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100:",NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100
                        
                        # actual selection process for the TOP100: select the rank list from the TOP100
                        # for the function random.randint, this needs to be taken care of
                        for j in range(0,NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_TOP_100):
                            currentSelectedDocIndex = random.randint(0,numOfPostingsInAndIncludeTOP100-1)
                            while currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                currentSelectedDocIndex = random.randint(0,numOfPostingsInAndIncludeTOP100-1)
                            currentQueryTermsWholeSelectRankListOfLists[i].append(currentSelectedDocIndex)
                        
                        # actual selection process for out of the TOP100: select the rank list from out of TOP100
                        for j in range(0,NUM_OF_POSTINGS_ACTUALLY_PICK_FROM_OUT_OF_TOP_100):
                            currentSelectedDocIndex = random.randint(numOfPostingsInAndIncludeTOP100,len(currentQueryTermsWholeListOfLists[i])-1 )
                            while currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                currentSelectedDocIndex = random.randint(numOfPostingsInAndIncludeTOP100,len(currentQueryTermsWholeListOfLists[i])-1 )
                            currentQueryTermsWholeSelectRankListOfLists[i].append(currentSelectedDocIndex)

                        currentQueryTermsWholeSelectRankListOfLists[i].sort(cmp=None, key=None, reverse=False) 
                        print "len(currentQueryTermsWholeSelectRankListOfLists[i]):",len(currentQueryTermsWholeSelectRankListOfLists[i])
                        print "currentQueryTermsWholeSelectRankListOfLists[i]:",currentQueryTermsWholeSelectRankListOfLists[i]
                        
                        if len( currentQueryTermsWholeSelectRankListOfLists[i] ) != 0:
                            outputAuxLine = currentQID + " " + str(currentQueryLength) + " " + str(currentQueryTermIndexDict[i]) + " " + str( len( currentQueryTermsWholeSelectRankListOfLists[i] ) ) + " "
                            for currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                outputAuxLine += str(currentSelectedDocIndex) + " "
                            outputAuxLine += "\n"
                            outputAuxFileHandler.write(outputAuxLine)
                            
                            for currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                # print "currentSelectedDocIndex:",currentSelectedDocIndex
                                # print "currentQueryTermsWholeListOfLists[i][currentSelectedDocIndex](test):",currentQueryTermsWholeListOfLists[i][currentSelectedDocIndex]
                                # (term, internal_doc_id, external_trec_id, totalBM25_score, partialBM25_score, freq_in_collection, freq_in_doc, doc_words, result_rank_for_this_posting)
                                # print "i:",i
                                # print "currentSelectedDocIndex:",currentSelectedDocIndex
                                (outputTerm, outputInternalDocID, outputExternalTrecID, outputTotalBM25Score, partialBM25_score_component_part1, partialBM25_score_component_part2, outputTermPartialBM25Score, outputTermInCollectionFreq, outputTermInDocFreq, outputTermDocWordsSize, outputRank) = currentQueryTermsWholeListOfLists[i][currentSelectedDocIndex]
                            
                                # index values and their meaning:
                                # 0: ranking for this query from the IR system
                                # 31: document size
                                # 32: total BM25 score
                                # 33: trecID
                                # print "currentLineElements:",currentLineElements
                                # print "len(queryTermList):",len(queryTermList)
                                
                                outputQID = currentQID
                                outputTrainingExample = str(currentSelectedDocIndex) + " " + outputQID + " " + outputExternalTrecID + " " + str(outputInternalDocID) + " " + outputTerm + " " + str(partialBM25_score_component_part1) + " " + str(partialBM25_score_component_part2) + " " + str(outputTermPartialBM25Score) + " " + str(outputTermInCollectionFreq) + " " + str(outputTermInDocFreq) + " " + str(outputTermDocWordsSize) + " " + str(outputTotalBM25Score) + " " + str(outputRank)
                                outputFileHandler.write(outputTrainingExample + "\n")
                                TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED += 1
                # Sampling method = 3, updated by Wei 2013/07/11 night
                # Uniform sampling: for every query term, pick up to
                # NUM_POSTINGS_PICKED_FOR_EACH_TERM distinct rank indices uniformly
                # at random over the term's whole posting list, stopping once
                # TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED postings have been picked
                # in total (across queries).
                elif SAMPLING_METHOD == 3:
                    print "in sampling method 3:"
                    # Let's just select a fix amount of postings from the whole result range.
                    for i in range(0,len(currentQueryTermsWholeListOfLists)):
                        
                        if len(currentQueryTermsWholeListOfLists[i]) != 0:
                            # Clamp the per-term pick count to what is available.
                            NUM_OF_POSTINGS_CAN_PICK = -1
                            if NUM_POSTINGS_PICKED_FOR_EACH_TERM > len(currentQueryTermsWholeListOfLists[i]):
                                NUM_OF_POSTINGS_CAN_PICK = len(currentQueryTermsWholeListOfLists[i])
                            else:
                                NUM_OF_POSTINGS_CAN_PICK = NUM_POSTINGS_PICKED_FOR_EACH_TERM
                                
                            print "NUM_OF_POSTINGS_CAN_PICK:",NUM_OF_POSTINGS_CAN_PICK
                            
                            # actual selection process for the postings:
                            # rejection-sample distinct indices without replacement;
                            # random.randint is inclusive at both ends, hence the -1.
                            for j in range(0,NUM_OF_POSTINGS_CAN_PICK):
                                currentSelectedDocIndex = random.randint(0,len(currentQueryTermsWholeListOfLists[i])-1 )
                                while currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                    currentSelectedDocIndex = random.randint(0,len(currentQueryTermsWholeListOfLists[i])-1 )
                                currentQueryTermsWholeSelectRankListOfLists[i].append(currentSelectedDocIndex)
                                TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_PICKED += 1
                                # the judgement statement: stop picking once the
                                # global target has been reached.
                                # NOTE(review): this break only exits the for-j loop;
                                # the NEED_TO_CONTINUE_FLAG check further down (and
                                # presumably in the enclosing loops outside this view)
                                # finishes the unwinding -- confirm against the full file.
                                if TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_PICKED >= TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:
                                    # need to debug
                                    NEED_TO_CONTINUE_FLAG = False
                                    break
    
                            currentQueryTermsWholeSelectRankListOfLists[i].sort(cmp=None, key=None, reverse=False) 
                            print "NUM_OF_POSTINGS_ACTUALLY_PICK for term index",i,":",len(currentQueryTermsWholeSelectRankListOfLists[i])
                            print "currentQueryTermsWholeSelectRankListOfLists[i]:",currentQueryTermsWholeSelectRankListOfLists[i]
                            
                            if len( currentQueryTermsWholeSelectRankListOfLists[i] ) != 0:
                                # aux line format: qid, query length, term index,
                                # count, then the sorted selected rank indices
                                outputAuxLine = currentQID + " " + str(currentQueryLength) + " " + str(currentQueryTermIndexDict[i]) + " " + str( len( currentQueryTermsWholeSelectRankListOfLists[i] ) ) + " "
                                for currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                    outputAuxLine += str(currentSelectedDocIndex) + " "
                                outputAuxLine += "\n"
                                outputAuxFileHandler.write(outputAuxLine)
                                
                                # Emit one training line per selected posting.  The
                                # unpack below shows each posting tuple carries 11
                                # fields: (term, internal_doc_id, external_trec_id,
                                # totalBM25_score, partialBM25_score_component_part1,
                                # partialBM25_score_component_part2, partialBM25_score,
                                # freq_in_collection, freq_in_doc, doc_words,
                                # result_rank_for_this_posting)
                                for currentSelectedDocIndex in currentQueryTermsWholeSelectRankListOfLists[i]:
                                    (outputTerm, outputInternalDocID, outputExternalTrecID, outputTotalBM25Score, partialBM25_score_component_part1, partialBM25_score_component_part2, outputTermPartialBM25Score, outputTermInCollectionFreq, outputTermInDocFreq, outputTermDocWordsSize, outputRank) = currentQueryTermsWholeListOfLists[i][currentSelectedDocIndex]
                                
                                    outputQID = currentQID
                                    outputTrainingExample = str(currentSelectedDocIndex) + " " + outputQID + " " + outputExternalTrecID + " " + str(outputInternalDocID) + " " + outputTerm + " " + str(partialBM25_score_component_part1) + " " + str(partialBM25_score_component_part2) + " " + str(outputTermPartialBM25Score) + " " + str(outputTermInCollectionFreq) + " " + str(outputTermInDocFreq) + " " + str(outputTermDocWordsSize) + " " + str(outputTotalBM25Score) + " " + str(outputRank)
                                    outputFileHandler.write(outputTrainingExample + "\n")
                                    TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED += 1
                            # This is actually for the final break: once the global
                            # target is reached, stop iterating over this query's terms.
                            if not NEED_TO_CONTINUE_FLAG:
                                print "Get enough original posting training examples :)"
                                break # This break will help me break to the most outside while loop and it is also controlled by the NEED_TO_CONTINUE_FLAG flag
                        else:
                            # term has an empty result list -- nothing to sample
                            pass
                            # print "the result list of the term index:",i,"do NOT contain any results"
            print
        
        # Done processing this query's result block; reset the flag so the
        # outer loop starts looking for the next query.
        currentQueryProcessFlag = False
    # Advance the outer while loop to the next line of the input file.
    currentLine = inputFileHandler.readline()
    
    '''
    # for debug ONLY logic begins...
    # for the stopping signal: 
    if numOfQueriesHavingQID == 100:
        break
    # for debug ONLY logic ends.
    '''

print "Overall Sampling Statistics:"   
print "numOfQueriesHavingQID:",numOfQueriesHavingQID
print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
print "SAMPLING_METHOD:",SAMPLING_METHOD
print "NUM_POSTINGS_PICKED_FOR_EACH_TERM:",NUM_POSTINGS_PICKED_FOR_EACH_TERM
print "TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED:",TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED
print "TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:",TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED
if TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED < TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:
    print "--->NOT sample enough examples"
elif TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED == TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:
    print "--->Sample exactly the amount of examples requested"
elif TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED > TOTAL_NUMBER_OF_TRAINING_POSTING_NEEDED:
    print "--->Sample too many examples"

print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
print "outputAuxFileName:",outputAuxFileName
# Updated by Wei 2013/06/07
# The question now is what we currently have and how to select the posting examples for each query term among the query. 

# incidents report  
# That night is 2013/01/09, Bluma filled the folder and then my program breaks.
# This is the ONLY piece I left for this. God bless me. Wei Jiang

# sampling method 2: among all the postings in top10, select some number of postings. Doesn't take consideration of the individual document
# This method DOES NOT make sure that once the document have been selected, ALL the coresponding postings will also be selected.
# This sampling method has been developed weeks ago.

print "Program Ends."