# Program Purpose:
# (1) This script is meant to be edited by hand so that it matches the feature set of each ARFF training file.
# (2) It converts raw training files into ARFF-format machine-learning training files.

from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

def constructMetaDataLines(outputFileHandler, phaseNumber):
    """Write the ARFF header (the @relation line plus one @attribute line
    per feature) to outputFileHandler.

    phaseNumber selects which header layout is produced:
      0 -- 20140912 cost/gain tiering features (38 header lines)
      1 -- 20130727 phase-1 pruning features (25 header lines)
      2 -- 20130402 pruning features plus 25 term-dependency features
           (41 header lines)
    Any other value aborts the program with exit status 1.
    """
    if phaseNumber == 0:
        print("updated 2014/09/12 by Wei")
        print("constructMetaDataLines for the arff format training file")
        outputFileHandler.write("@relation learning_to_estimate_COST_GAIN_20140912_for_tiering\n")

        outputFileHandler.write("@attribute queryID string\n")
        # outputFileHandler.write("@attribute queryContent string\n")
        outputFileHandler.write("@attribute queryLength numeric\n")

        # per-term inverted-list lengths: ll1..ll10 for tier1, for tier2,
        # and for the tier1/tier2 ratio (30 attributes in total)
        for suffix in ("tier1", "tier2", "tier1_to_tier2"):
            for i in range(1, 11):
                outputFileHandler.write("@attribute ll%d_%s numeric\n" % (i, suffix))

        outputFileHandler.write("@attribute resultSizeUnderAND_tier1 numeric\n")
        outputFileHandler.write("@attribute resultSizeUnderOR_tier1 numeric\n")
        # outputFileHandler.write("@attribute realQueryProcessingCostAND_tier1 numeric\n")
        outputFileHandler.write("@attribute realQueryProcessingCostOR_tier1 numeric\n")
        # labels this model is trained to estimate:
        outputFileHandler.write("@attribute gain numeric\n")
        outputFileHandler.write("@attribute cost numeric\n")

    elif phaseNumber == 1:
        print("updated 2013/07/27 by Wei")
        print("constructMetaDataLines for the arff format training file")

        outputFileHandler.write("@relation learning_to_prune_20130727_Phase1_Pruning\n")
        phase1Attributes = [
            # identifiers (5)
            ("SelectedRankIndex", "numeric"),
            ("queryID", "string"),
            ("externalTrecID", "string"),
            ("internalDocID", "string"),
            ("term", "string"),
            # BM25 components (5)
            ("partialBM25ScoreComponentPart1_IDF", "real"),
            ("partialBM25ScoreComponentPart2_TF", "real"),
            ("partialBM25", "real"),
            ("length_of_the_inverted_index", "numeric"),
            ("term_freq_in_doc", "numeric"),
            # document / result-list statistics (5)
            ("doc_words", "numeric"),
            ("overallBM25Score", "real"),
            ("rank_in_result_list", "numeric"),
            ("term_freq_in_training_head95K_queries", "numeric"),
            ("term_freq_in_collection", "numeric"),
            # posting-position features (4)
            ("postingRankInDoc", "numeric"),
            ("postingRankInList", "numeric"),
            ("percentageForPostingRankInDoc", "real"),
            ("percentageForPostingRankInList", "real"),
            # class labels (5)
            ("TOP10Label", "{True,False}"),
            ("TOP100Label", "{True,False}"),
            ("TOP1000Label", "{True,False}"),
            ("TOP10000Label", "{True,False}"),
            ("TOP100000Label", "{True,False}"),
        ]
        for attrName, attrType in phase1Attributes:
            outputFileHandler.write("@attribute %s %s\n" % (attrName, attrType))

    elif phaseNumber == 2:
        # this branch has not been updated for a long time
        print("updated 2013/04/03 by Wei")
        print("constructMetaDataLines for weka training file")

        outputFileHandler.write("@relation learning_to_prune_20130402_TESTING_seperate_X_Axis_and_Y_Axis\n")
        phase2Attributes = [
            ("queryID", "string"),
            ("trecID", "string"),
            ("term", "string"),
            ("partialBM25", "real"),
            ("length_of_the_inverted_index", "real"),
            ("term_freq_in_doc", "real"),
            ("doc_words", "real"),
            ("overallBM25Score", "real"),
            ("rank_in_this_results_list_for_this_query", "real"),
            ("term_freq_in_queries", "real"),
            ("TOP10Label", "{False,True}"),
            # TOP100Label is computed on the fly when the data lines are built
            ("TOP100Label", "{False,True}"),
            ("posting_rank_in_list", "numeric"),
            ("posting_rank_in_doc", "numeric"),
            ("term_freq_in_collection", "numeric"),
        ]
        for attrName, attrType in phase2Attributes:
            outputFileHandler.write("@attribute %s %s\n" % (attrName, attrType))

        # extra 25 term-dependency features f11..f55, emitted column by
        # column (f11, f21, ..., f51, f12, f22, ...)
        for col in range(1, 6):
            for row in range(1, 6):
                outputFileHandler.write("@attribute f%d%d numeric\n" % (row, col))
    else:
        print("NOT support phase number")
        # BUG FIX: was the interactive-only exit() builtin; sys.exit is the
        # portable way to terminate with a failure status
        sys.exit(1)

def constructDataLinesOption2(outputFileHandler, sourceFileHandler):
    """Copy every data line from sourceFileHandler into outputFileHandler,
    dropping the source's informational header line and prefixing the ARFF
    @data marker."""
    print("constructDataLinesOption2")

    # first input line is an informational header -- discard it
    sourceFileHandler.readline()

    outputFileHandler.write("@data" + "\n")

    # remaining lines are already in their final form; copy them verbatim
    outputFileHandler.writelines(sourceFileHandler.readlines())

def constructDataLinesOption1(sourceFileHandler, outputFileHandler, phaseNumber):
    """Append the ARFF @data section to outputFileHandler from the raw
    training lines in sourceFileHandler.

    phaseNumber selects the input format:
      0 -- colon-separated tiering lines (the input has NO header line)
      1 -- space-separated phase-1 lines copied through verbatim
           (the first input line is a header and is skipped)
      2 -- space-separated 39-field lines re-assembled with an on-the-fly
           TOP100 label (the first input line is skipped)

    An unknown phaseNumber writes nothing and only prints the banner,
    matching the historical behavior of this function.
    """
    print("constructDataLinesOption1(...) called")
    if phaseNumber == 0:
        _writeTieringDataLines(sourceFileHandler, outputFileHandler)
    elif phaseNumber == 1:
        # SWITCH ON/OFF for headline
        sourceFileHandler.readline()
        outputFileHandler.write("@data\n")
        # phase-1 source lines are already in their final form, so each
        # line is copied through without decomposing and re-building it
        for currentLine in sourceFileHandler:
            outputFileHandler.write(currentLine)
    elif phaseNumber == 2:
        _writePhase2DataLines(sourceFileHandler, outputFileHandler)


def _writeTieringDataLines(sourceFileHandler, outputFileHandler):
    """Phase 0: convert one colon-separated tiering line per query into a
    comma-separated ARFF data line.

    Input layout (split on ':'):
      le[0]  queryID
      le[1]  query text (skipped -- the queryContent attribute is disabled)
      le[2]  query length (number of terms)
      le[3]  space-separated tier-1 inverted-list lengths, one per term
      le[4]  space-separated tier-2 inverted-list lengths, one per term
    and, addressed from the END of the line:
      le[-5] resultSizeUnderOR_tier1
      le[-4] query processing cost (the variable was named ...AND... while
             the ARFF header declares ...OR... -- TODO confirm which it is)
      le[-3] gain
      le[-2] cost
      le[-1] resultSizeUnderAND_tier1
    """
    outputFileHandler.write("@data\n")
    for rawLine in sourceFileHandler:
        le = rawLine.strip().split(":")
        queryID = le[0]
        queryLength = int(le[2])
        tier1Lengths = le[3].strip().split(" ")
        tier2Lengths = le[4].strip().split(" ")

        # the ARFF header declares exactly 10 per-term slots; shorter
        # queries pad the remaining slots with '?' (ARFF missing value)
        upperBound = min(queryLength, 10)
        padding = ["?"] * (10 - upperBound)

        fields = [queryID, str(queryLength)]
        fields += [tier1Lengths[i] for i in range(upperBound)] + padding
        fields += [tier2Lengths[i] for i in range(upperBound)] + padding
        # tier1/tier2 ratio per term, guarding against a zero tier-2 length
        for i in range(upperBound):
            if float(tier2Lengths[i]) == 0:
                fields.append("0")
            else:
                fields.append(str(float(tier1Lengths[i]) / float(tier2Lengths[i])))
        fields += padding

        fields.append(le[-1])  # resultSizeUnderAND_tier1
        fields.append(le[-5])  # resultSizeUnderOR_tier1
        fields.append(le[-4])  # query processing cost (see docstring TODO)
        fields.append(le[-3])  # gain
        fields.append(le[-2])  # cost

        outputLine = ",".join(fields)
        print(outputLine)
        outputFileHandler.write(outputLine + "\n")


def _writePhase2DataLines(sourceFileHandler, outputFileHandler):
    """Phase 2: re-emit each 39-field line with the TOP100 label inserted.

    Input layout (split on ' '):
      elements[0:11]  trecID, queryID, term, partialBM25,
                      length_of_the_inverted_index, term_freq_in_doc,
                      doc_words, overallBM25Score,
                      rank_in_this_results_list_for_this_query,
                      term_freq_in_queries, TOP10 label
      elements[11:14] posting_rank_in_list, posting_rank_in_doc,
                      term_freq_in_collection
      elements[14:39] 25 term-dependency features f11..f55

    The output keeps the input field order and inserts the computed TOP100
    label right after the TOP10 label. Aborts the program with status 1 if
    a line does not have exactly 39 fields.
    """
    # skip the infoHeadLine
    sourceFileHandler.readline()
    outputFileHandler.write("@data\n")
    for currentLine in sourceFileHandler:
        elements = currentLine.strip().split(" ")
        if len(elements) != 39:
            print("Format NOT right.")
            print("# of elements: " + str(len(elements)))
            sys.exit(1)

        # compute the TOP100 label on the fly from the rank feature
        rankInResultsList = elements[8]
        if int(rankInResultsList) <= 100:
            labelTop100 = "True"
        else:
            labelTop100 = "False"

        fields = elements[:11] + [labelTop100] + elements[11:]
        # NOTE: the original writer appended a space after every field,
        # leaving a trailing blank before the newline; preserved here
        outputFileHandler.write(" ".join(fields) + " \n")

def main():
    """Convert the 20140912 step-7 tiering training file into ARFF format
    (phase-0 header plus phase-0 data lines)."""
    print("Program Begins...")
    # Updated by Wei on 20140912
    ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140912_step7"
    ofn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/trainingFile_20140912_step7.arff"
    ifh = open(ifn, "r")
    ofh = open(ofn, "w")
    try:
        constructMetaDataLines(ofh, 0)
        constructDataLinesOption1(ifh, ofh, 0)
    finally:
        # close both handles even if the conversion fails part-way
        ifh.close()
        ofh.close()
    print("Overall:")
    print("ifn: " + ifn)
    print("ofn: " + ofn)
    # BUG FIX: the script used to terminate with exit(1), reporting FAILURE
    # to the calling shell even on a successful run; exit with status 0
    sys.exit(0)


def legacyPhase1Main():
    """Old 2013/07 phase-1 conversion path.

    This code previously sat (unreachably) below the exit() call at the end
    of the script; it is preserved here for reference but is never invoked.
    """
    # the date of the source file used at the time: 2013/07/16
    sourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline_sorted_by_queryID.train"
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19_sorted_by_queryID.arff"

    sourceFileHandler = open(sourceFileName, "r")
    outputFileHandler = open(outputFileName, "w")

    constructMetaDataLines(outputFileHandler, 1)
    constructDataLinesOption1(sourceFileHandler, outputFileHandler, 1)

    sourceFileHandler.close()
    outputFileHandler.close()
    print("Overall Processing Statistics:")
    print("sourceFileName: " + sourceFileName)
    print("outputFileName: " + outputFileName)
    print("Program Ends.")


if __name__ == "__main__":
    main()