import sys

def constructMetaDataLines(outputFileHandler):
    """Write the ARFF header (@relation plus all @attribute declarations)
    for the weka training file to outputFileHandler."""
    print("constructMetaDataLines for weka training file")

    # Header lines in the exact order weka expects the columns;
    # the final attribute (relevenceScore) is the class label.
    headerLines = [
        "@relation learning_to_impact_20121009",
        "@attribute queryID string",
        "@attribute trecID string",
        "@attribute is_doc_in_WIKI {1, 0}",
        "@attribute hamProbability real",
        "@attribute query_terms numeric",
        "@attribute query_contains_number {True, False}",
        "@attribute query_is_number {True, False}",

        "@attribute sum_term_trace_freq numeric",
        "@attribute avg_term_trace_freq real",
        "@attribute min_term_trace_freq numeric",
        "@attribute max_term_trace_freq numeric",

        "@attribute url_length numeric",
        "@attribute url_in_top_million {True, False}",
        "@attribute url_nesting numeric",
        "@attribute url_domain {com, net, edu, gov, org, other}",

        "@attribute doc_size numeric",
        "@attribute doc_words numeric",
        "@attribute doc_distinct_words numeric",

        "@attribute text_size numeric",
        "@attribute script_size numeric",
        "@attribute script_text_ratio real",

        "@attribute doc_outlinks numeric",

        "@attribute sum_term_col_freq numeric",
        "@attribute max_term_col_freq numeric",
        "@attribute min_term_col_freq numeric",
        "@attribute avg_term_col_freq real",

        "@attribute terms_in_header numeric",
        "@attribute terms_in_title numeric",
        "@attribute terms_in_bold numeric",
        "@attribute terms_in_url numeric",
        "@attribute terms_in_italic numeric",

        "@attribute frac_terms_in_header real",
        "@attribute frac_terms_in_title real",
        "@attribute frac_terms_in_bold real",
        "@attribute frac_terms_in_url real",
        "@attribute frac_terms_in_italic real",

        "@attribute sum_term_freq_in_doc numeric",
        "@attribute min_term_freq_in_doc numeric",
        "@attribute max_term_freq_in_doc numeric",
        "@attribute avg_term_freq_in_doc real",

        "@attribute sum_term_rel_freq_in_doc real",
        "@attribute min_term_rel_freq_in_doc real",
        "@attribute max_term_rel_freq_in_doc real",
        "@attribute avg_term_rel_freq_in_doc real",

        "@attribute sum_term_BM25 real",
        "@attribute avg_term_BM25 real",
        "@attribute min_term_BM25 real",
        "@attribute max_term_BM25 real",

        # NOTE(review): "relevenceScore" is misspelled but kept verbatim --
        # it is only an attribute name in the generated ARFF file.
        "@attribute relevenceScore {-2, 0, 1, 2, 3}",
    ]

    for headerLine in headerLines:
        outputFileHandler.write(headerLine + "\n")
    

def constructDataLines(outputFileHandler, sourceFileHandler):
    """Write the ARFF @data section.

    Each line of sourceFileHandler must hold exactly 49 space-separated
    fields (trecID, queryID, 46 feature values, relevenceScore).  Valid
    lines are re-joined with single spaces and written to
    outputFileHandler; on the first malformed line the process exits
    with status 1.
    """
    # Fixed column count of the feature file (see header attributes).
    EXPECTED_FIELDS = 49

    print("constructDataLines")
    outputFileHandler.write("@data" + "\n")

    # Iterate the file object directly instead of readlines(): streams
    # line by line rather than loading the whole file into memory.
    for currentLine in sourceFileHandler:
        fields = currentLine.strip().split(" ")
        if len(fields) != EXPECTED_FIELDS:
            print("Format NOT right.")
            print("# of elements: %d" % len(fields))
            sys.exit(1)
        # The original copied every field into a named variable and then
        # concatenated them back separated by single spaces -- that whole
        # sequence is equivalent to a single join in field order.
        outputFileHandler.write(" ".join(fields) + "\n")
    
    
# Script entry: generate the weka ARFF training file from the raw
# feature file.  Paths are hard-coded for the 2012-10-10 run.
outputFileName = "/data1/team/weijiang/machine-learning-project-related/featuresGenerated/finalTrainingFiles/learning_to_impact_weka_training_file_20121010.arff"
sourceFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_output_feature_phase0_1_fix1.txt"

# Context managers guarantee both handles are closed even on error;
# the original never closed sourceFileHandler at all.
with open(outputFileName, "w") as outputFileHandler:
    with open(sourceFileName, "r") as sourceFileHandler:
        constructMetaDataLines(outputFileHandler)
        constructDataLines(outputFileHandler, sourceFileHandler)

print("Job Done.")