from __future__ import division

import gzip
import os
import sys

# updated by Wei 2012/11/26
# This version is better because all of the important documents (both the
# TREC 2004-2006 human-judged set and the approximate-BM25 set) are included.

# inputFilesPath = "/data5/team/weijiang/the_new_trip_of_feature_generation/auxFiles/cluewebDataDocsMapping"
inputFilesPath = "/data5/team/obukai/the_new_trip_of_feature_generation/auxFilesALL/gov2DataDocsMapping"

# Maps a TREC document id (e.g. "GX000-00-0000000") to the string
# "<docID> <beginningPosition> <endingPosition>": the internal doc number plus
# the byte range of that document inside its compressed source file.
# Mapping-file line format (whitespace-separated):
#   docID  TRECID  compressedFileID  beginPos  endPos
doc_compressed_position_info_dict = {}
for dirname, dirnames, filenames in os.walk(inputFilesPath):
    for filename in filenames:
        inputSourceFileName = os.path.join(dirname, filename)
        # "with" guarantees the handle is closed; the original leaked one
        # open file handle per mapping file.
        with open(inputSourceFileName, "r") as inputSourceFileHandler:
            # Iterate the file lazily instead of readlines() so large
            # mapping files are not pulled into memory all at once.
            for line in inputSourceFileHandler:
                elements = line.strip().split(" ")
                docID = elements[0]
                WARCTRECID = elements[1]
                # elements[2] (compressedFileID) is not needed here: the
                # archive path is resolved via the second lookup dict below.
                beginningPosition = elements[3]
                endingPosition = elements[4]
                # Keep only the first mapping seen for a given TREC id.
                if WARCTRECID not in doc_compressed_position_info_dict:
                    doc_compressed_position_info_dict[WARCTRECID] = docID + " " + beginningPosition + " " + endingPosition


print("Length of the docLookUpDict(Unknown meaning): %d" % len(doc_compressed_position_info_dict))

# for example: 
# key:clueweb09-en0123-98-34344 
# value:45316 831227987 831242121
 
#for WARCTRECID in doc_compressed_position_info_dict:
#    print WARCTRECID,doc_compressed_position_info_dict[WARCTRECID]

# current version.
inputFilesPath2 = "/data5/team/obukai/the_new_trip_of_feature_generation/auxFilesALL/gov2DataCompressedFileMapping"

# Maps "<segment>_<compressedFileID>" (e.g. "en0130_79") to the absolute path
# of the compressed archive that holds those documents
# (e.g. "/data3/ClueWeb09_English_10/en0130/79.warc.gz").
compressed_file_location_info_dict = {}
for dirname, dirnames, segments in os.walk(inputFilesPath2):
    for segment in segments:
        inputSourceFileName = os.path.join(dirname, segment)
        # "with" closes the handle deterministically; the original leaked
        # one open file handle per mapping file.
        with open(inputSourceFileName, "r") as inputSourceFileHandler:
            # Lazy iteration instead of readlines() — no full-file buffering.
            for line in inputSourceFileHandler:
                # Second field of the line is the archive path.
                tempString = line.strip().split(" ")[1]
                # Archive id = file name without directory or extension,
                # e.g. ".../79.warc.gz" -> "79".
                compressedFileID = tempString.split("/")[-1].split(".")[0]
                key = segment + "_" + compressedFileID
                # A duplicate key means the mapping data is corrupt; abort.
                if key in compressed_file_location_info_dict:
                    print("error")
                    # sys.exit instead of the site-injected exit() helper,
                    # which is not guaranteed to exist in all environments.
                    sys.exit(1)
                compressed_file_location_info_dict[key] = tempString

print("Length of the fileLookUpDict(Unknown meaning): %d" % len(compressed_file_location_info_dict))


inputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_include_approximating_BM25_AND_human_judged.txt"

# TREC ids of every document that must be exported: the second
# whitespace-separated field of each line of the priority list.
outputTrecIDWebPageList = []
with open(inputFileName, "r") as inputFileHandler:
    # Lazy iteration instead of readlines(); "with" closes the handle
    # (the original left it open for the rest of the run).
    for line in inputFileHandler:
        outputTrecIDWebPageList.append(line.strip().split(" ")[1])
print("# of documents needed to export: %d" % len(outputTrecIDWebPageList))

# Path of the most recently decompressed archive — used by the export loop
# below to avoid re-reading the same archive for consecutive documents.
previousPath = ""

# NOTE(review): original comment suspected "some threading id issue here" —
# this script is single-threaded as written; confirm before parallelizing.
errorOutputFileName = "/data3/obukai/human_judge_web_pages_gov2_ALL/overallErrorMessage.txt"

# Deliberately NOT a "with" block: the export loop below writes to this
# handle and closes it at the very end of the script.
errorOutputFileHandler = open(errorOutputFileName, "w")


# Export each requested document: locate its archive, decompress it (cached
# across consecutive documents from the same archive), and dump the byte
# slice [beginningPosition:endingPosition] to its own output file.
for outputTrecID in outputTrecIDWebPageList:
    # gov2 id example: GX000-00-0000000
    # clueweb09 id example: clueweb09-en0000-00-07898
    trecID = outputTrecID
    print("please enter trecID: " + trecID)

    # "<segment>-<archive>-<doc>" -> lookup key "<segment>_<archive>",
    # matching the keys built for compressed_file_location_info_dict above.
    elements = trecID.split("-")
    segmentNum = elements[0]
    compressedFileNum = elements[1]
    key = segmentNum + "_" + compressedFileNum
    currentPath = compressed_file_location_info_dict[key]

    if trecID in doc_compressed_position_info_dict:
        # Split the "<docID> <begin> <end>" value once instead of three times.
        positionInfo = doc_compressed_position_info_dict[trecID].split(" ")
        docID = positionInfo[0]
        # Fixed typo: was "beginningPostion" in the original.
        beginningPosition = int(positionInfo[1])
        endingPosition = int(positionInfo[2])
        print("key: %s  currentPath: %s" % (key, currentPath))
        print("key: %s  docID: %s  beginningPosition: %d  endingPosition: %d"
              % (trecID, docID, beginningPosition, endingPosition))

        # Decompress only when the archive changes; the input list tends to
        # group documents from the same archive, so the cached content in
        # file_content is reused. "with" closes the gzip handle (the
        # original leaked one handle per distinct archive).
        if currentPath != previousPath:
            with gzip.open(currentPath, "rb") as f:
                file_content = f.read()
            previousPath = currentPath

        print("read the whole decompressed file into main memory...DONE")
        # NOTE(review): assumes the per-segment output directory already
        # exists — open() will fail otherwise; confirm it is pre-created.
        outputFileName = "/data3/obukai/human_judge_web_pages_gov2_ALL/" + trecID.split("-")[0] + "/" + trecID + "_" + docID + ".txt"
        print("outputFileName: " + outputFileName)
        with open(outputFileName, "w") as outputFileHandler:
            outputFileHandler.write(file_content[beginningPosition:endingPosition])
        print("dump the web page into disk...DONE")
        print("")
    else:
        # Document has no position mapping; log it and move on.
        errorOutputFileHandler.write(trecID + " is NOT in the dict doc_compressed_position_info_dict." + "\n")

# for example:
# key:en0130_79
# value:/data3/ClueWeb09_English_10/en0130/79.warc.gz

errorOutputFileHandler.close()