"""Documentation about all HELPER function"""

import helper
import document
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()

import nltk.data
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

#Gunakan WordPunctTokenizar agar dipisah berdasarkan punctuation
from nltk.tokenize import WordPunctTokenizer
word_tokenizer = WordPunctTokenizer()

import math

class DocumentQueryProcessing:
    """Builds an inverted file from a .I/.T/.A/.W formatted collection and
    preprocesses free-text queries with the same pipeline.

    Every posting in ``one_term_tuples`` is a 5-element list:
        [term, doc_id, raw_tf, df, weight]
    """

    def __init__(self, document_collection_file, stop_list, is_use_stemming, term_weighting_code, inverted_file_saved_path):
        """
        document_collection_file -- path to the document collection file
        stop_list                -- path to a newline-separated stopword file
        is_use_stemming          -- apply the Porter stemmer when True
        term_weighting_code      -- SMART-style code, e.g. "lnc.ltc"
                                    (chars 0-2: document side, 4-6: query side)
        inverted_file_saved_path -- where save_to_inverted_file() writes
        """
        self.document_collection_file = document_collection_file
        self.stop_list = stop_list
        self.term_weighting_code = term_weighting_code
        self.document_term_weighting_code = term_weighting_code[0:3]
        self.query_term_weighting_code = term_weighting_code[4:7]
        self.inverted_file_saved_path = inverted_file_saved_path
        self.is_use_stemming = is_use_stemming

        # Per-instance state.  These used to be mutable CLASS attributes, so
        # every instance appended into the same lists/dicts -- a latent bug.
        self.total_document = 0
        self.one_term_tuples = []
        self.term_frequency_dict = {}
        self.document_frequency_dict = {}
        self.document_tf_max = -1
        self.document_list = {}
        # Number of DISTINCT terms per document, keyed by doc id; insertion
        # order matters (see doDocumentTermWeighting).
        self.total_term_per_document = {}
        # Stopword set, loaded lazily on first use and then cached.
        self._stop_words = None

    def lowerCase(self, doc):
        """Lowercase the document's fields in place (delegates to Document)."""
        doc.lowerCase()

    def documentTokenization(self, doc):
        """Tokenize title + author + content, strip punctuation, drop blanks."""
        tokens = doc.titleTokenization() + doc.authorTokenization() + doc.contentTokenization()
        tokens = [helper.punctuation.sub("", word) for word in tokens]
        return [word for word in tokens if word.strip()]

    def stopWordsRemoval(self, document_list_token):
        """Return the tokens that are not in the stopword file.

        The file is read once and cached as a set (the original re-read it --
        without ever closing it -- on every call, with O(n) list lookups).
        """
        if self._stop_words is None:
            with open(self.stop_list, 'r') as f_stop_words:
                # rstrip('\n') instead of line[:-1]: does not chop a real
                # character when the last line has no trailing newline.
                self._stop_words = set(line.rstrip('\n') for line in f_stop_words)
        return [word for word in document_list_token if word not in self._stop_words]

    def stemming(self, document_list_token):
        """Porter-stem every token in place."""
        for i in range(len(document_list_token)):
            document_list_token[i] = stemmer.stem(document_list_token[i])

    def updateDFDict(self):
        """Fold the current document's terms into the document-frequency dict."""
        for term in self.term_frequency_dict:
            self.document_frequency_dict[term] = self.document_frequency_dict.get(term, 0) + 1

    def getApplyTF(self, tf_code, one_term):
        """Apply a SMART tf variant to the posting's raw tf (one_term[2]).

        n: natural, l: logarithmic, b: boolean, a: augmented.
        Returns None for an unknown code (same as the original).
        """
        raw_tf = one_term[2]
        if tf_code == "n":
            return raw_tf
        elif tf_code == "l":
            return 1 + math.log10(raw_tf)
        elif tf_code == "b":
            return 1 if raw_tf > 0 else 0
        elif tf_code == "a":
            # NOTE(review): document_tf_max holds only the LAST processed
            # document's maximum, so "a" is only meaningful right after a
            # single document/query has been processed.
            if self.document_tf_max > 0:
                return 0.5 + 0.5 * (raw_tf * 1.0 / self.document_tf_max)
            return 0.5 + 0.5 * (raw_tf * 1.0)

    def getApplyIDF(self, idf_code, one_term):
        """Apply a SMART idf variant using the posting's df (one_term[3]).

        n: none (1.0), t: log10(N / df).  Returns None for unknown codes.
        """
        if idf_code == "n":
            return 1.0
        elif idf_code == "t":
            df = one_term[3]
            if df > 0:
                # Multiply by 1.0 so the division stays a float division
                # even under Python 2 (the original truncated there).
                return math.log10(self.total_document * 1.0 / df)
            return math.log10(self.total_document)

    def normalization(self, one_term_tuples, first_index, second_index):
        """Cosine-normalize the weights of one document's postings, which
        occupy the inclusive index range [first_index, second_index]."""
        divider = 0.0
        for i in range(first_index, second_index + 1):
            divider += 1.0 * one_term_tuples[i][4] * one_term_tuples[i][4]
        divider = math.sqrt(divider)
        if divider > 0:
            for i in range(first_index, second_index + 1):
                one_term_tuples[i][4] /= divider

    def doDocumentTermWeighting(self, one_term_tuples, term_weighting_code):
        """Apply a 3-letter SMART code (tf, idf, normalization) to every
        posting's weight slot (index 4)."""
        for one_term in one_term_tuples:
            # tf first, then multiply by idf.
            one_term[4] = self.getApplyTF(term_weighting_code[0], one_term)
            one_term[4] *= self.getApplyIDF(term_weighting_code[1], one_term)

        # "c" = cosine normalization, applied per document.
        if term_weighting_code[2] == "c":
            # Relies on total_term_per_document preserving insertion order so
            # that consecutive runs of one_term_tuples line up per document.
            first_index = 0
            for doc_id in self.total_term_per_document:
                second_index = first_index + self.total_term_per_document[doc_id] - 1
                self.normalization(one_term_tuples, first_index, second_index)
                first_index = second_index + 1

    def _processOneDocument(self, document_id, title, author, content):
        """Run the full pipeline on one parsed document and append its
        postings to one_term_tuples.

        df (index 3) and weight (index 4) are placeholders here; they are
        filled in later by documentCollectionProcessing.
        """
        one_document = document.Document(document_id, title, author, content)
        self.document_list[one_document.id] = one_document

        self.lowerCase(one_document)
        tokens = self.documentTokenization(one_document)
        tokens = self.stopWordsRemoval(tokens)
        if self.is_use_stemming:
            self.stemming(tokens)

        self.term_frequency_dict = helper.termListToTermFrequencyDictionary(tokens)
        # Number of DISTINCT terms; normalization() later uses these counts
        # to slice one_term_tuples back into per-document runs.
        self.total_term_per_document[document_id] = len(self.term_frequency_dict)

        # Reset per document.  The original forgot this reset for the LAST
        # document, so its tf-max leaked in from the previous one.
        self.document_tf_max = -1
        for term in self.term_frequency_dict:
            raw_tf = self.term_frequency_dict[term]
            if raw_tf > self.document_tf_max:
                self.document_tf_max = raw_tf
            self.one_term_tuples.append([term, document_id, raw_tf, 1, 0.1])

        self.updateDFDict()

    def documentCollectionProcessing(self):
        """Parse the collection file, weight every posting, and write the
        inverted file.

        Format markers: ".I <id>" starts a document, ".T"/".A"/".W" switch
        the following lines into title/author/content respectively.
        """
        document_id = ''
        title = ''
        author = ''
        content = ''
        current_state = ''
        self.total_document = 0

        with open(self.document_collection_file, 'r') as f_document:
            for line in f_document:
                if line.startswith('.'):
                    if line.startswith('.I'):
                        self.total_document += 1
                        # A new ".I" means the previous document is complete.
                        if document_id != '':
                            self._processOneDocument(document_id, title, author, content)
                        title = ''
                        author = ''
                        content = ''
                        current_state = 'I'
                        # ".I <id>\n" -> keep only the id.
                        document_id = line[3:].rstrip('\n')
                    elif line.startswith('.T'):
                        current_state = 'T'
                    elif line.startswith('.A'):
                        current_state = 'A'
                    elif line.startswith('.W'):
                        current_state = 'W'
                else:
                    if current_state == 'T':
                        title += line
                    elif current_state == 'A':
                        author += line
                    elif current_state == 'W':
                        content += line

        # The loop only flushes a document when the NEXT ".I" appears, so the
        # final document still has to be processed here.
        if document_id != '':
            self._processOneDocument(document_id, title, author, content)

        # Replace the placeholder df with the real document frequency.
        for one_term in self.one_term_tuples:
            one_term[3] = self.document_frequency_dict[one_term[0]]

        self.doDocumentTermWeighting(self.one_term_tuples, self.document_term_weighting_code)
        self.save_to_inverted_file()

    def save_to_inverted_file(self):
        """Sort postings by term and write 'term doc_id raw_tf df weight'
        rows to inverted_file_saved_path."""
        self.one_term_tuples.sort(key=lambda one_term: one_term[0])
        with open(self.inverted_file_saved_path, 'w') as f_inverted_file:
            f_inverted_file.write("term doc_id raw_tf df weight\n")
            for one_term in self.one_term_tuples:
                f_inverted_file.write("%s %s %.4f %.4f %.4f\n" % (one_term[0], one_term[1], one_term[2], one_term[3], one_term[4]))

    def queryProcessing(self, query):
        """Run the document pipeline on a free-text query and return its
        term -> raw frequency dictionary."""
        query = query.lower()

        # Sentence-split first, then word-split, so punctuation-based
        # tokenization matches the document side.
        query_list_token = []
        for sentence in sentence_tokenizer.tokenize(query):
            query_list_token += word_tokenizer.tokenize(sentence)

        query_list_token = [helper.punctuation.sub("", word) for word in query_list_token]
        query_list_token = [word for word in query_list_token if word.strip()]

        query_list_token = self.stopWordsRemoval(query_list_token)
        if self.is_use_stemming:
            self.stemming(query_list_token)

        return helper.termListToTermFrequencyDictionary(query_list_token)

# Smoke test: build the inverted file from a small sample collection.
if __name__ == '__main__':
    document_collection_file = 'simple_document_collection.txt'
    stop_list_file = 'stoplist.txt'
    # SMART code: document side "lnc", query side "ltc".
    term_weighting_code = "lnc.ltc"
    # (A dead absolute Windows path that was immediately overwritten here
    # has been removed.)
    inverted_file_saved_path = "freedom_inverted_file.txt"

    is_use_stemming = True

    document_query_processing = DocumentQueryProcessing(document_collection_file, stop_list_file, is_use_stemming, term_weighting_code, inverted_file_saved_path)
    document_query_processing.documentCollectionProcessing()

    # Sample query kept for manual experimentation with queryProcessing().
    query = """What problems and concerns are there in making up descriptive titles?  
What difficulties are involved in automatically retrieving articles from 
approximate titles?  
What is the usual relevance of the content of articles to their titles?"""

    #print document_query_processing.queryProcessing(query)