__author__ = 'Jakub Narloch'

from WEDT.Model import Document
from WEDT.Model import Term
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
import nltk
import math
import os

class ProgressListener:
    """Observer interface for reporting progress of a long-running task.

    The base implementations do nothing; subclasses override the hooks
    they care about.
    """

    def notifyProgressStarted(self, label, max):
        """Invoked once before work begins, with a display label and the
        expected maximum progress value."""

    def notifyProgress(self, current):
        """Invoked repeatedly with the current progress value."""

    def notifyProgressCompleted(self):
        """Invoked once after the work has finished."""

class WordProcessor:
    """Base interface for single-word transformations.

    The base class holds no state and its processWord returns None;
    subclasses provide the actual transformation.
    """

    def __init__(self):
        """Nothing to initialise in the base class."""

    def processWord(self, word):
        """Transform *word* and return the result (None in the base)."""

class LowerCaseWordProcessor(WordProcessor):
    """Word processor that normalises every word to lower case."""

    def processWord(self, word):
        """Return *word* converted to lower case."""
        lowered = word.lower()
        return lowered

class StemmerWordProcessor(WordProcessor):
    """Decorator which wraps an nltk stemmer and stems each processed word.

    Every word is first run through the wrapped word processor
    (lower-casing by default) and the result is then stemmed.
    """

    def __init__(self, stemmer=None, wordProcessor=None):
        """:param stemmer: nltk stemmer exposing a ``stem(word)`` method.
        :param wordProcessor: pre-processing step applied before stemming;
            defaults to a LowerCaseWordProcessor.
        """
        self.__stemmer = stemmer
        # Resolve the default per instance instead of sharing a single
        # default-argument instance created once at import time
        # (mutable-default-argument pitfall).
        if wordProcessor is None:
            wordProcessor = LowerCaseWordProcessor()
        self.__wordProcessor = wordProcessor

    def processWord(self, word):
        """Pre-process *word* with the wrapped processor, then stem it."""
        return self.__stemmer.stem(self.__wordProcessor.processWord(word))

class PorterStemmerWordProcessor(StemmerWordProcessor):
    """Stemmer word processor backed by the Porter stemming algorithm."""

    def __init__(self):
        StemmerWordProcessor.__init__(self, stemmer=PorterStemmer())

class LancasterStemmerWordProcessor(StemmerWordProcessor):
    """Stemmer word processor backed by the Lancaster stemming algorithm."""

    def __init__(self):
        StemmerWordProcessor.__init__(self, stemmer=LancasterStemmer())

class DocumentReader:
    """Reads an HTML document from disk into a Document model object.

    The raw text is stripped of HTML markup, tokenized, run through a
    word processor (Porter stemming by default) and filtered against a
    stop-word list.
    """

    def __init__(self, wordProcessor=None, stopWords=None):
        """:param wordProcessor: transformation applied to every token;
            defaults to a Porter-stemming processor.
        :param stopWords: iterable of words to drop; defaults to the
            English nltk stop-word corpus.
        """
        # Defaults are resolved lazily so that building the stemmer and
        # loading the stop-word corpus happen at construction time rather
        # than once at module import (shared mutable-default pitfall).
        if wordProcessor is None:
            wordProcessor = PorterStemmerWordProcessor()
        if stopWords is None:
            stopWords = stopwords.words('english')
        self.__wordProcessor = wordProcessor
        # frozenset gives O(1) membership tests in read().
        self.__stopWords = frozenset(stopWords)

    def read(self, fileName):
        """Read the file *fileName* and return a populated Document with
        its name, extension, category and processed word list set."""
        document = Document()

        name, extension, category = self.split_file_name(fileName)
        document.set_name(name)
        document.set_extension(extension)
        document.set_category(category)

        result = list()

        # 'input_file' rather than 'input' -- do not shadow the builtin.
        with open(fileName, 'r') as input_file:
            raw_text = nltk.clean_html(input_file.read())

            for word in nltk.word_tokenize(raw_text):
                word = self.__wordProcessor.processWord(word)

                if word not in self.__stopWords:
                    result.append(word)

        document.set_words(result)

        return document

    def split_file_name(self, fileName):
        """Split *fileName* into (base name, extension, category), where
        the category is the name of the file's immediate parent directory
        and the extension is "" when the name contains no dot."""
        fileBaseName = os.path.basename(fileName)
        category = os.path.split(os.path.split(fileName)[0])[1]

        parts = fileBaseName.rsplit('.', 1)

        extension = parts[1] if len(parts) >= 2 else ""

        return parts[0], extension, category

class FeatureSelection:
    """Selects the most significant word features from a set of training
    documents using tf-idf weighting
    (http://en.wikipedia.org/wiki/Tf%E2%80%93idf)."""

    __progressListener = None
    __featureMaxCount = 0

    def __init__(self):
        pass

    def setFeatureMaxCount(self, featureMaxCount):
        """Sets the maximum number of features returned by calculate()."""
        self.__featureMaxCount = featureMaxCount

    def setProgressListener(self, progressListener):
        """Registers an optional listener notified while weights are computed."""
        self.__progressListener = progressListener

    def calculate(self, trainDocuments):
        """Computes a tf-idf weight for every term occurring in
        *trainDocuments* and returns the words of the top-weighted terms
        (at most the configured feature count)."""
        # Step 1: collect the statistics needed for the weights -- per-term
        # occurrence counts per document and each document's total word count.
        all_terms = dict()
        all_words_amount = dict()
        print(' 1. obliczanie argumentow potrzebnych do obliczenia wag')
        for doc in trainDocuments:
            all_words_amount[doc.get_name()] = 0

            for word in doc.get_words():
                all_words_amount[doc.get_name()] += 1
                if word not in all_terms:
                    term = Term(word)
                    term.add_occurance(doc.get_name())
                    term.add_document_appear()
                    all_terms[word] = term
                else:
                    all_terms[word].add_occurance(doc.get_name())
                    if not all_terms[word].is_appear_in_doc(doc.get_name()):
                        all_terms[word].add_document_appear()

        # Step 2: a term's weight is the maximum tf-idf it reaches over the
        # documents it occurs in.
        print(' 2.obliczanie wag')

        if self.__progressListener is not None:
            self.__progressListener.notifyProgressStarted('Ekstrakcja cech', len(all_terms))
        tf_idf_wages = dict()
        doc_count = len(trainDocuments)
        for term in all_terms.values():
            print(str(len(tf_idf_wages) + 1) + "/" + str(len(all_terms)))

            if self.__progressListener is not None:
                self.__progressListener.notifyProgress(len(tf_idf_wages) + 1)
            tf_idf_wages[term] = 0
            # Smoothed idf, hoisted out of the inner loop (invariant per
            # term).  float() forces true division: under Python 2 int/int
            # truncates, which previously zeroed almost every weight.
            idf = math.log10(float(doc_count) / (1 + term.get_documents_where_it_appear()))
            for doc in term.get_occurance_in_specified_docs(None).keys():
                tf = float(term.get_occurance_in_specified_docs(doc)) / all_words_amount[doc]
                tf_idf_wages[term] = max(tf_idf_wages[term], tf * idf)

        print(' 3. zakonczono obliczanie wag')

        # Step 3: keep the words of the highest-weighted terms.
        word_features = list()
        maxCount = min(self.__featureMaxCount, len(tf_idf_wages))
        for item in sorted(tf_idf_wages.items(), key=lambda item: item[1], reverse=True)[:maxCount]:
            word_features.append(item[0].get_word())

        return word_features