#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
import re
import hunspell
import sys

class Stemmer:
    """Normalizes Hungarian text: lowercases, strips HTML tags and
    punctuation, removes stopwords, and stems the remaining
    correctly-spelled words with hunspell.
    """

    # Historical hard-coded dictionary locations, kept as defaults so
    # existing callers (Stemmer()) keep working; new callers may point
    # the constructor at other dictionary files.
    DEFAULT_DIC_PATH = '/home/ati/Desktop/SetupScripts/dictionaries/Hungarian.dic'
    DEFAULT_AFF_PATH = '/home/ati/Desktop/SetupScripts/dictionaries/Hungarian.aff'

    def __init__(self, dic_path=None, aff_path=None):
        """Load the hunspell dictionary and the Hungarian stopword list.

        dic_path / aff_path: optional overrides for the .dic / .aff
        dictionary files; default to the historical hard-coded paths.
        """
        # NOTE(review): reload(sys) + setdefaultencoding is a Python 2
        # hack that mutates global interpreter state and is widely
        # discouraged; kept because other code may rely on the implicit
        # utf-8 default -- confirm before removing.
        reload(sys)
        sys.setdefaultencoding("utf-8")
        self.hobj = hunspell.HunSpell(dic_path or self.DEFAULT_DIC_PATH,
                                      aff_path or self.DEFAULT_AFF_PATH)
        self.TAG_RE = re.compile(r'<[^>]+>')
        self.REPLACE_PATTERN_RE = re.compile(r'[\W_]+', re.UNICODE)
        self.stopwordsHun = stopwords.words("hungarian")
        # Set copy for O(1) membership tests in the filtering step
        # (the list attribute above is kept for backward compatibility).
        self._stopword_set = set(self.stopwordsHun)

    def remove_tags(self, text):
        """Return *text* with every <...> HTML/XML tag removed."""
        return self.TAG_RE.sub('', text)

    def stemming(self, text, isUtf8=True):
        """Return a list of stemmed, stopword-free words from *text*.

        Pipeline: decode (if isUtf8) -> lowercase -> strip tags ->
        replace non-word characters with spaces -> tokenize -> drop
        stopwords -> drop misspelled words -> stem the survivors.
        Misspelled words are silently dropped, as in the original.
        """
        if isUtf8:
            old_text = unicode(text, 'utf-8')  # Python 2 decode
        else:
            old_text = text
        old_text = old_text.lower()

        # BUG FIX: the original called remove_tags() but discarded its
        # return value (strings are immutable), so tags were never
        # actually stripped and e.g. '<b>' leaked through as token 'b'.
        old_text = self.remove_tags(old_text)

        # Collapse every run of non-word characters (incl. '_') to a space.
        old_text = self.REPLACE_PATTERN_RE.sub(' ', old_text)

        tokens = old_text.split()
        return self._spell_and_stem(self._drop_stopwords(tokens))

    def _drop_stopwords(self, tokens):
        # Keep only tokens that are not Hungarian stopwords.
        return [w for w in tokens if w not in self._stopword_set]

    def _spell_and_stem(self, tokens):
        # Keep only words hunspell accepts as correctly spelled; emit the
        # first reported stem, or the word itself if hunspell returns none.
        stemmed = []
        for word in tokens:
            if self.hobj.spell(word):
                stems = self.hobj.stem(word)
                stemmed.append(stems[0] if stems else word)
        return stemmed
