'''
Created on 04-01-2011

@author: Lukasz
'''

from types import NoneType
import math
import nltk
import os
import random
import re

# Path to the training document collection
TR = 'TRAIN'
# Path to the test document collection
KL = 'TEST'

# Tokenize a document: read the file at path `document` and return its
# whitespace-separated tokens, lower-cased, in order of appearance.
def words(document):
    tokens = []
    with open(document, 'r') as doctext:
        for line in doctext:
            # str.split() with no argument splits on runs of whitespace.
            tokens.extend(token.lower() for token in line.split())
    return tokens

# Stem a tokenized document with the Porter stemmer, dropping English
# stop words.  `document` is an iterable of tokens (see words());
# returns the lower-cased stems in order of appearance.
def stem(document):
    # Fix: the original constructed a fresh nltk.PorterStemmer() inside
    # the loop for every single word — hoist the loop-invariant object.
    stemmer = nltk.PorterStemmer()
    stem_words = []
    for word in document:
        lowered = word.lower()  # lower-case once, not twice
        if lowered in stop_words:
            continue
        stem_words.append(stemmer.stem(lowered))
    return stem_words

# NLTK's English stop-word list; consulted by stem() to filter tokens.
stop_words = nltk.corpus.stopwords.words('english')

# Build ('path', 'class label') pairs from the training set (TR).
# The class label is the immediate subdirectory name, extracted from
# dirnames shaped like 'TRAIN/label' (or 'TRAIN\label' on Windows).
train_docs = list()
for dirname, dirnames, filenames in os.walk(TR):
    for filename in filenames:
        m = re.match(r"\w+[\\/](\w+)", dirname)
        # Bug fix: the original tested `m != NoneType`, comparing the
        # match object to the NoneType *class* — always true — so files
        # in the top-level directory crashed on m.group(1).
        if m is not None:
            train_docs.append((os.path.join(dirname, filename), m.group(1)))

# Fixed seed so the shuffled training order is reproducible across runs.
random.seed(404)
random.shuffle(train_docs)

# Przygotowanie zbioru par ('sciezka', 'numer klasy') ze zbioru testowego (KL)
test_docs = list()
print '=> Reading files.'
for dirname, dirnames, filenames in os.walk(KL):
    for filename in filenames:
        m = re.match(r"\w+[\\/](\w+)", dirname)
        if m != NoneType:
            test_docs.append((os.path.join(dirname, filename), m.group(1)))

#Petla tworzy zbior wszystkich wyrazow (allwords), ktore wystapily w tekstach
print '=> Creating allwords.'
frequent_words = nltk.FreqDist()
allwords = dict()
frequency_dict = dict()
word_features = list()
for doc in train_docs:
    frequency_dict[doc] = nltk.FreqDist()
    for word in stem(words(doc[0])):
        frequency_dict[doc].inc(word)
    for word in frequency_dict[doc]:
        if word in allwords:
            allwords[word] = allwords[word] + 1
        else:
            allwords[word] = 1
            
print '=> Creating frequent_words.'    
frequent_words = dict()
for word in allwords:
    frequent_words[word] = 0
    for doc in train_docs:
        if word in frequency_dict[doc]:
            frequent_words[word] = max(frequent_words[word], frequency_dict[doc].freq(word) * math.log10(len(train_docs) / allwords[word] + 1.0))

for elem in sorted(frequent_words.items(), key=lambda elem: (-1) * elem[1])[:2000]:
    word_features.append(elem[0])

# Map a tokenized document to a binary feature dict for the classifiers.
def document_features(words):
    """Return {'contains(w)': bool} for every w in word_features.

    Fix: the old comment documented `words` as a document *path*, but
    every caller passes a token list (see words(d) at the call sites).
    NOTE: the parameter also shadows the module-level words() function;
    the name is kept for interface compatibility.
    """
    stems = set(stem(words))
    features = {}
    for word in word_features:
        features['contains(%s)' % word] = (word in stems)
    return features

# Represent both corpora as (feature dict, class label) pairs for NLTK.
train_featuresets = [(document_features(words(path)), label)
                     for path, label in train_docs]
test_featuresets = [(document_features(words(path)), label)
                    for path, label in test_docs]

# Train/evaluate several classifiers on these feature sets.
print '=> Training Bayess Classifier'
# Classifier: NaiveBayesClassifier
bayesClassifier = nltk.NaiveBayesClassifier.train(train_featuresets)
# Print the 30 most informative features found in the training set.
bayesClassifier.show_most_informative_features(30)

print "BLEDNE PRZYPORZADKOWANIA DO KLAS:"
print "I - polozenie w zbiorze testowym"
print "K - wynik klasyfikacji NaiveBayesClassifier"
print "LP\tI|K\tPLIK"
i = j = 0
for (feature_set, correct_class) in test_featuresets:
    j+=1
    result_class = bayesClassifier.classify(feature_set)
    if result_class != correct_class:
        i += 1
        print "%d.\t%s|%s\t%s" % (i, correct_class, result_class, test_docs[j][0])

# Dokladnosc wg klasyfikatora NaiveBayesClassifier
print nltk.classify.accuracy(bayesClassifier, test_featuresets)
# Klasyfikator: DecisionTreeClassifier
treeClassifier = nltk.DecisionTreeClassifier.train(train_featuresets);
# Dokladnosc wg klasyfikatora DecisionTreeClassifie
print nltk.classify.accuracy(treeClassifier, test_featuresets);

