import nltk.classify.util
import nltk.tokenize
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords

# English stop words; word_feats drops these from every feature dictionary.
stopset = set(stopwords.words('english')) 
def word_feats(words, stop=None):
    """Build a bag-of-words feature dict for a NaiveBayesClassifier.

    Each non-stop word maps to True (presence feature); duplicates
    collapse naturally because dict keys are unique.

    :param words: iterable of token strings.
    :param stop: optional set of words to exclude; defaults to the
        module-level English ``stopset`` (backward compatible).
    :return: dict mapping each kept word to True.
    """
    if stop is None:
        stop = stopset
    # Generator straight into dict() -- no intermediate list of pairs.
    return dict((word, True) for word in words if word not in stop)

# File ids for each review polarity in the movie_reviews corpus.
negids = movie_reviews.fileids('neg')
posids = movie_reviews.fileids('pos')

# One (feature-dict, label) pair per review.  The Spanish labels
# ('negativo' / 'positivo') are what the report below prints.
negfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'negativo') for f in negids]
posfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'positivo') for f in posids]

# 3/4 of each class for training, the rest for testing.  Floor division
# (//) keeps the cutoff an int -- identical to int/int on Python 2, and
# still a valid slice index if this ever runs under Python 3.
negcutoff = len(negfeats) * 3 // 4
poscutoff = len(posfeats) * 3 // 4

# Class-balanced split: train and test each mix both polarities.
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
# Report the split sizes, train a Naive Bayes classifier on the training
# reviews and print its accuracy over the held-out test reviews.
print
print '-- Entrenamos la red --'
print 'Utilizando %d reviews de entrenamiento, y %d reviews de prueba' % (len(trainfeats), len(testfeats))
print 

classifier = NaiveBayesClassifier.train(trainfeats)
print 'La accuracy es:', nltk.classify.util.accuracy(classifier, testfeats)

print
print 'Las palabras mas reelevantes son:'
print
# Words whose presence most strongly discriminates between the labels.
classifier.show_most_informative_features()

tweet = ' Coca-cola is a great success' 
print
print '-- Tomar el tweet --'
print tweet

print 
print '-- Pasar el tweet a minuscula --'
tweet = tweet.lower()
print tweet

print 
print '-- Dividir el tweet en tokens --'
tokenizedTweet = nltk.word_tokenize(tweet) 
print tokenizedTweet

print
print '-- Convertir el tweet en un diccionario y eliminar stop words --'
featstructs = word_feats(tokenizedTweet) 
print featstructs

print
print '-- Clasificar el tweet --'
print 'La clasificacion del tweet es:'
value = classifier.classify(featstructs)
print value

raw_input('Presione una tecla para salir...')
