import nltk.classify.util
import nltk.tokenize
import re
import string
import TextSanitizationHelpers
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords

def word_feats(words):
    """Return a bag-of-words feature dict {word: True} for *words*,
    excluding English stopwords.

    The stopword set is loaded from the NLTK corpus once and cached on the
    function object; the original rebuilt it on every call, which is wasteful
    because this function is invoked once per tweet in the loop below.
    """
    if getattr(word_feats, '_stopset', None) is None:
        word_feats._stopset = set(stopwords.words('english'))
    stopset = word_feats._stopset
    return {word: True for word in words if word not in stopset}

negativeTweets = open("TweetsCorpus.neg.txt", 'r')
positiveTweets = open("TweetsCorpus.pos.txt", 'r')

positiveTweetsFeats = []
negativeTweetsFeats = []

for tweet in negativeTweets:    
    tweet = tweet.replace("\n","")    
    
    tweet = TextSanitizationHelpers.ReplaceEmoticons(tweet)
    tweet = TextSanitizationHelpers.ReplaceUrl(tweet)    
    tweet = TextSanitizationHelpers.ReplacePointers(tweet)
    tweet = TextSanitizationHelpers.ReplaceRepeatedLetters(tweet)    
    tweet = TextSanitizationHelpers.ApplySteeming(tweet)    
    tweet = TextSanitizationHelpers.ReplacePunctuation(tweet)    
    tweet = tweet.lower()    
        
    tokenizedTweet = nltk.word_tokenize(tweet)         
    featstructs = word_feats(tokenizedTweet)
    negativeTweetsFeats.append((featstructs,'negativo'))

#negfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'negativo') for f in negids]

print negativeTweetsFeats