#!/usr/bin/python
# coding=utf-8
import Adjective, TwitterScraper
import nltk, codecs, time

# Spanish stopword list from NLTK, consulted by clean_text() to drop
# function words. Requires the NLTK 'stopwords' corpus to be installed.
stopwords = set(nltk.corpus.stopwords.words('spanish'))

def delete_chars(text, chars):
  """Return *text* with every character that occurs in *chars* removed.

  Args:
    text: input string (str or unicode).
    chars: iterable of individual characters to strip out.

  Returns:
    A new string containing only the characters of *text* not in *chars*.
  """
  # Build the exclusion set once: O(1) membership per character instead of
  # scanning *chars* for every character of *text*. Joining at the end
  # avoids the quadratic behavior of repeated string concatenation.
  excluded = set(chars)
  return ''.join(c for c in text if c not in excluded)

def clean_text(text):
  """Normalize tweet text into a list of significant tokens.

  Strips punctuation, digits, and assorted symbols, lowercases the
  result, and splits on whitespace. Tokens that are Spanish stopwords
  or hashtags (words beginning with '#') are discarded.
  """
  stripped = delete_chars(text, u'?.,;!¡¿(){}[]+-0123456789:<>¬=_-^`"\'\\/$%&~°|')
  tokens = stripped.lower().split()
  return [tok for tok in tokens if tok not in stopwords and not tok.startswith('#')]

# Load the reference adjective list: one adjective per line, UTF-8.
# The 'with' block guarantees the file is closed even if a read fails.
with codecs.open('adjetivos.txt', encoding='utf-8', mode='r') as f:
  adjectives = [adj.strip().lower() for adj in f]

# Multi-word ("compound") adjectives cannot be matched token-by-token,
# since clean_text splits on whitespace; they are matched later as
# substrings of the raw tweet text.
comp_adjectives = [adj for adj in adjectives if ' ' in adj]

# NOTE(review): the result set is pre-seeded with these three words —
# presumably sentiment labels that must always appear in the output;
# confirm with the downstream consumer.
adjectives_found = set([u'malo', u'bueno', u'neutro'])

# Analyze the adjectives appearing in the scraped tweets.
tweets = TwitterScraper.load_tweets("tweets")

# Hoisted set: O(1) membership per word instead of an O(n) list scan
# inside the nested loop below.
adjective_set = set(adjectives)

for tweet in tweets:
  # Skip tweets with more than one hashtag.
  if tweet['text'].count('#') > 1:
    continue

  text = clean_text(tweet['text'])

  # Single-word adjectives: normalize each token to its male singular
  # form before looking it up in the reference list.
  for word in text:
    t = Adjective.male_form(Adjective.singular_form(word.strip()))
    if t in adjective_set:
      adjectives_found.add(t)

  # Compound (multi-word) adjectives: match as substrings of the raw
  # tweet text, since tokenization would split them apart.
  # (Original lines here began with a tab followed by spaces — a
  # TabError under Python 3; re-indented with spaces throughout.)
  for adj in comp_adjectives:
    if tweet['text'].find(adj) > -1:
      adjectives_found.add(adj)

# Write every adjective found to a timestamped UTF-8 file, one per line.
# 'with' guarantees the handle is closed/flushed even if a write fails;
# sorting makes the output deterministic (set iteration order is not).
with codecs.open('%d_adjectives' % (int(time.time())), encoding='utf-8', mode='w') as f:
  for adj in sorted(adjectives_found):
    f.write(adj + '\n')
