#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""
ProfileBuilder.py

Created by Marcel Caraciolo on 2009-11-16.
Copyright (c) 2009 Federal University of Pernambuco. All rights reserved.
"""

#User profiles recommendation

#My rates will be based on the criteria:

#Grades will be 1 to 5 (3 = neutral: neither like nor dislike)

#UserRank = (id,screenName,location,description,favorites,friends,message)
#UserRank = (location=Single Valued qualitative nominal,description = Textual, favorites= Number, friends = (multivalued), message= Textual )


#Procedure

#1 - Rate all the profiles (1 - if  manual  2 - if cluster)
#2 - Create the users profiles  (2 - if manual 1 - if cluster)
#3 - Split the profiles (+ , -) Like/dislike
#4 - **The algorithm will run with a subset of the data(comparison between my profile against others)
#5 - Performance evaluation


import sqlite3
import string
import nltk

#Step 01  - Rating the profiles



# SQLite database produced by the Twitter crawler; assumed to contain the
# twitterUsers and twitterStatus tables queried below -- TODO confirm schema.
dbName = 'twitterData.db'


#Opening the database
print "Opening the database  %s..." % dbName
conn = sqlite3.connect(dbName)
# Shared cursor reused by every query in this script (never closed; the
# process exit releases it).
cursor = conn.cursor()

#Rates may come from a clustering process (closer to me -> higher grades, farther away -> lower grades)
#Manual Process x Clustering Process #Grades between 1 to 5

#Manual Process
fileHandler = open('friendsManualRatings.txt','r')

result = cursor.execute('select screenName from twitterUsers')
ret = result.fetchall()

marcelProfile = {}

for line in fileHandler:
	line = line.split('\t')
	marcelProfile[line[0]] = int(line[1]) 

fileHandler.close()


marcelPositive = filter(lambda x: x[1] > 3,marcelProfile.items())

marcelNegative = filter(lambda x: x[1] < 3, marcelProfile.items())

marcelNeutral =  filter(lambda x: x[1] ==3, marcelProfile.items())


print len(marcelPositive), len(marcelNegative) , len(marcelNeutral)


#Clustering grades process (speak with Bruno -> Bruno's project goes here!)
#Thinking of removing the marcelNeutral ones!


#Step 02 - Build the user profiles

#First we will build the basic profile from twitterUsers table

def _parseFriends(rawFriends):
	"""Parse the friends column -- stored as the repr of a list of
	unicode strings, e.g. "[u'bob', u'alice']" -- into a list of names.

	BUGFIX: tokens after the first carry a leading space from the
	split(','), so the original fixed [2:len-1] slice left stray quote
	characters on them; strip each token before slicing off the u'...'
	wrapper.
	"""
	tokens = rawFriends[1:len(rawFriends)-1].split(',')
	return [str(token).strip()[2:-1] for token in tokens]

result = cursor.execute('select timezone, description, followers_count, friends_count, favourites_count, friends from twitterUsers where screenName = "marcelcaraciolo" ')
ret = result.fetchall()[0]
friends = _parseFriends(ret[5])
marcelProf = {'timezone': ret[0], 'description': ret[1], 'followersCount': ret[2], 'friendsCount': ret[3], 
                  'favouritesCount': ret[4], 'friends': friends}

userProfiles = {}

#Fetch the same base attributes for every manually rated user.
for (user,rate) in marcelProfile.items():
	result = cursor.execute('select timezone, description, followers_count, friends_count, favourites_count, friends from twitterUsers where screenName = ?' , (user,))
	ret = result.fetchall()[0]
	friends = _parseFriends(ret[5])
	userProfiles[user] = {'timezone': ret[0], 'description': ret[1], 'followersCount': ret[2], 'friendsCount': ret[3], 
	                  'favouritesCount': ret[4], 'friends': friends, 'rate' : rate}
	


#Second we will build the advanced profile with the twitters!

result = cursor.execute('select message from twitterStatus where screenName = ?' , ("marcelcaraciolo",))
ret = result.fetchall()
#Como pegar caracteres com acento,ç, etc.
statuses = [status[0] for status in ret]
marcelProf['statuses'] = statuses


for user in userProfiles.keys():
	print user
	result = cursor.execute('select message from twitterStatus where screenName = ?' , (user,))
	ret = result.fetchall()
	#Como pegar caracteres com acento,ç, etc.
	statuses = [status[0] for status in ret]
	userProfiles[user]['statuses'] = statuses
	

#Build the symbolic profiles
#Each attribute becomes a list of (symbol, weight) pairs.

#TimeZone 
marcelProf['timezone'] = [(marcelProf.get('timezone'),1)]

#followersCount
#Chained comparisons instead of the original 'in range(a,b)' membership
#test, which was O(n) and silently sent negative counts to 'high'.
followers = marcelProf['followersCount']
if followers < 100:
	marcelProf['followersCount'] = [(u'low',1)]
elif followers < 500:
	marcelProf['followersCount'] = [(u'medium',1)]
else:
	marcelProf['followersCount'] = [(u'high',1)]

#Favourites Count
favourites = marcelProf['favouritesCount']
if favourites < 50:
	marcelProf['favouritesCount'] = [(u'low',1)]
elif favourites < 500:
	marcelProf['favouritesCount'] = [(u'medium',1)]
else:
	marcelProf['favouritesCount'] = [(u'high',1)]

#Friends
#BUGFIX: the original iterated over (and divided by the length of) the
#module-level 'friends' variable, which at this point holds the LAST user
#fetched in the profile loop above -- not Marcel's own friend list already
#stored in marcelProf['friends'].
marcelFriends = marcelProf['friends']
marcelProf['friends'] = [(friend, 1.0/len(marcelFriends)) for friend in marcelFriends]


#print marcelProf

#For all user profiles
#Apply the same symbolic transformation to every rated user's profile.

for (user,profile) in userProfiles.items():
	userProfiles[user]['timezone'] = [(userProfiles[user].get('timezone'),1)]
	#Chained comparisons instead of 'in range(...)' -- see the Marcel
	#block: O(n) membership and wrong bucket for negative counts.
	followers = userProfiles[user]['followersCount']
	if followers < 100:
		userProfiles[user]['followersCount'] = [(u'low',1)]
	elif followers < 500:
		userProfiles[user]['followersCount'] = [(u'medium',1)]
	else:
		userProfiles[user]['followersCount'] = [(u'high',1)]

	favourites = userProfiles[user]['favouritesCount']
	if favourites < 50:
		userProfiles[user]['favouritesCount'] = [(u'low',1)]
	elif favourites < 500:
		userProfiles[user]['favouritesCount'] = [(u'medium',1)]
	else:
		userProfiles[user]['favouritesCount'] = [(u'high',1)]

	#BUGFIX: weight each friend by 1/len of THIS user's own friend list;
	#the original divided by len(friends), a stale module-level leftover
	#holding the last fetched user's friends.
	userFriends = userProfiles[user].get('friends')
	userProfiles[user]['friends'] = [(friend, 1.0/len(userFriends)) for friend in userFriends]



#Analyse the twitter status (IDF)

#Decode the byte-string stopword lists so they compare equal to the
#unicode tweet tokens.
stopwords_pt = [unicode(w, 'utf-8') for w in nltk.corpus.stopwords.words('portuguese')]
stopwords_en = [unicode(w, 'utf-8') for w in nltk.corpus.stopwords.words('english')]


#Marcel Profile


#Tokenize Marcel's tweets: keep only hashtag terms, strip punctuation,
#drop digits / empty tokens / mentions / retweets / links, lowercase,
#then remove English and Portuguese stopwords.
textStatus = " ".join(marcelProf['statuses'])
#Split the words
words = [word for word in textStatus.split(" ") if word.startswith('#')]
for punct in string.punctuation:
	words = [word.replace(punct,'') for word in words]
words = [word for word in words if word != '' and not word.isdigit() and not word.startswith('@') and not word.startswith('RT') and not word.startswith('http')]
words = [word.lower() for word in words]
words = [word for word in words if word not in stopwords_en and word not in stopwords_pt]


total = 0
wc = {}
worldDictionary = {}
corpora = []

#Count each word per-profile (wc) and corpus-wide (worldDictionary)
for word in words:
	wc[word] = wc.get(word, 0) + 1
	worldDictionary[word] = worldDictionary.get(word, 0) + 1


total += len(words)
corpora.append(" ".join(words))

marcelProf['statuses'] = wc.copy()


#Twitter Statuses 
for(user,profile) in userProfiles.items():
	textStatus  = " ".join(userProfiles[user]['statuses'])
	words = textStatus.split(" ")
	words = [word for word in words if word.startswith('#')]
	RemoveWords =  string.punctuation
	for item in RemoveWords:
		words = [word.replace(item,'') for word in words]
	words = filter(lambda word: not word.isdigit(), words)
	words = filter(lambda word: word != '',words)
	words = filter(lambda word: not word.startswith('@'), words)
	words = filter(lambda word: not word.startswith('RT'), words)
	words = filter(lambda word: not word.startswith('http'),words)
	words = [word.lower() for word in words]
	words = filter(lambda word: not word in stopwords_en, words)
	words = filter(lambda word: not word in stopwords_pt, words)

	wc = {}

	for word in words:
		wc.setdefault(word,0)
		worldDictionary.setdefault(word,0)
		worldDictionary[word]+=1
		wc[word]+=1
		total+=1

	#words = [word for word in words if wc[word] > 3]
	total += len(words)
	corpora.append(" ".join(words))
	
	userProfiles[user]['statuses'] = wc.copy()

#INFORMACOES IMPORTANTES TOTAL = 1763249 palavras /39353 palavras com tagged
print total
mytexts = nltk.text.TextCollection(corpora)


#Calculating the idfs for MarcelProfile
idfsMarcel = [(word,count * mytexts.idf(word)) for (word,count) in marcelProf['statuses'].items()]
totalidfs = sum([w[1] for w in idfsMarcel])
for (word,w) in idfsMarcel:
	if w == 0:
		del	marcelProf['statuses'][word]
	else:
	 	marcelProf['statuses'][word] = w/totalidfs
marcelProf['statuses'] =  marcelProf['statuses'].items()

print marcelProf['statuses'] 

#Calculating the idfs for Other users
#Same idf weighting/normalisation as the Marcel block, per user.
for(user,profile) in userProfiles.items():
	statusCounts = userProfiles[user]['statuses']
	idfsUser = [(word, count * mytexts.idf(word)) for (word, count) in statusCounts.items()]
	totalidfs = sum(weight for (word, weight) in idfsUser)
	for (word, weight) in idfsUser:
		if weight == 0:
			del statusCounts[word]
		else:
			#weight > 0 here implies totalidfs > 0, so no division by zero
			statusCounts[word] = weight / totalidfs
	userProfiles[user]['statuses'] = statusCounts.items()




#Write into files the user profiles
#TOTAL = 257 /
import pickle

# Serialize Marcel's finished profile as a single pickled dict.
# NOTE(review): the extension '.pk1' (digit one) looks like a typo for
# '.pkl', but downstream readers presumably open the same name -- left
# unchanged to avoid breaking them.
output = open('marcelTagged.pk1', 'wb')

pickle.dump(marcelProf,output)

output.close()

total = 0
# Each user profile is dumped as one (screenName, profile) tuple per pickle
# record, so a reader must call pickle.load repeatedly until EOF.
output = open('usersTagged.pk1', 'wb')

for item in userProfiles.items():
	pickle.dump(item,output)
	total+=1
output.close()

# Number of user profiles written (see the TOTAL note above).
print total

#UserRank = (id,screenName,location,description,favorites,friends,message)