#title          : getFeatures.py
#description    : outputs a list of url, various features (tab delimited)
#author         : Arthi Ramachandran 
#date           : 20140203
#usage          : python ./getFeatures.py <posts file> <number of posts per link> <number of posts per person> <number of posts received per person> <number of clicks per link> <output file>   
#python_version :2.7 
#============================================================================                                                                                     
import sys
import datetime as dt

# Path to the posts file ("user,url" per line); same file is re-read in later passes.
data_filename = sys.argv[1]

# Global map: user id -> dict of per-user feature values (filled in below).
user_info = dict()

def initialize_user_info(user):
    """Register *user* in the global user_info map with a fresh feature record.

    Every feature starts at its neutral value: empty lists for the raw
    tweet/followee data, zeros for the counters and averages, and an empty
    string for days_since_creation (number of active days, filled in later).
    """
    user_info[user] = {
        "tweets": [],
        "followees": [],
        "num_friends": 0,
        "num_followers": 0,
        "num_tweets": 0,
        "total_pop": 0,
        "avg_ext_pop_of_posts": 0,
        "percent_tweets_posted": 0,
        "num_tweets_received": 0,
        "reciprocal_links": 0,
        "days_since_creation": "",  # number of active days
        "avg_activity": 0,
    }

print "getting number of tweets"
with open(sys.argv[1], "rb") as f: # count number of tweets from the training set file
    for line in f:
        fields = line.strip().split(",")
        user = fields[0]
        url = fields[1]
        if user not in user_info:
            initialize_user_info(user)
        #user_info[user]["num_tweets"] += 1
        user_info[user]["tweets"].append(url)


count = 0   
print "parsing friend graph" 
with open("NYTIMES_FULL_PARSED.sorted.txt", "rb") as f: 
    for line in f: 
        follower = line.strip().split("\t")[1] 
        followee = line.strip().split("\t")[0] 
        if follower not in user_info:
            continue
        user_info[follower]["followees"].append(followee)
        count += 1
        if count % 1000000 == 0: 
            print count  


# NOTE(review): the triple-quoted string below is disabled legacy code (an
# earlier friend-graph parse and per-user tweet-count load, superseded by the
# aggregation loops further down). It is evaluated as a bare string literal
# and has no runtime effect; kept verbatim for reference.
'''
count = 0
print "parsing friend graph"
with open("NYTIMES_FULL_PARSED.sorted.txt", "rb") as f:
    for line in f:
        follower = line.strip().split("\t")[1]
        followee = line.strip().split("\t")[0]
        if follower not in user_info:
            initialize_user_info(follower)
        user_info[follower]["num_friends"] += 1
        if followee not in user_info:
            initialize_user_info(followee)
        user_info[followee]["num_followers"] += 1
        count += 1
        if count % 1000000 == 0:
            print count
            #break

with open(sys.argv[3], "rb") as f: # number of urls posted
    for line in f:
        # count # tweets per person
        fields = line.strip().split(" ")
        user = fields[1]
        if user not in user_info:
            initialize_user_info(user)
            #continue
        user_info[user]["num_tweets"] = int(fields[0])

print "getting number of tweets received"
with open(sys.argv[4], "rb") as f: # number of urls received
    for line in f:
        fields = line.strip().split(" ")
        if fields[1] not in user_info:
            continue
        user_info[fields[1]]["num_tweets_received"] = int(fields[0])
'''

# External popularity per url: file lines are "count url".
url_external_pop = {}
with open(sys.argv[5], "rb") as ext_file:
    for record in ext_file:
        parts = record.strip().split(" ")
        url_external_pop[parts[1]] = int(parts[0])

# Internal popularity (# times posted) per url -- the ground truth.
# File lines are "count url".
url_pop = {}
with open(sys.argv[2], "rb") as pop_file:
    for record in pop_file:
        parts = record.strip().split(" ")
        url_pop[parts[1]] = int(parts[0])

print "calculating num tweets and num tweets received; getting total/average external pop"
with open(sys.argv[1], "rb") as f:
    for line in f:
        # get the total popularity of posted tweets and then divide to get averages
        fields = line.strip().split(",")
        user = fields[0]
        url = fields[1]
        if user not in user_info:
            continue
        if url in url_external_pop:
            user_info[user]["total_pop"] += url_external_pop[url]

# Derive the per-user aggregate features from the raw tweet/followee lists.
for user in user_info:
    record = user_info[user]
    record["num_tweets"] = len(record["tweets"])
    # A user "receives" every tweet posted by each followee we have data for.
    record["num_tweets_received"] = sum(
        len(user_info[followee]["tweets"])
        for followee in record["followees"]
        if followee in user_info)
    if record["num_tweets"] > 0:
        # will be ok when splitting posts into training/test by day
        record["avg_ext_pop_of_posts"] = float(record["total_pop"]) / record["num_tweets"]
    if record["num_tweets_received"] > 0:
        record["percent_tweets_posted"] = float(record["num_tweets"]) / record["num_tweets_received"]

# reciprocal links
print "getting reciprocal links"
with open("nytimes_reciprocal_links.txt", "rb") as f:
    for line in f:
        user1 = line.strip().split("\t")[1]
        if user1 not in user_info:
            continue
        user_info[user1]["reciprocal_links"] += 1


# when the user account was created
print "getting user account activity features"
fmt = "%a %b %d %H:%M:%S +0000 %Y"
with open("../NYTimesData/CRAWL_AND_PARSE_DATA/NYTIMES_FULL/TwitterCrawlUserInfo/PARSED_Crawl_Data_USER_INFO_5_13a_.txt", "rb") as f:
    for line in f:
        fields = line.strip().split("\t")
        user = fields[1]
        if user in user_info:
            total_activity = fields[4]
            # crawling was done on june 3, 2013 so we're using that as the reference point
            num_days_active = dt.datetime.strptime("03 Jun 13", "%d %b %y") -  dt.datetime.strptime(fields[5], fmt)
            num_days_active = int(num_days_active.days)
            user_info[user]["days_since_creation"] = num_days_active
            user_info[user]["avg_activity"] = float(fields[4])/num_days_active # (rounded) activity per day
            user_info[user]["num_friends"] = int(fields[3])
            user_info[user]["num_followers"] = int(fields[2]) # more accurate than parsing the graph
            #print num_days_active, fields[4], user_info[user]["avg_activity"]

# selectivity features
# # urls received, # urls tweeted/# urls received, average popularity of posts by user 
# need to make sure that we're only calculating these from the training set

print "num users", len(user_info)
fout = open(sys.argv[6], "w+")
fout.write("User\tURL")
user_keys = user_info[user_info.keys()[0]].keys()
user_keys.remove("followees")
user_keys.remove("tweets")
for k in user_keys:
    print k
    fout.write("\t" + k)
fout.write("\n")
#fout.write("User\tURL\tuser_followers\tuser_friends\tuser_num_tweets\tuser_num_tweets_received\tavg_ext_pop_of_tweets\tURL_POP\n")
count = 0
with open(sys.argv[1], "rb") as f:
    for line in f:
        fields = line.strip().split(",")
        user = fields[0]
        url = fields[1]
        if user not in user_info:
            continue
        #if url not in url_pop:
        #    continue
        #print user_info[user], url_pop[url], user, url
        fout.write(user + "\t" + url)
        for  k in user_keys:
            fout.write("\t" + str(user_info[user][k]))
        fout.write("\n")
        count += 1
        if count % 10000 == 0:
            print count
        #fout.write(user + "\t" + url + "\t" + str(user_info[user]["num_followers"]) + "\t" + str(user_info[user]["num_friends"]) + "\t" + str(user_info[user]["num_tweets"]) + "\t" + str(user_info[user]["num_tweets_received"]) + "\t" + str(float(user_info[user]["total_pop"])/user_info[user]["num_tweets"]  ) + "\t" + str(url_pop[url]) + "\n")

fout.close()
