from collections import defaultdict, Counter
import networkx as nx
__author__ = 'pstalidis'
import math


# helper: linear rescaling of a value between two ranges
def refactor(x, old_max=10.0, old_min=-10.0, new_max=5.5, new_min=0.5):
    """Linearly remap x from [old_min, old_max] into [new_min, new_max].

    Returns the remapped value rounded to the nearest integer.
    """
    step = (old_max - old_min) / (new_max - new_min)
    return round((x - old_min) / step + new_min)


def read_tags(filename="user_taggedartists-timestamps.dat", path="/home/pstalidis/Downloads/hetrec/"):
    """Load user/artist/tag events and rescale their timestamps to buckets.

    The file is whitespace-separated with a header row; per row the columns
    used are: userID (col 0), artistID (col 1), tagID (col 2), timestamp
    (col 3).

    Returns a defaultdict mapping (artistID, tagID) -> {userID: bucket},
    where bucket is the raw timestamp linearly rescaled via refactor() to
    roughly 1..300; values at or below zero after rescaling are clamped to 0.
    """
    data = defaultdict(lambda: defaultdict(int))
    timestamps = set()
    # 'with' guarantees the file is closed even if a row is malformed.
    with open(path + filename) as orfile:
        orfile.readline()  # skip the header row
        for rline in orfile:
            line = rline.strip().split()
            timestamp = int(line[3])
            timestamps.add(timestamp)
            # key: (artistID, tagID); value: when this user added the tag
            data[(line[1], line[2])][line[0]] = timestamp
    # Drop the 4 earliest distinct timestamps before fixing the rescale
    # range -- presumably early outliers in the dataset (TODO confirm).
    # Events older than timestamps[0] rescale negative and are clamped to 0.
    timestamps = sorted(timestamps)[4:]
    for tag in data.keys():
        for user in data[tag].keys():
            new_value = refactor(data[tag][user], timestamps[-1], timestamps[0], 300.5, 0.5)
            if new_value > 0:
                data[tag][user] = int(new_value)
            else:
                data[tag][user] = 0
    return data


def read_friends(filename="user_friends.dat", path="/home/pstalidis/Downloads/hetrec/"):
    """Load the user friendship file into an undirected networkx Graph.

    Expects a whitespace-separated file with a header row and two columns
    per row (userID, friendID); one edge is added per row.
    """
    graph = nx.Graph()
    # Open in text mode (original used 'rb') so node ids are plain strings,
    # consistent with the user ids produced by read_tags(); 'with' ensures
    # the handle is closed on any error.
    with open(path + filename) as orfile:
        orfile.readline()  # skip the header row
        for rline in orfile:
            line = rline.strip().split()
            graph.add_edge(line[0], line[1])
    return graph


class SocialChurn(object):
    """Estimate how influential each user is over their friends' tagging.

    tags:    mapping (artistID, tagID) -> {userID: time bucket (int)}
             (as produced by read_tags)
    friends: mapping/graph userID -> iterable of friend userIDs
             (as produced by read_friends)
    """

    def __init__(self, tags, friends):
        self.tags = tags
        self.friends = friends

    def new_thinking(self):
        """Return a Counter userID -> accumulated influence score.

        For every distinct (artist, tag) combination and every user who
        added that tag, collect the user's friends who had already added
        it (strictly earlier bucket).  Assuming each of those friends
        influenced the user equally, credit each with 1/len(common); then
        normalise each user's credits to percentages and sum, per friend,
        how much influence they exerted over everyone.
        """
        tallies = defaultdict(Counter)
        for artist in self.tags.keys():
            user_times = self.tags[artist]
            for user in list(user_times.keys()):
                common = Counter()
                for friend in self.friends[user]:
                    # .get avoids the original defaultdict lookup, which
                    # silently inserted zero entries for absent friends
                    # (mutating the input data during iteration).
                    if user_times[user] > user_times.get(friend, 0):
                        common[friend] += 1.0   # this could be weighted by time
                # split the credit for this event equally among the friends
                for friend in common:
                    tallies[user][friend] += common[friend] / len(common)
        # Normalise: what fraction of the total influence on `user` came
        # from each friend?  (denominator > 0 whenever user is in tallies)
        influence = defaultdict(Counter)
        for user in tallies.keys():
            denominator = sum(tallies[user].values())
            for friend in self.friends[user]:
                influence[friend][user] = tallies[user][friend] / denominator
        # A user's importance is the sum of influence exerted on all friends.
        importance = Counter()
        for user in influence.keys():
            importance[user] = sum(influence[user].values())
        return importance

    def pagerank(self):
        """Return a Counter userID -> PageRank score on the influence graph.

        Edge user -> friend is weighted by how many (artist, tag) events the
        friend had performed strictly before the user.
        """
        tallies = defaultdict(Counter)
        for artist in self.tags.keys():
            user_times = self.tags[artist]
            for user in list(user_times.keys()):
                for friend in self.friends[user]:
                    if user_times[user] > user_times.get(friend, 0):
                        # BUG FIX: the original added common[friend] from an
                        # empty Counter (the increment was commented out), so
                        # every tally -- and every edge weight -- was 0.
                        tallies[user][friend] += 1.0
        graph = nx.DiGraph()
        for user in tallies.keys():
            for friend in tallies[user].keys():
                graph.add_edge(user, friend, weight=tallies[user][friend])
        # NOTE(review): pagerank_numpy was removed in networkx 3.0; newer
        # networkx should call nx.pagerank(graph, weight='weight').
        importance = nx.pagerank_numpy(graph, weight='weight')
        return Counter(importance)


all_tags = read_tags()
all_friends = read_friends()

from sklearn.cross_validation import train_test_split
train_tags, test_tags = train_test_split(all_tags.keys(), train_size=0.999, random_state=1)
train_set = defaultdict(lambda: defaultdict(int))
test_set = defaultdict(lambda: defaultdict(int))
for tag in train_tags:
    train_set[tag] = all_tags[tag]

for tag in test_tags:
    test_set[tag] = all_tags[tag]

method = SocialChurn(train_set, all_friends)
results = method.new_thinking()
# print results.most_common(15)

churners_by_period = defaultdict(set)
for tag in test_set.keys():
    for user in test_set[tag].keys():
        timestamp = test_set[tag][user]
        for i in xrange(timestamp, 301):
            churners_by_period[i].add(user)

assumed_churners = defaultdict(set)
for period in xrange(1, 301):
    known_churners = churners_by_period[period]
    predicted_churners = set()
    for user in all_friends.nodes():
        accumulator = 0.0
        for friend in all_friends[user]:
            if friend in assumed_churners[period-1]:
                accumulator += results[friend]
        if accumulator > 10.0:
            predicted_churners.add(user)
    for next_period in xrange(period, 301):
        assumed_churners[period] = known_churners | predicted_churners
    print "---------------------------------------------------------"
    print "time period:", period
    print "true positive:", len(predicted_churners & churners_by_period[300])
    print "false positive:", len(predicted_churners) - len(predicted_churners & churners_by_period[300])
    print "true negative:", len(all_friends.nodes()) - len(predicted_churners | churners_by_period[300])
    print "false negative:", "?"
