"""
Creates a benchmark by predicting the most popular skus in each category
"""

from collections import defaultdict
import csv
from util import Util, Constant
import time
from datetime import datetime

# Working directory holding the BestBuy Kaggle data files (train.csv / test.csv).
wd = "/home/kingsfield/Desktop/BestBuyPc/"

# Top-N cutoff used by get_cat_count when summing "hot" sku clicks.
GLOBAL_HOT = 50
# Minimum click count for a sku to be treated as "hot" in get_cat_statistic.
GLOBAL_QUERY = 2
# Minimum click count for a hot sku to be counted as a prediction candidate.
GLOBAL_PREDICT_QUERY = 5

def get_popular_skus():
    """Tally sku click counts per category from train.csv.

    Returns a 3-tuple:
      cat_item_dict:       {category: {sku: click_count}}
      cat_item_sort:       {category: [(sku, count), ...]} sorted by count, descending
      month_cat_item_dict: {month: {category: {sku: click_count}}}
    """
    with open(wd + "train.csv") as infile:
        reader = csv.reader(infile, delimiter=",")
        reader.next() # burn the header

        cat_item_dict = defaultdict(lambda: defaultdict(int))
        month_cat_item_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
        for (__user, sku, category, __query, click_time, __query_time, __old_query) in reader:
            cat_item_dict[category][sku] += 1
            # Timestamps appear both with and without fractional seconds; try
            # the fractional form first and fall back to whole seconds.
            # BUGFIX: was a bare `except:` that would also hide KeyboardInterrupt
            # and genuine errors; strptime raises ValueError on format mismatch.
            try:
                t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S')
            month = t.month
            month_cat_item_dict[month][category][sku] += 1
        cat_item_sort = dict()
        for category in cat_item_dict:
            # Sort ascending then reverse (kept as-is to preserve the original
            # tie ordering, which differs from sorted(..., reverse=True)).
            cat_item_sort[category] = sorted(cat_item_dict[category].items(), \
                                          key=lambda x: x[1])
            cat_item_sort[category].reverse()
        return cat_item_dict, cat_item_sort, month_cat_item_dict
    
def get_cat_statistic(item_sort, time_item_sort):
    """Compute per-category statistics.

    For every category records: total query count (SUM), query count over
    the "hot" skus with count >= GLOBAL_QUERY (HOT), how many skus are hot
    (HOT_SIZE, floored at 5), how many of the hot skus are prediction
    candidates with count >= GLOBAL_PREDICT_QUERY (PREDICT_HOT_SIZE), and
    the per-month query totals keyed by month.
    """
    cat_count = defaultdict(lambda: defaultdict(int))
    for cat in item_sort:
        ranked = item_sort[cat]  # [(sku, count), ...] sorted by count descending
        sum_size = len(ranked)
        sum_query = sum([i[1] for i in ranked])
        hot_query = 0
        idx = 0  # number of hot skus
        jdx = 0  # number of prediction-candidate skus among the hot ones
        # ranked is sorted descending, so we can stop at the first cold sku.
        while idx < sum_size and ranked[idx][1] >= GLOBAL_QUERY:
            hot_query += ranked[idx][1]
            # BUGFIX: the original tested ranked[idx] AFTER incrementing idx,
            # which skipped the most popular sku in the jdx count and raised
            # IndexError when every sku in the category was hot; test the
            # current item before advancing.
            if ranked[idx][1] >= GLOBAL_PREDICT_QUERY:
                jdx += 1
            idx += 1
        if idx < 5:
            idx = 5  # always keep at least 5 candidate skus per category
        cat_count[cat][Constant.HOT_SIZE] = idx
        cat_count[cat][Constant.SUM_SIZE] = sum_size
        cat_count[cat][Constant.HOT] = hot_query
        cat_count[cat][Constant.SUM] = sum_query
        cat_count[cat][Constant.PREDICT_HOT_SIZE] = jdx

    # Per-month query totals for every category (used by the time-aware model).
    for month in Constant.MONTH:
        for cat in item_sort:
            cat_count[cat][month] = sum(time_item_sort[month][cat].values())
    return cat_count

def get_cat_count(categories):
    """For each category, record the total query count (SUM) and the query
    count concentrated in its top GLOBAL_HOT skus (HOT).

    `categories` maps category -> [(sku, count), ...] sorted descending.
    """
    counts = defaultdict(lambda: defaultdict(int))
    for cat, ranked in categories.items():
        counts[cat][Constant.HOT] = sum(pair[1] for pair in ranked[0:GLOBAL_HOT])
        counts[cat][Constant.SUM] = sum(pair[1] for pair in ranked)
    return counts

def get_unigram_model(item_sort, cat_statistic):
    """Build unigram word counts over train.csv queries that clicked a hot sku.

    Returns (item_word, cat_word):
      item_word: {category: {sku: {word: count}}}
      cat_word:  {category: {word: count}}
    Stopwords (Constant.stopwords) are skipped.
    """
    with open(wd + "train.csv") as infile:
        reader = csv.reader(infile, delimiter=",")
        reader.next() # burn the header
        item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
        cat_word = defaultdict(lambda: defaultdict(int))
        # PERF: the original rebuilt the hot-sku list for every CSV row and
        # did an O(n) list membership test; cache a set per category instead.
        popular_cache = {}
        for (__user, sku, category, query, ___click_time, ___query_time, __old_query) in reader:
            popular = popular_cache.get(category)
            if popular is None:
                bound = cat_statistic[category][Constant.HOT_SIZE]
                popular = set(i[0] for i in item_sort[category][0:bound])
                popular_cache[category] = popular
            if sku in popular:
                words = Util.wordutil.getwords(query)
                for w in words:
                    if w not in Constant.stopwords:
                        item_word[category][sku][w] += 1
                        cat_word[category][w] += 1
    return item_word, cat_word


def make_predictions(categories):
    """Write the 5 most popular skus of each test row's category to
    popular_skus.csv (space separated); "0" for unseen categories."""
    with open(wd + "test.csv") as infile:
        reader = csv.reader(infile, delimiter=",")
        reader.next() # burn the header
        with open("/home/kingsfield/Desktop/BestBuyPc/out/popular_skus.csv", "w") as outfile:
            writer = csv.writer(outfile, delimiter=",")
            writer.writerow(["sku"])
            for (__user, __sku, category, query, click_time, __query_time) in reader:
                try:
                    top_five = categories[category][0:5]
                    writer.writerow([" ".join([pair[0] for pair in top_five])])
                except (TypeError, KeyError): # a category we haven't seen before
                    writer.writerow(["0"])

def bayes_query_prediction(words, cat, sku, alpha, beta, item_word, cat_item_dict, cat_count):
    """Naive-bayes-style click score for `sku` given a query's `words`.

    Computes p_i * prod_w (p_wi / p_i), where p_i is the smoothed sku prior
    over the category's hot clicks and p_wi is the smoothed per-word count
    for this sku.  Returns a float; higher means more likely clicked.

    TODO: alpha and beta (additive-smoothing parameters) are untuned.
    """
    cat_c = cat_count[cat][Constant.HOT]
    # Smoothed sku prior within the category's hot-query mass.
    p_i = (cat_item_dict[cat][sku] + alpha) * 1.0 / (cat_c + beta)
    p = p_i
    for w in words:
        # Smoothed word count for this sku, normalized by the same denominator.
        p_wi = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (cat_c + beta)
        p *= p_wi / p_i
    return p

def time_bayes_query_prediction(words, cat, sku, alpha, beta, item_word, cat_item_dict, cat_count, month_cat_item_dict, t):
    """Month-aware variant of bayes_query_prediction.

    The sku prior p_m comes from the click counts of month `t` only, while
    the per-word factors still use the all-time counts (p_wi / p_i).
    Returns a float; higher means more likely clicked.

    TODO: alpha and beta (additive-smoothing parameters) are untuned.
    """
    factor = 1  # extra smoothing multiplier for the priors; currently neutral
    cat_m = cat_count[cat][t]              # query mass of this category in month t
    cat_c = cat_count[cat][Constant.HOT]   # all-time hot-query mass
    # Month-specific smoothed sku prior.
    p_m = (month_cat_item_dict[t][cat][sku] + alpha) * 1.0 / (cat_m + beta * factor)
    # All-time smoothed sku prior, used to normalize the word factors.
    p_i = (cat_item_dict[cat][sku] + alpha) * 1.0 / (cat_c + beta * factor)
    p = p_m
    for w in words:
        p_wi = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (cat_c + beta)
        p *= p_wi / p_i
    return p


def make_bayes_query_predictions(cat_statistic, cat_item_dict, cat_item_sort, alpha, beta, item_word):
    """Score each row's hot skus with the bayes query model and write the
    top-5 guesses (space separated) per row; "0" for unseen categories.

    NOTE(review): currently reads train.csv and writes a *_train.csv output —
    looks like a validation run rather than a submission.
    """
    with open(wd + "train.csv") as infile:
        reader = csv.reader(infile, delimiter=",")
        reader.next() # burn the header
        with open("/home/kingsfield/Desktop/BestBuyPc/popular_query_skus_train.csv", "w") as outfile:
            writer = csv.writer(outfile, delimiter=",")
            writer.writerow(["sku"])
            for (__user, __sku, category, query, __click_time, __query_time) in reader:
                try:
                    bound = cat_statistic[category][Constant.HOT_SIZE]
                    candidates = [pair[0] for pair in cat_item_sort[category][0:bound]]
                    tokens = Util.wordutil.getwords(query)
                    scored = [[candidate,
                               bayes_query_prediction(tokens, category, candidate, alpha, beta,
                                                      item_word, cat_item_dict, cat_statistic)]
                              for candidate in candidates]
                    scored.sort(key=lambda pair: pair[1], reverse=True)
                    writer.writerow([" ".join([pair[0] for pair in scored[0:5]])])
                except (TypeError, KeyError): # a category we haven't seen before
                    writer.writerow(["0"])
                    
def make_new_bayes_predictions(cat_statistic, cat_item_dict, cat_item_sort, alpha, beta, item_word, time_cat_item_dict, cat_word):
    """Write top-5 sku predictions for every test.csv row, mixing the
    month-aware and plain bayes query models.

    Rows whose (stopword-filtered) query words are common in the category
    (total count >= 100) are scored with the month-aware model; rarer
    queries fall back to the plain model.  Unseen categories get a "0" row.
    """
    with open(wd + "test.csv") as infile:
        reader = csv.reader(infile, delimiter=",")
        reader.next() # burn the header
        with open("/home/kingsfield/Desktop/BestBuyPc/out/popular_query_skus_0906a.csv", "w") as outfile:
            writer = csv.writer(outfile, delimiter=",")
            writer.writerow(["sku"])
            for (__user, __sku, category, query, click_time, __query_time , __old_query) in reader:
                # Timestamps appear both with and without fractional seconds.
                # BUGFIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt and real errors; strptime raises
                # ValueError on a format mismatch.
                try:
                    t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S.%f')
                except ValueError:
                    t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S')
                month = t.month
                try:
                    bound = cat_statistic[category][Constant.PREDICT_HOT_SIZE]
                    hots = [x[0] for x in cat_item_sort[category][0:bound]]
                    words = Util.wordutil.getwords(query)
                    words = [w for w in words if w not in Constant.stopwords]
                    query_size = sum([cat_word[category][w] for w in words])
                    # Common queries can support the month-aware model; rare
                    # ones would overfit its sparser counts.
                    if query_size >= 100:
                        rank = [[sku, time_bayes_query_prediction(words, category, sku, alpha, beta, item_word, cat_item_dict, cat_statistic, time_cat_item_dict, month)] for sku in hots]
                    else:
                        rank = [[sku, bayes_query_prediction(words, category, sku, alpha, beta, item_word, cat_item_dict, cat_statistic)] for sku in hots]
                    rank = sorted(rank, key=lambda x: x[1], reverse=True)
                    guesses = [i[0] for i in rank[0:5]]
                    writer.writerow([" ".join(guesses)])
                except (TypeError, KeyError): # a category we haven't seen before
                    writer.writerow(["0"])
    
def main():
    """Creates the benchmark"""
    st = time.time()
    cat_item_dict, cat_item_sort, month_cat_item_dict = get_popular_skus()
    cat_statistic = get_cat_statistic(cat_item_sort, month_cat_item_dict)
    item_word, cat_word = get_unigram_model(cat_item_sort, cat_statistic)
    make_new_bayes_predictions(cat_statistic, cat_item_dict, cat_item_sort, 1, 100, item_word, month_cat_item_dict, cat_word)
    #make_bayes_query_predictions(cat_statistic, cat_item_dict, cat_item_sort, 1, 100, item_word)
    ed = time.time()
    print 'cost=%d' % int(ed - st)

if __name__ == "__main__":
    main()
# NOTE(review): the commented-out lines below are leftover one-off experiments
# (category-statistics dump and the popularity-only baseline); kept for reference.
#    make_popular_words()
#    __cat_item_dict, cat_item_sort = get_popular_skus()
#    cat_count = get_cat_statistic(cat_item_sort)
#    rank = sorted(cat_count.items(), key=lambda x:x[1][Constant.SUM], reverse=True)
#    with open('/home/kingsfield/Desktop/kaggle/out/cat_hot_count.csv', 'w') as fw:
#        for line in rank:
#            fw.write('%s\n' % str(line))
    #make_predictions(cat_item_sort)
    print 'done'
