'''
Created on Sep 6, 2012

@author: kingsfield
'''

from collections import defaultdict
import csv
from util import Util, Constant
import time
from datetime import datetime
from feature_engeering import get_duplicate_query
import sys

#train_file = '/home/kingsfield/Desktop/BestBuyPc/old_train.csv'
#train_file = '/home/kingsfield/Desktop/BestBuyPc/correct_train.csv'
#train_file = '/home/kingsfield/Desktop/BestBuyPc/fast_correct_train_small.csv'
#train_file = '/home/kingsfield/Desktop/BestBuyPc/weak_predict/0'
#test_file = '/home/kingsfield/Desktop/BestBuyPc/fast_correct_train_small.csv'
#out_file = '/home/kingsfield/Desktop/BestBuyPc/out/bigram_boost_bayes0924a__.csv'
#out_file = '/home/kingsfield/Desktop/BestBuyPc/out/bigram_boost_bayes_origin_1.csv'

train_file = 'train.csv'
test_file = 'test.csv'

'TODO: raw_query analysis, it will recommend hot skus when raw_query are too rare'
# Minimum click count for a sku to count as "hot" within its category.
GLOBAL_QUERY = 8
# Minimum (sku, word) co-occurrence count for a word to enter the bigram vocabulary.
GLOBAL_BIGRAM_QUERY = 6
# Blend weights used by boost_bayes: unigram score (w1) vs bigram score (w2).
w1 = 0.7
w2 = 0.3

# Observed time span of the click log; used to bucket timestamps into blocks.
st_date = datetime.strptime('2011-08-11 04:00:17', '%Y-%m-%d %H:%M:%S')
ed_date = datetime.strptime('2011-10-31 10:17:42', '%Y-%m-%d %H:%M:%S')
duration = (ed_date - st_date).days
block_size = 12
MAX_BLOCK = block_size - 1
# Days per time block (Python 2 integer division).
block = duration / block_size

def get_pair(lt):
    """Return every 'a_b' pair string for i < j; lt must be sorted beforehand."""
    pairs = []
    size = len(lt)
    for i in range(size):
        left = str(lt[i])
        for j in range(i + 1, size):
            pairs.append(left + '_' + str(lt[j]))
    return pairs

def get_bigram_word(raw_query, hot_words, cat, method):
    """Tokenize raw_query (spell-corrected or raw, per method), keep only the
    category's hot words, and return their sorted pairwise 'a_b' bigrams."""
    if method == Constant.CORRECT:
        tokens = Util.wordutil.get_correct_words(raw_query)
    elif method == Constant.DEFAULT:
        tokens = Util.wordutil.get_words(raw_query)
    kept = sorted(t for t in tokens if t in hot_words[cat])
    return get_pair(kept)

def get_time_idx(t):
    """Map a click time t to a time-block index, clamped to MAX_BLOCK.

    NOTE(review): assumes t is convertible with int(); verify the click_time
    column actually holds epoch-like integers rather than datetime strings.
    """
    # '//' makes floor division explicit; plain '/' silently becomes true
    # division under Python 3 or `from __future__ import division`.
    return min(int(t) // block, MAX_BLOCK)

def readfile(f):
    """Open CSV file f and return a csv.reader positioned past the header row.

    NOTE(review): the file handle is deliberately left open because the
    returned reader streams from it; it is only closed on garbage collection.
    """
    infile = open(f)
    reader = csv.reader(infile, delimiter=",")
    # next(reader) works on Python 2.6+ and 3; reader.next() is Py2-only.
    next(reader)  # burn the header
    return reader

def writefile(f):
    """Open f for writing and return a csv.writer with the 'sku' header row
    already written.

    NOTE(review): the underlying file object is never exposed, so it cannot be
    explicitly flushed/closed; rows may sit in the buffer until GC.
    """
    handle = open(f, 'w')
    writer = csv.writer(handle, delimiter=",")
    writer.writerow(["sku"])
    return writer

def get_item_count():
    """Scan train_file and build per-category click-count structures.

    Returns:
        item_count: category -> sku -> total clicks.
        item_sort:  category -> [(sku, clicks)] sorted by clicks descending.
        smooth_time_item_count: time_block -> category -> sku -> click count
            smoothed with the neighbouring time blocks; edge blocks double
            their own count in place of the missing neighbour.
    """
    reader = readfile(train_file)
    item_count = defaultdict(lambda: defaultdict(int))
    time_item_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for (__user, sku, category, __query, click_time) in reader:
        time_block = get_time_idx(click_time)
        item_count[category][sku] += 1
        time_item_count[time_block][category][sku] += 1

    item_sort = dict()
    for category in item_count:
        item_sort[category] = sorted(item_count[category].items(),
                                     key=lambda x: x[1], reverse=True)

    # BUG FIX(review): a first pass that seeded smooth_time_item_count with
    # item_count * 3.0 / block_size was removed -- every entry it wrote was
    # unconditionally overwritten by the loop below over the identical key
    # set, so it was pure dead work.
    smooth_time_item_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for time_block in time_item_count:
        for cat in time_item_count[time_block]:
            for sku in time_item_count[time_block][cat]:
                smooth_time_item_count[time_block][cat][sku] = time_item_count[time_block][cat][sku]
                # Edge blocks have only one neighbour: count themselves twice.
                if time_block == 0 or time_block == MAX_BLOCK:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block][cat][sku]
                if time_block >= 1:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block - 1][cat][sku]
                if time_block < MAX_BLOCK:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block + 1][cat][sku]
    return item_count, item_sort, smooth_time_item_count
    
def get_cat_statistic(item_sort, time_item_count):
    """Build per-category aggregate statistics.

    For each category the returned dict carries: HOT_SIZE (count of skus with
    >= GLOBAL_QUERY clicks, floored at 5), SUM_SIZE, HOT (clicks over the hot
    skus), SUM (all clicks), PREDICT_HOT_SIZE, plus one entry per time block
    t that is currently just a copy of SUM (see the inline TODO).
    """
    cat_statistic = defaultdict(lambda: defaultdict(int))
    for cat in item_sort:
        sum_query = sum([i[1] for i in item_sort[cat]])
        hot_query = 0
        idx = 0
        __jdx = 0
        sum_size = len(item_sort[cat])
        # item_sort[cat] is sorted by clicks descending, so walk from the top
        # until the per-sku count drops below the hot threshold.
        while True:
            if idx >= sum_size or item_sort[cat][idx][1] < GLOBAL_QUERY:
                break
            hot_query += item_sort[cat][idx][1]
            idx += 1
        # Always keep at least 5 candidate skus per category.
        if idx < 5:
            idx = 5 
        print '--hot size=%d' % idx
        cat_statistic[cat][Constant.HOT_SIZE] = idx
        cat_statistic[cat][Constant.SUM_SIZE] = sum_size
        cat_statistic[cat][Constant.HOT] = hot_query
        cat_statistic[cat][Constant.SUM] = sum_query
        cat_statistic[cat][Constant.PREDICT_HOT_SIZE] = idx
    for t in xrange(block_size):
        for cat in item_sort:
            #sum_query = sum([i[1] for i in time_item_count[t][cat].items()])
            #cat_statistic[cat][t] = sum_query
            '''TODO: pay attention, this is not correct in logic, but I write this for saving time'''
            cat_statistic[cat][t] = cat_statistic[cat][Constant.SUM]
    return cat_statistic    

def get_unigram_model(item_sort, cat_statistic):
    """Count unigram occurrences per (category, sku), over hot skus only.

    Returns:
        item_word: category -> sku -> word -> count.
        cat_word:  category -> word -> count summed across that category's
                   hot skus.
    """
    # PERF FIX(review): precompute hot-sku membership sets once per category
    # instead of rebuilding a list (and doing an O(n) scan) for every row.
    popular = dict()
    for cat in item_sort:
        bound = cat_statistic[cat][Constant.HOT_SIZE]
        popular[cat] = set(i[0] for i in item_sort[cat][0:bound])

    reader = readfile(train_file)
    item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    cat_word = defaultdict(lambda: defaultdict(int))
    for (__user, sku, category, raw_query, ___click_time) in reader:
        if sku in popular.get(category, ()):
            words = Util.wordutil.get_correct_words(raw_query)
            for w in words:
                item_word[category][sku][w] += 1
                cat_word[category][w] += 1
    return item_word, cat_word

def get_bigram_model(item_word, item_sort, cat_statistic, method):
    """Build bigram counts over each category's hot-word vocabulary.

    Returns:
        bigram_item_word: category -> sku -> bigram 'a_b' -> count.
        cat_statistic: same dict passed in, with BIGRAM_HOT totals added.
        hot_words: category -> set of words with >= GLOBAL_BIGRAM_QUERY
                   co-occurrences with at least one sku.
    """
    # Per-sku hot words: words seen with that sku often enough.
    hot_sku_words = defaultdict(lambda: defaultdict(set))
    for cat in item_word:
        for sku in item_word[cat]:
            hots = item_word[cat][sku].items()
            hot_sku_words[cat][sku] = set([i[0] for i in hots if i[1] >= GLOBAL_BIGRAM_QUERY])

    # Category-level hot vocabulary: union over all its skus.
    hot_words = dict()
    for cat in hot_sku_words:
        hot_words[cat] = set()
        for sku in hot_sku_words[cat]:
            hot_words[cat] = hot_words[cat].union(hot_sku_words[cat][sku])

    # PERF FIX(review): precompute hot-sku sets once per category instead of
    # rebuilding a list (and doing an O(n) scan) for every training row.
    popular = dict()
    for cat in item_sort:
        bound = cat_statistic[cat][Constant.HOT_SIZE]
        popular[cat] = set(i[0] for i in item_sort[cat][0:bound])

    reader = readfile(train_file)
    bigram_item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for (__user, sku, category, raw_query, ___click_time) in reader:
        if sku in popular.get(category, ()):
            bigram = get_bigram_word(raw_query, hot_words, category, method)
            for w in bigram:
                bigram_item_word[category][sku][w] += 1
                cat_statistic[category][Constant.BIGRAM_HOT] += 1

    return bigram_item_word, cat_statistic, hot_words
    
def bayes_query_prediction(words, cat, sku, alpha, beta, item_word, item_count, cat_statistic):
    """Time-independent naive-Bayes score for (cat, sku) given query words.

    Score = prior * product over words of (word likelihood / prior), with
    alpha/beta additive smoothing over the category's hot-click total.
    """
    hot_total = cat_statistic[cat][Constant.HOT]
    denom = hot_total + beta
    prior = (item_count[cat][sku] + alpha) * 1.0 / denom
    score = prior
    for w in words:
        word_prob = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / denom
        score *= word_prob / prior
    return score

def time_bayes_bigram_prediction(bigram, cat, sku, alpha, beta, bigram_item_word, item_count, cat_statistic, month_cat_item_dict, t):
    """Time-aware naive-Bayes score using bigram features.

    Starts from the time-block prior for (cat, sku) and multiplies in one
    likelihood ratio per bigram, smoothed by alpha/beta.
    """
    time_total = cat_statistic[cat][t]
    time_prior = (month_cat_item_dict[t][cat][sku] + alpha) * 1.0 / (time_total + beta)

    hot_total = cat_statistic[cat][Constant.HOT]
    bigram_total = cat_statistic[cat][Constant.BIGRAM_HOT]
    base_prior = (item_count[cat][sku] + alpha) * 1.0 / (hot_total + beta)
    score = time_prior
    for w in bigram:
        bigram_prob = (bigram_item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (bigram_total + beta)
        score *= bigram_prob / base_prior
    return score

def time_bayes_query_prediction(words, cat, sku, alpha, beta, item_word, item_count, cat_statistic, month_cat_item_dict, t):
    """Time-aware naive-Bayes score using unigram features.

    Starts from the time-block prior for (cat, sku) and multiplies in one
    likelihood ratio per word, smoothed by alpha/beta.
    """
    time_total = cat_statistic[cat][t]
    hot_total = cat_statistic[cat][Constant.HOT]
    time_prior = (month_cat_item_dict[t][cat][sku] + alpha) * 1.0 / (time_total + beta)
    base_prior = (item_count[cat][sku] + alpha) * 1.0 / (hot_total + beta)
    score = time_prior
    for w in words:
        word_prob = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (hot_total + beta)
        score *= word_prob / base_prior
    return score

def boost_bayes(bigram, words, cat, sku, alpha, beta, item_word, bigram_item_word, item_count, cat_statistic, month_cat_item_dict, t):
    """Blend the unigram and bigram time-aware scores with weights w1/w2."""
    unigram_score = time_bayes_query_prediction(words, cat, sku, alpha, beta, item_word, item_count, cat_statistic, month_cat_item_dict, t)
    bigram_score = time_bayes_bigram_prediction(bigram, cat, sku, alpha, beta, bigram_item_word, item_count, cat_statistic, month_cat_item_dict, t)
    return w1 * unigram_score + w2 * bigram_score

        
def make_new_bayes_predictions(models, user, category, raw_query, click_time):
    cat_statistic, item_count, item_sort, alpha, beta, item_word, bigram_item_word, time_cat_item_dict, cat_word, hot_words = models[0]
    time_block = get_time_idx(click_time)
    try:
        bound = cat_statistic[category][Constant.PREDICT_HOT_SIZE]
        'TODO:'
        hots = [x[0] for x in item_sort[category][0:bound]]
    except:
        #writer.writerow(["0"])
        print '%s\t%s' % (0, "")
        continue
    try:
        raw_query = Util.wordutil.get_correct_test_query(raw_query)
        bigram = get_bigram_word(raw_query, hot_words, category, Constant.CORRECT)
        words = Util.wordutil.get_correct_words(raw_query)
        query_size = sum([cat_word[category][w] for w in words])
        if query_size >= 100 and len(bigram) > 0:
            rank = [[sku, boost_bayes(bigram, words, category, sku, alpha, beta, item_word, bigram_item_word, item_count, cat_statistic, time_cat_item_dict, time_block)] for sku in hots]
        elif query_size >= 100 and len(bigram) == 0:
            rank = [[sku, time_bayes_query_prediction(words, category, sku, alpha, beta, item_word, item_count, cat_statistic, time_cat_item_dict, time_block)] for sku in hots]
        else:
            rank = [[sku, bayes_query_prediction(words, category, sku, alpha, beta, item_word, item_count, cat_statistic)] for sku in hots]
        rank = sorted(rank, key=lambda x:x[1], reverse=True)
        guesses = [i[0] for i in rank[0:5]]
        clicked_sku = get_duplicate_query(user, raw_query)
        guesses = [i[0] for i in rank[0:5]]
        guesses = deduplicate(guesses, clicked_sku)
        
        #writer.writerow([" ".join(guesses)])
        print '%s\t%s' % ([" ".join(guesses)], "")
    except (TypeError, KeyError): # a category we haven't seen before
        #writer.writerow([" ".join(hots[0:5])])
        print '%s\t%s' % ([" ".join(hots[0:5])], "")

def deduplicate(guesses, clicked_sku):
    """Stable-partition guesses: unseen skus first, previously clicked last."""
    fresh = [sku for sku in guesses if sku not in clicked_sku]
    already_clicked = [sku for sku in guesses if sku in clicked_sku]
    return fresh + already_clicked

def read_input(f):
    """Yield each line of the stream f unchanged (thin generator wrapper)."""
    for raw_line in f:
        yield raw_line

def main(separator='\t'):
    """Train all models from train_file, then score rows streamed on stdin.

    Args:
        separator: unused; kept only for interface compatibility.

    Rows with 6 comma-separated fields are treated as train-format
    (user, sku, category, query, click_time, ...); otherwise as test-format
    (user, category, query, click_time, ...).
    """
    models = list()
    item_count, item_sort, month_item_count = get_item_count()
    cat_statistic = get_cat_statistic(item_sort, month_item_count)
    item_word, cat_word = get_unigram_model(item_sort, cat_statistic)
    bigram_item_word, cat_statistic, hot_words = get_bigram_model(item_word, item_sort, cat_statistic, Constant.CORRECT)
    # alpha=1, beta=100 are the Bayes smoothing parameters.
    models.append([cat_statistic, item_count, item_sort, 1, 100, item_word, bigram_item_word, month_item_count, cat_word, hot_words])

    # NOTE(review): removed the dead st/ed time.time() bookkeeping -- the
    # elapsed time was never printed or returned.
    for line in read_input(sys.stdin):
        params = line.split(',')
        if len(params) == 6:
            make_new_bayes_predictions(models, params[0], params[2], params[3], params[4])
        else:
            make_new_bayes_predictions(models, params[0], params[1], params[2], params[3])
        
# Script entry point: train the models, then stream predictions for stdin rows.
if __name__ == '__main__':
    main()
    print 'done'