'''
Created on Sep 10, 2012

@author: kingsfield
'''
from collections import defaultdict
import csv
import random
from util import Util, Constant
import time
from datetime import datetime

# --- file locations (absolute paths on the author's machine) ---
#train_file = '/home/kingsfield/Desktop/BestBuyPc/small_train_without_stem_0911.csv'
train_file = '/home/kingsfield/Desktop/BestBuyPc/correct_train.csv'
#train_file = '/home/kingsfield/Desktop/BestBuyPc/correct_train.csv'
test_file = '/home/kingsfield/Desktop/BestBuyPc/old_train.csv'
out_file = '/home/kingsfield/Desktop/BestBuyPc/out/random_naive_bayes.csv'
# directory that receives the bagged weak-predictor training files
weak_path = '/home/kingsfield/Desktop/BestBuyPc/weak_predict/'

# minimum click count for a sku to count as "hot" (see get_cat_statistic)
GLOBAL_QUERY = 8
GLOBAL_BIGRAM_QUERY = 6  # NOTE(review): unused in this file -- verify callers elsewhere

# observed time span of the click log, cut into block_size equal time blocks
st_date = datetime.strptime('2011-08-11 04:00:17', '%Y-%m-%d %H:%M:%S')
ed_date = datetime.strptime('2011-10-31 10:17:42', '%Y-%m-%d %H:%M:%S')
duration = (ed_date - st_date).days
block_size = 8
MAX_BLOCK = block_size - 1  # highest valid time-block index
block = duration / block_size  # days per block (Python 2 integer division -- would be float on Py3)

GLOBAL_WEAK_PREDICTOR_SIZE = 1  # number of bagged weak predictors in the ensemble
GLOBAL_BAGGING_RATIO = 100      # bagging sample count = GLOBAL_FILE_SIZE * this -- TODO confirm the 100x is intended
GLOBAL_FEATURE_RATIO = 1        # fraction of the vocabulary kept per predictor
GLOBAL_FILE_SIZE = 42366        # presumably the line count of train_file (index 0 is skipped as header) -- verify

def get_pair(lt):
    """Return 'a_b' strings for every ordered pair (i < j) of *lt*.

    *lt* must already be sorted so pairs come out in canonical order.
    """
    pairs = []
    size = len(lt)
    for i in range(size):
        for j in range(i + 1, size):
            pairs.append(str(lt[i]) + '_' + str(lt[j]))
    return pairs

def get_bigram_word(query, hot_words, cat):
    """Bigram features for *query*: correct the words, drop stopwords and
    anything outside hot_words[cat], sort, then pair them up via get_pair."""
    kept = sorted(w for w in Util.wordutil.get_correct_words(query)
                  if w not in Constant.stopwords and w in hot_words[cat])
    return get_pair(kept)

def get_time_idx(t):
    """Map datetime *t* to its time-block index, clamped to [0, MAX_BLOCK].

    Relies on Python 2 integer division of elapsed days by *block*.
    """
    elapsed_days = (t - st_date).days
    return min(elapsed_days / block, MAX_BLOCK)

def readfile(f):
    """Open csv file *f*, skip its header row, and return the live reader.

    Uses next(reader) instead of the Python-2-only reader.next(), which is
    identical on Py2.6+ and also works on Py3.

    NOTE(review): the file handle is deliberately left open -- the returned
    csv.reader streams from it; it is only closed when the reader (and the
    handle it references) are garbage collected.
    """
    infile = open(f)
    reader = csv.reader(infile, delimiter=",")
    next(reader)  # burn the header
    return reader

def writefile(f):
    """Open *f* for writing, emit the "sku" header row, and return the csv
    writer.  The underlying handle stays open for the writer's lifetime."""
    writer = csv.writer(open(f, 'w'), delimiter=",")
    writer.writerow(["sku"])
    return writer

def get_word_count(raw_data):
    """Count non-stopword corrected words across the query column (field 3)
    of the raw comma-separated lines in *raw_data*."""
    counts = defaultdict(int)
    for line in raw_data:
        query = line.split(',')[3]
        for w in Util.wordutil.get_correct_words(query):
            if w not in Constant.stopwords:
                counts[w] += 1
    return counts
    
def wrapp_line(raw_line, random_words):
    """Rewrite one raw training line, keeping only query words that appear in
    *random_words*.  Returns the rebuilt line, or None if no word survives.

    NOTE: the final field keeps its trailing newline from the original line,
    so the returned string is still newline-terminated.
    """
    uid, sku, cat, query, click_t, query_t = raw_line.split(',')
    kept = [w for w in Util.wordutil.get_correct_words(query) if w in random_words]
    if not kept:
        return None
    return ','.join([uid, sku, cat, ' '.join(kept), click_t, query_t])

def get_train_files(GLOBAL_WEAK_PREDICTOR_SIZE):
    """Paths of the weak-predictor training files: weak_path + 0..size-1."""
    paths = []
    for fdx in range(GLOBAL_WEAK_PREDICTOR_SIZE):
        paths.append('%s%s' % (weak_path, fdx))
    return paths

def make_rnb_train_file():
    """Write GLOBAL_WEAK_PREDICTOR_SIZE bagged, feature-sampled copies of
    train_file under weak_path -- one file per weak predictor.

    Each copy samples line indices with replacement (bagging) and keeps only
    a random subset of the vocabulary (feature sampling).
    """
    with open(train_file) as fr:
        raw_data = fr.readlines()
    word_count = get_word_count(raw_data)
    # Materialize the keys as a list: random.shuffle mutates in place, and
    # dict.keys() is a non-shufflable view on Python 3 (identical on Py2).
    words = list(word_count.keys())
    filenames = get_train_files(GLOBAL_WEAK_PREDICTOR_SIZE)
    for filename in filenames:
        # bagging idx: sample with replacement, GLOBAL_BAGGING_RATIO x file size
        bagging_size = int(GLOBAL_FILE_SIZE * GLOBAL_BAGGING_RATIO)
        file_size = GLOBAL_FILE_SIZE - 1
        feature_size = int(len(words) * GLOBAL_FEATURE_RATIO)
        random.shuffle(words)
        random_words = set(words[0:feature_size])
        # randint(1, ...) deliberately never picks index 0 (the header line)
        bagging = [random.randint(1, file_size) for __i in xrange(bagging_size)]
        with open(filename, 'w') as fw:
            for idx in bagging:
                new_line = wrapp_line(raw_data[idx], random_words)
                if new_line:
                    fw.write(new_line)
        
def get_item_count(train_file):
    """Count clicks per (category, sku), overall and per time block.

    Returns (item_count, item_sort, time_item_count):
      item_count[cat][sku]            -> click count
      item_sort[cat]                  -> [(sku, count), ...] most-clicked first
      time_item_count[t][cat][sku]    -> click count within time block t
    """
    reader = readfile(train_file)
    item_count = defaultdict(lambda: defaultdict(int))
    time_item_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for (__user, sku, category, __query, click_time, __query_time) in reader:
        try:
            t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            # narrowed from a bare except: timestamps come in two formats,
            # with or without fractional seconds, and strptime raises
            # ValueError on a format mismatch
            t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S')
        time_block = get_time_idx(t)
        item_count[category][sku] += 1
        time_item_count[time_block][category][sku] += 1

    item_sort = dict()
    for category in item_count:
        # ascending sort + reverse (kept as-is to preserve tie ordering)
        item_sort[category] = sorted(item_count[category].items(), \
                                      key=lambda x: x[1])
        item_sort[category].reverse()
    return item_count, item_sort, time_item_count
    
def get_cat_statistic(item_sort, time_item_sort):
    """Per-category aggregate statistics used by the bayes scorer.

    For each category records, under Constant.* keys: the hot-sku pool size
    (skus with >= GLOBAL_QUERY clicks, floored at 5), the total sku count,
    the hot click total and the overall click total; and under integer keys
    0..block_size-1 the per-time-block click totals.
    """
    cat_count = defaultdict(lambda: defaultdict(int))
    for cat in item_sort:
        ranked = item_sort[cat]
        n_items = len(ranked)
        total_clicks = sum(cnt for __sku, cnt in ranked)
        hot_clicks = 0
        hot_size = 0
        # ranked is sorted most-clicked first, so the hot prefix is contiguous
        while hot_size < n_items and ranked[hot_size][1] >= GLOBAL_QUERY:
            hot_clicks += ranked[hot_size][1]
            hot_size += 1
        hot_size = max(hot_size, 5)  # always keep at least 5 candidates
        cat_count[cat][Constant.HOT_SIZE] = hot_size
        cat_count[cat][Constant.SUM_SIZE] = n_items
        cat_count[cat][Constant.HOT] = hot_clicks
        cat_count[cat][Constant.SUM] = total_clicks
        cat_count[cat][Constant.PREDICT_HOT_SIZE] = hot_size
    for t in range(block_size):
        for cat in item_sort:
            cat_count[cat][t] = sum(cnt for __sku, cnt in time_item_sort[t][cat].items())
    return cat_count

def get_unigram_model(train_file, item_sort, cat_statistic):
    """Count query words per (category, sku) and per category, restricted to
    each category's hot skus.

    Returns (item_word, cat_word):
      item_word[cat][sku][word] -> count
      cat_word[cat][word]       -> count

    Perf fix: the original rebuilt the hot-sku list and did an O(n) list
    membership test for every row; the hot-sku *set* is now built once per
    category, giving O(1) lookups with identical counts.
    """
    reader = readfile(train_file)
    item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    cat_word = defaultdict(lambda: defaultdict(int))
    popular_cache = {}
    for (__user, sku, category, query, ___click_time, ___query_time) in reader:
        if category not in popular_cache:
            bound = cat_statistic[category][Constant.HOT_SIZE]
            popular_cache[category] = set(i[0] for i in item_sort[category][0:bound])
        if sku in popular_cache[category]:
            words = Util.wordutil.get_correct_words(query)
            for w in words:
                if w not in Constant.stopwords:
                    item_word[category][sku][w] += 1
                    cat_word[category][w] += 1
    return item_word, cat_word

def train():
    """Train one naive-bayes model per weak-predictor training file.

    Returns (models, hot_skus): each model is the list
    [alpha, beta, item_word, item_count, cat_statistic, month_item_count]
    consumed positionally by the bayes scorers, and hot_skus maps
    category -> its candidate hot-sku list.
    """
    models = list()
    filenames = get_train_files(GLOBAL_WEAK_PREDICTOR_SIZE)
    for filename in filenames:
        print 'train %s' % filename
        item_count, item_sort, month_item_count = get_item_count(filename)
        cat_statistic = get_cat_statistic(item_sort, month_item_count)
        item_word, __cat_word = get_unigram_model(filename, item_sort, cat_statistic)
        # alpha=1, beta=20 are the fixed smoothing priors used at predict time
        models.append([1, 20, item_word, item_count, cat_statistic, month_item_count])
    hot_skus = dict()
    # NOTE(review): cat_statistic/item_sort leak out of the loop above, so the
    # hot-sku pool is built from the LAST weak predictor only -- fine while
    # GLOBAL_WEAK_PREDICTOR_SIZE == 1, but verify before growing the ensemble.
    for category in cat_statistic:
        bound = cat_statistic[category][Constant.HOT_SIZE]
        hots = [x[0] for x in item_sort[category][0:bound]]
        hot_skus[category] = hots
    return models, hot_skus

def predict_sku_pool(category, hot_sku):
    """Candidate sku list (the category's hot skus) to score at predict time."""
    return hot_sku[category]

def time_bayes_query_prediction(words, cat, sku, alpha, beta, item_word, item_count, cat_statistic, month_cat_item_dict, t):
    """Naive-bayes score for clicking *sku* given the query *words*, using the
    time-block-t sku prior, with additive smoothing (alpha on counts, beta on
    totals)."""
    time_total = cat_statistic[cat][t]
    hot_total = cat_statistic[cat][Constant.HOT]
    # start from the time-conditioned sku prior
    prob = (month_cat_item_dict[t][cat][sku] + alpha) * 1.0 / (time_total + beta)
    # global sku prior, used to normalize each per-word likelihood
    sku_prior = (item_count[cat][sku] + alpha) * 1.0 / (hot_total + beta)
    for w in words:
        word_likelihood = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (hot_total + beta)
        prob *= word_likelihood / sku_prior
    return prob

def bayes_query_prediction(words, cat, sku, alpha, beta, item_word, item_count, cat_statistic):
    """Time-agnostic naive-bayes score for clicking *sku* given the query
    *words*, with additive smoothing (alpha on counts, beta on totals)."""
    hot_total = cat_statistic[cat][Constant.HOT]
    sku_prior = (item_count[cat][sku] + alpha) * 1.0 / (hot_total + beta)
    prob = sku_prior
    for w in words:
        word_likelihood = (item_word[cat][sku].get(w, 0) + alpha) * 1.0 / (hot_total + beta)
        prob *= word_likelihood / sku_prior
    return prob

def rnb_predict(models, category, query, click_time, hot_sku):
    """Rank the category's hot skus by their summed ensemble score and return
    the top-5 sku guesses for this (query, click_time)."""
    hots = predict_sku_pool(category, hot_sku)
    words = Util.wordutil.get_correct_words(query)
    words = [w for w in words if w not in Constant.stopwords]
    try:
        t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        # narrowed from a bare except: click times come with or without
        # fractional seconds (consistent with get_item_count)
        t = datetime.strptime(click_time, '%Y-%m-%d %H:%M:%S')
    time_block = get_time_idx(t)
    rank = []
    for sku in hots:
        # sum this sku's score across every weak predictor in the ensemble
        score = sum(time_bayes_query_prediction(words, category, sku, alpha, beta,
                                                item_word, item_count, cat_statistic,
                                                time_cat_item_dict, time_block)
                    for alpha, beta, item_word, item_count,
                        cat_statistic, time_cat_item_dict in models)
        rank.append([sku, score])
    rank.sort(key=lambda x: x[1], reverse=True)
    guesses = [i[0] for i in rank[0:5]]
    return guesses

def make_rnb_predict(models, hot_sku):
    """Predict top-5 skus for every row of test_file and write them (space
    separated) to out_file; rows whose category/data was never seen fall back
    to "0"."""
    reader = readfile(test_file)
    writer = writefile(out_file)
    for row in reader:
        __user, __sku, category, query, click_time, ___query_time = row
        try:
            guesses = rnb_predict(models, category, query, click_time, hot_sku)
            writer.writerow([" ".join(guesses)])
        except (TypeError, KeyError):  # a category we haven't seen before
            writer.writerow(["0"])

def main():
    st = time.time()
    models, hot_sku = train()
    make_rnb_predict(models, hot_sku)
    ed = time.time()
    print 'cost=%d' % int(ed - st)

if __name__ == '__main__':
    # Stage 1: generate the bagged weak-predictor training files.
    # Stage 2 (train + predict via main()) is currently disabled and run
    # separately once the files exist.
    make_rnb_train_file()
    #main()
    print 'done'
