'''
Created on Sep 20, 2012

@author: kingsfield
'''
from collections import defaultdict
#import nltk
#from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import csv
import re
from util import Util
from nltk.metrics import edit_distance
import os


# --- Data-file locations ---------------------------------------------------
# NOTE(review): hard-coded absolute paths for one developer's machine;
# adjust (or move to a config file) before running elsewhere.
train_file = '/home/kingsfield/Desktop/BestBuyPc/old_train.csv'
#train_file = '/home/kingsfield/Desktop/kaggle/train.csv'
test_file = '/home/kingsfield/Desktop/BestBuyPc/old_test.csv'
# Outputs: spell-corrected copies of the train/test query files.
correct_train_file = '/home/kingsfield/Desktop/BestBuyPc/correct_train.csv'
correct_test_file = '/home/kingsfield/Desktop/BestBuyPc/correct_test.csv'
train_word_correct_file = '/home/kingsfield/Desktop/BestBuyPc/train_word_correct.csv'
# Product-catalog sources used to build the "known word" vocabulary.
product_file = '/home/kingsfield/Desktop/BestBuyPc/small_product_data.xml'
product_fold = '/media/64bit_data/product_data/products'
product_word_count_file = '/home/kingsfield/Desktop/BestBuyPc/product_word_count.csv'
# General-English corpus (Norvig-style big.txt) for common-word counts.
normal_file = 'big.txt'

def readfile(f):
    """Open CSV file *f* and return a csv.reader positioned past the header.

    The underlying file handle is intentionally left open: callers iterate
    the returned reader lazily, so it must outlive this function.
    """
    infile = open(f)
    reader = csv.reader(infile, delimiter=",")
    # burn the header row; the next() builtin (py2.6+) replaces the
    # py2-only reader.next() method call, keeping the code py3-ready.
    next(reader)
    return reader

def writefile(f):
    """Open *f* for writing, emit the "sku" header line, and return the
    open file handle so the caller can append result rows.

    The caller owns the handle and is responsible for closing it.
    """
    handle = open(f, 'w')
    handle.write("sku\n")
    return handle

def words(text): return re.findall('[a-z]+', text.lower())

def get_count(features):
    """Count occurrences of each item in *features*.

    Uses add-one smoothing: every counted item reports occurrences + 1,
    and looking up an unseen key yields 1 (the defaultdict factory).
    """
    counts = defaultdict(lambda: 1)
    for feature in features:
        counts[feature] = counts[feature] + 1
    return counts

def merge_count(model, new):
    """Add every count in *new* into *model* in place; return *model*.

    *model* must accept += on its keys (a defaultdict, or a dict that
    already contains every key of *new*).
    """
    for key, value in new.items():
        model[key] += value
    return model

def make_product_words():
    files = os.listdir(product_fold)
    count = defaultdict(int)
    idx = 0
    for f in files:
        idx += 1
        print f
        print idx
        with open(os.path.join(product_fold, f)) as fr:
            text = fr.read()
            new = get_count(words(text))
            count = merge_count(count, new)
    return count

def get_product_words_1():
    """Return word counts built from the single small product XML dump."""
    with open(product_file) as fr:
        return get_count(words(fr.read()))

def get_product_words_2():
    """Load the persisted word->count map written by dump_count.

    Each line has the form "word:count"; malformed lines are skipped.
    Returns a plain dict mapping word -> int count.
    """
    count = dict()
    with open(product_word_count_file) as fr:
        for line in fr:
            parts = line[0:-1].split(':')
            if len(parts) == 2:
                count[parts[0]] = int(parts[1])
    return count

def dump_count(count, f):
    """Write *count* to file *f*, one "key:value" pair per line."""
    with open(f, 'w') as fw:
        for key, value in count.items():
            fw.write('%s:%s\n' % (key, value))
    
def get_normal_words():
    """Return word counts from the general-English corpus (normal_file)."""
    with open(normal_file) as fr:
        return get_count(words(fr.read()))


def get_hot_word(thr):
    """Count query-word frequencies over train_file.

    Reads every row of the training CSV, tokenizes the raw query with
    Util.wordutil.get_words, and returns a plain dict of word -> count
    restricted to words occurring at least *thr* times.
    """
    reader = readfile(train_file)
    hot_word = defaultdict(int)
    for (__user, __sku, __category, raw_query, ___click_time, ___query_time) in reader:
        for w in Util.wordutil.get_words(raw_query):
            hot_word[w] += 1
    # The original sorted hot_word (by word, descending) immediately before
    # converting to a dict -- dead O(n log n) work, since dict() discards
    # ordering. The sort is dropped; the returned mapping is unchanged.
    return dict([item for item in hot_word.items() if item[1] >= thr])


def split_word_num(word):
    """Split a trailing digit run off *word*, e.g. 'tv52' -> ('tv', '52').

    Returns *word* unchanged when it is empty, all digits, ends in a
    letter, or the non-digit prefix would be at most one character
    (e.g. 'x5'). Otherwise returns a (prefix, digits) tuple.

    Bug fix: the original left the loop variable unbound for one-char
    words that are neither a digit nor a letter (e.g. '#'), raising
    NameError; *idx* is now initialized, and the empty string is handled.
    """
    if not word or word.isdigit():
        return word
    if word[-1].isalpha():
        return word
    idx = 0  # guard: for len(word) == 1 the loop below never runs
    # Walk backwards to find where the trailing digit run starts.
    # range() (not xrange) is valid on both py2 and py3.
    for idx in range(len(word) - 1, 0, -1):
        if not (word[idx].isdigit() and word[idx - 1].isdigit()):
            break
    if idx <= 1:
        # Prefix of <= 1 char: not worth splitting, keep the token whole.
        return word
    return word[:idx], word[idx:]

def get_split(w):
    """Split mixed alpha-numeric tokens via split_word_num.

    Tokens that are purely digits or purely letters pass through
    unchanged; mixed tokens may come back as a (prefix, digits) tuple.
    """
    if w.isdigit() or w.isalpha():
        return w
    return split_word_num(w)

def get_split_hot_hot_word(thr):
    """Like get_hot_word, but splits mixed tokens such as 'tv52' into
    their alpha and numeric halves, crediting the full count to both.

    Returns a defaultdict(int) of word -> count.
    """
    hot_word = defaultdict(int)
    for w, freq in get_hot_word(thr).items():
        if w.isdigit() or w.isalpha():
            # Pure tokens pass through unchanged.
            hot_word[w] += freq
            continue
        split = split_word_num(w)
        # Idiom fix: isinstance replaces the original type(split) == type(())
        if isinstance(split, tuple):
            alpha, num = split
            hot_word[alpha] += freq
            hot_word[num] += freq
        else:
            hot_word[split] += freq
    return hot_word



def filter_correct_word(words):
    """Keep only entries of *words* whose key is a known word.

    A word is "known" when it appears in either the product-catalog
    vocabulary or the general-English corpus. Returns a defaultdict(int).
    """
    known_product = get_product_words_2()
    known_english = get_normal_words()
    filtered = defaultdict(int)
    for w in words:
        if w in known_product or w in known_english:
            filtered[w] = words[w]
    return filtered

def make_query_correct():
    """Spell-correct every query in test_file and write the result to
    correct_test_file.

    Pipeline: build a frequency-ranked, lemmatized vocabulary of query
    words filtered against known product/English words, then for each
    test query replace unknown words with the closest known word by
    edit distance (preferring high-frequency query words).
    """
    lemmatizer = WordNetLemmatizer()
    # Vocabulary of split query words, most frequent first.
    hot_word = sorted(get_split_hot_hot_word(1).items(), key=lambda x:x[1], reverse=True)
    lemma_word = defaultdict(int)
    for w in hot_word:
        # Collapse inflected forms; later (lower-count) duplicates of the
        # same lemma overwrite earlier ones.
        lemma = lemmatizer.lemmatize(w[0])
        lemma_word[lemma] = w[1]
    # Keep only lemmas that are known product/English words.
    correct_word = filter_correct_word(lemma_word)
    #reader = readfile(train_file)
    reader = readfile(test_file)
    #writer = writefile(correct_train_file)
    writer = writefile(correct_test_file)
    idx = 0
    # Test rows have no sku column (the commented variant handled train rows).
    #for (user, sku, category, raw_query, click_time, query_time) in reader:
    for (user, category, raw_query, click_time, query_time) in reader:
        idx += 1
        print idx
        # NOTE(review): the bare string below is a no-op statement kept as a
        # visual marker in the original code.
        '************************************************'
        raw_query = Util.wordutil.get_correct_test_query(raw_query)
        '************************************************'
        words = Util.wordutil.get_words(raw_query)
        new_words = list()
        # Split mixed tokens (e.g. 'tv52' -> 'tv', '52') before correction.
        for w in words:
            split = get_split(w)
            if type(split) == type(()):
                new_words.extend(list(split))
            else:
                new_words.append(split)
        #print words, new_words
        new_query = ''
        for w in new_words:
            lemma = lemmatizer.lemmatize(w)
            if lemma not in correct_word:
                # Numbers and very short words are left as-is.
                if lemma.isdigit() or len(lemma) <= 2:
                    new_query += lemma + ' '
                    continue
                # Edit distance from the raw word to every known word.
                dist = [[ww, edit_distance(w, ww)] for ww in correct_word]
                # Short words tolerate distance 1; longer words distance 3.
                if len(w) <= 4:
                    candidate = [[i[0], i[1], correct_word[i[0]]] for i in dist if i[1] <= 1]
                else:
                    candidate = [[i[0], i[1], correct_word[i[0]]] for i in dist if i[1] <= 3]
                if len(candidate) > 0:
                    # Rank: smaller distance dominates, frequency breaks ties.
                    correct = sorted(candidate, key=lambda x:(100 - x[1]) * 1000000 + x[2], reverse=True)
                    # Prefer candidates that were frequent query words (>= 100).
                    query_first = [i for i in correct if i[2] >= 100]
                    if len(query_first) > 0:
                        new_query += query_first[0][0] + ' '
                    else:
                        new_query += correct[0][0] + ' '
                else:
                    print 'no candidate: %s' % (lemma)
                    new_query += lemma + ' '
            else:
                new_query += lemma + ' '
        # Drop the trailing space added by the loop above.
        new_query = new_query[0:-1]
        #outline = ','.join([user, sku, category, new_query, '\"' + click_time + '\"', '\"' + query_time + '\"'])
        outline = ','.join([user, category, new_query, '\"' + click_time + '\"', '\"' + query_time + '\"'])
        #writer.writerow([outline])
        writer.write(outline + '\n')
        #print outline
                
# Script entry point. The large commented block below is a preserved
# exploratory experiment: it ranked lemmatized query words, compared them
# against the known vocabulary, and wrote candidate corrections to
# train_word_correct_file for manual inspection.
if __name__ == '__main__':
#    fw = open(train_word_correct_file, 'w')
#    
#    lemmatizer = WordNetLemmatizer()
#    hot_word = sorted(get_hot_word(1).items(), key=lambda x:x[1], reverse=True)
#    print 'word size=%s' % len(hot_word)
#    hot_word = sorted(get_split_hot_hot_word(1).items(), key=lambda x:x[1], reverse=True)
#    print 'word size=%s' % len(hot_word)
#    lemma_word = defaultdict(int)
#    for w in hot_word:
#        lemma = lemmatizer.lemmatize(w[0])
##        if lemma in lemma_word:
##            print '%s    %s    %s' % (w[0], lemma, lemma == w[0])
#        lemma_word[lemma] = w[1]
#    print 'word size=%s' % len(lemma_word)
#    correct_word = filter_correct_word(lemma_word)
#    print 'correct word size=%s' % len(correct_word)
#    rank = sorted(lemma_word.items(), key=lambda x: x[1], reverse=True)
#    target_word = [w for w in rank if w[0] not in correct_word]
#    print 'target word size=%s' % len(target_word)
#    for w in target_word:
#        if w[0].isdigit() or len(w[0]) <= 2:
#            continue
#        dist = [[ww, edit_distance(w[0], ww)] for ww in correct_word]
#        if len(w[0]) <= 4:
#            candidate = [[i[0], i[1], correct_word[i[0]]] for i in dist if i[1] <= 1]
#        else:
#            candidate = [[i[0], i[1], correct_word[i[0]]] for i in dist if i[1] <= 3]
##        if w[0] == 'hardend':
##            for ca in candidate:
##                print '%s %s %s' % (ca[0], ca[1], correct_word[ca[0]])
#        if len(candidate) > 0:
#            correct = sorted(candidate, key=lambda x:(100 - x[1]) * 1000000 + x[2], reverse=True)
#            query_first = [i for i in correct if i[2] >= 100]
#            if len(query_first) > 0:
#                pass
#                #print '%s %s %s' % (w[0], w[1], query_first[0:3])
#                fw.write('%s %s %s\n' % (w[0], w[1], query_first[0:3]))
#            else:
#                pass
#                #print '%s %s %s' % (w[0], w[1], correct[0:3])
#                fw.write('%s %s %s\n' % (w[0], w[1], correct[0:3]))
#        else:
#            print 'new word=%s' % w[0]

#    count = get_product_words_2()
#    print len(count)

    # Current behavior: run the full test-query correction pass.
    make_query_correct()    
    print 'done'
    
