#!/usr/bin/env python
#-*- coding:utf8 -*-

from nltk.probability import FreqDist
from nltk.corpus import PlaintextCorpusReader
from nltk.stem.porter import PorterStemmer
import math, re, copy, sys, nltk

# Working directory holding corpus dumps and generated feature files.
# NOTE: Windows-style separators; every path below is built by string concatenation.
DATA_DIR = "data\\"
#DATA_DIR = "data_wt10g\\"

def load_org_map():
    """Load the MAP score of each original (unexpanded) query.

    Reads DATA_DIR/origin_query.stat, one line per query:
        <tag> <query_number> <map_value>
    The first column is an unused tag.

    Returns: {351: 0.342, 352: 0.234, ...}
    """
    org_map_dict = dict()
    # 'with' closes the file deterministically (the original leaked the handle)
    with open(DATA_DIR+"origin_query.stat", 'r') as stat_f:
        for ln in stat_f:
            _, q_num, map_v = ln.strip().split()
            org_map_dict[int(q_num)] = float(map_v)
    return org_map_dict

def load_exp2_map():
    """Load the MAP scores of every two-word expansion of each query.

    Reads DATA_DIR/exp2_word_list_map.txt, one line per word pair:
        <query_number> <word1> <word2> <map_value>
    Values are appended in file order, which callers index by pair number.

    Returns: {351: [0.342, 0.223, ...], 352: [0.443, 0.334, ...], ...}
    """
    exp2_map_dict = dict()
    with open(DATA_DIR+"exp2_word_list_map.txt", "r") as map_f:
        for ln in map_f:
            q_num, exp1, exp2, map_v = ln.strip().split()
            # dict.has_key() is Python-2 only; setdefault works everywhere
            exp2_map_dict.setdefault(int(q_num), []).append(float(map_v))
    return exp2_map_dict

def load_exp1_words():
    """Load the top expansion candidates and their single-word-expansion MAPs.

    Reads DATA_DIR/top_100_exp_map.txt, one line per candidate:
        <query_number> <rank> <expansion_word> <map_value>

    Returns (word_list_dict, exp_map_dict):
        {351: {0: "apple", 1: "orange", ...}, ...}
        {351: {0: 0.285, 1: 0.255, ...}, ...}
    """
    word_list_dict = dict()
    exp_map_dict = dict()
    with open(DATA_DIR+"top_100_exp_map.txt", "r") as exp_f:
        for ln in exp_f:
            # 'map_v' avoids shadowing the builtin map() (original used 'map')
            q_num, rank, exp, map_v = ln.strip().split()
            q_num = int(q_num)
            rank = int(rank)
            word_list_dict.setdefault(q_num, dict())[rank] = exp
            exp_map_dict.setdefault(q_num, dict())[rank] = float(map_v)
    return word_list_dict, exp_map_dict

def load_orgq_words(query_num_start):
    """Load the word list of each original query.

    Reads DATA_DIR/org_query_word_list.txt; line i (0-based) holds the words
    of query number query_num_start + i.

    Returns: {351: ["fruit", "health", ...], ...}
    """
    words = dict()
    with open(DATA_DIR+"org_query_word_list.txt", 'r') as list_f:
        # enumerate replaces the original's manually maintained counter
        for i, ln in enumerate(list_f):
            words[query_num_start + i] = ln.strip().split()
    return words

def cnt_coocur_num(pos_list1_org, pos_list2_org, window_size=12*5):
    """Count co-occurring position pairs between two sorted position lists.

    A pair (a, b) is counted when the later position lies strictly within
    window_size of the earlier one.  Both lists must be sorted ascending.

    pos_list1_org, pos_list2_org: sorted character/word positions
    window_size: co-occurrence window (default 60, the original constant)
    Returns the number of in-window pairs.
    """
    # Index pointers replace the original's list.pop(0) loop, which both
    # copied the inputs and was O(n^2) due to repeated front removal.
    co_num = 0
    p1 = 0
    p2 = 0
    n1 = len(pos_list1_org)
    n2 = len(pos_list2_org)
    while p1 < n1 and p2 < n2:
        if pos_list1_org[p1] < pos_list2_org[p2]:
            # count every remaining position of list2 inside list1's window
            i = p2
            while i < n2 and pos_list1_org[p1] + window_size > pos_list2_org[i]:
                co_num += 1
                i += 1
            p1 += 1
        else:
            i = p1
            while i < n1 and pos_list2_org[p2] + window_size > pos_list1_org[i]:
                co_num += 1
                i += 1
            p2 += 1
    return co_num

def cnt_strong_coocur_num(pos_list1_org, pos_list2_org, pos_list3_org, window_size=15*5):
    """Count "strong" co-occurrences: triples with one position from each
    sorted list, all within window_size of the earliest of the three.

    Anchored at the current minimum head, every remaining in-window pair from
    the other two lists is counted, then the anchor advances (same counting
    scheme as the original pop(0) implementation, tie-breaking preserved).

    Returns the number of in-window triples.
    """
    l1, l2, l3 = pos_list1_org, pos_list2_org, pos_list3_org
    n1, n2, n3 = len(l1), len(l2), len(l3)
    p1 = p2 = p3 = 0    # index pointers instead of O(n^2) list.pop(0)
    co_num = 0
    while p1 < n1 and p2 < n2 and p3 < n3:
        if l1[p1] <= l2[p2] and l1[p1] <= l3[p3]:
            limit = l1[p1] + window_size
            i = p2
            while i < n2 and l2[i] < limit:
                j = p3
                while j < n3 and l3[j] < limit:
                    co_num += 1
                    j += 1
                i += 1
            p1 += 1
        elif l2[p2] <= l1[p1] and l2[p2] <= l3[p3]:
            limit = l2[p2] + window_size
            i = p1
            while i < n1 and l1[i] < limit:
                j = p3
                while j < n3 and l3[j] < limit:
                    co_num += 1
                    j += 1
                i += 1
            p2 += 1
        else:
            # l3 holds the minimum head (the three branches are exhaustive)
            limit = l3[p3] + window_size
            i = p1
            while i < n1 and l1[i] < limit:
                j = p2
                while j < n2 and l2[j] < limit:
                    co_num += 1
                    j += 1
                i += 1
            p3 += 1
    return co_num

def compMinDist(pos_list1, pos_list2):
    """Return the smallest absolute distance between any position of
    pos_list1 and any of pos_list2 (both sorted ascending), via a merge walk.

    Returns 0 when either list is empty (callers treat 0 as "no distance").
    """
    # None sentinel: sys.maxint was removed in Python 3
    min_dist = None
    i = 0
    j = 0
    while i < len(pos_list1) and j < len(pos_list2):
        if pos_list1[i] < pos_list2[j]:
            dist = pos_list2[j] - pos_list1[i]
            i += 1
        else:
            dist = pos_list1[i] - pos_list2[j]
            j += 1
        if min_dist is None or dist < min_dist:
            min_dist = dist
    if min_dist is None:
        min_dist = 0
    return min_dist

def compAllOccurNum(qws_pos_list, sep_pos_list):
    """Count the text segments in which every query word occurs.

    qws_pos_list: one sorted occurrence-position list per query word
    sep_pos_list: sorted positions of the segment separators; segment k is
                  the text before sep_pos_list[k] not consumed by segment k-1
    Returns the number of segments containing at least one occurrence of
    every word.
    """
    # Guard: the original raised IndexError on an empty word list
    if not qws_pos_list:
        return 0
    qws_exist_flags = []
    for qw_pos_l in qws_pos_list:
        flags = []
        cur_pos = 0
        for sep_p in sep_pos_list:
            if cur_pos < len(qw_pos_l) and qw_pos_l[cur_pos] < sep_p:
                flags.append(1)
                # consume every occurrence belonging to this segment
                while cur_pos < len(qw_pos_l) and qw_pos_l[cur_pos] < sep_p:
                    cur_pos += 1
            else:
                flags.append(0)
        qws_exist_flags.append(flags)
    # a segment counts only when every word's flag for it is set
    return sum(1 for seg_flags in zip(*qws_exist_flags) if all(seg_flags))

def fb_text_generator(q_num):
    """Yield the feedback documents of query q_num, one at a time.

    Reads DATA_DIR/<q_num>_text.txt, in which documents are separated by
    lines starting with "=========".  Each yielded value is the raw text of
    one document (newlines preserved, separator line excluded).
    """
    f = open(DATA_DIR+"%d_text.txt" % q_num, "r")
    try:
        # accumulate lines and join once -- the original rebuilt the whole
        # string on every line (quadratic)
        parts = []
        for ln in f:
            if re.match("^=========", ln):
                yield "".join(parts)
                parts = []
            else:
                parts.append(ln)
        # BUG FIX: a trailing document with no closing separator is yielded
        # directly; the original spun through a 1,000,000-iteration guard
        # loop before emitting it.
        if parts:
            yield "".join(parts)
    finally:
        f.close()

def comp_cosine(vec1, vec2):
    """Cosine similarity of two equal-length numeric vectors.

    Returns -1 on a length mismatch (callers test for a negative result
    rather than catching an exception); returns 0.0 when either vector is
    all zeros.
    """
    if len(vec1) != len(vec2):
        print("ERROR!! vector length is not equal!")
        return -1
    numerator = 0
    denominator1 = 0
    denominator2 = 0
    # single zip pass accumulates dot product and both squared norms
    for e1, e2 in zip(vec1, vec2):
        numerator += e1 * e2
        denominator1 += e1 ** 2
        denominator2 += e2 ** 2
    if denominator1 * denominator2 == 0:
        return 0.0
    return float(numerator) / math.sqrt(denominator1 * denominator2)

def gen_exp2_feature(q_num, exp_words_dict, top_n=20):
    """Compute pairwise (exp2) features for the top_n expansion words of
    query q_num.

    Returns (exp2_cosine_dict, features, labels); entry k corresponds to the
    k-th word pair (i, j), i < j, in row-major order:
      exp2_cosine_dict[k]: cosine of the two words' per-feedback-document
                           occurrence-count vectors
      features[k]:         [log rel. freq. w1, log rel. freq. w2,
                            log co-occurrence rate, log min distance]
      labels[k]:           1 when the pair expansion improved MAP by >0.5%
                           (or lifted a zero baseline), else 0
    Returns None early if any cosine computation fails.
    """
    #exp_words_dict: {0:"apple", 1:"orange"...}
    #return: {0:0.123, 1:0.234, 2:0.112....}
    exp1_occu_vec_dict = dict()
    exp2_cosine_dict = dict()
    # build one occurrence-count vector per word: one entry per feedback doc
    for txt in fb_text_generator(q_num):
        for rank in range(top_n):
#            print "##########rank: %d" % ( rank)
            exp_w = exp_words_dict[rank]
            # NOTE(review): the "\W" prefix misses a match at text position 0
            occu_num = len(re.findall("\W"+exp_w, txt, re.IGNORECASE))
            if not exp1_occu_vec_dict.has_key(rank):
                exp1_occu_vec_dict[rank] = []
            exp1_occu_vec_dict[rank].append(occu_num)
    i = 0
    for rank1 in range(top_n):
        vec1 = exp1_occu_vec_dict[rank1]
        for rank2 in range(rank1+1, top_n):
            vec2 = exp1_occu_vec_dict[rank2]
            cosine = comp_cosine(vec1, vec2)
            if cosine < 0:
                print "ERROR!!"
                return
            exp2_cosine_dict[i] = cosine
            i += 1
        # NOTE(review): this assignment sits inside the rank1 loop (it is
        # re-executed each iteration) though it only needs to run once
        corpus_fn = "%d_text.txt" % q_num
 
    corpus = PlaintextCorpusReader(DATA_DIR+"", corpus_fn)
    all_raw_words =  corpus.words()
    word_dist = FreqDist()
    words_total_num = len(corpus.words())
    text = open(DATA_DIR+"%d_text.txt" % q_num, "r").read()
    # stemmed, lower-cased term frequencies over the whole feedback corpus
    # (FreqDist.inc() is the pre-NLTK-3 API)
    for w in corpus.words():
        w = PorterStemmer().stem(w).lower()
        word_dist.inc(w)
    #positions of text separator:"============================"
    sep_poss = []
    for m in re.finditer("="*40, text):
        sep_poss.append(m.start())
    features = []
    labels = []
    stem_func = lambda w: PorterStemmer().stem(w).lower()
    num = 0
    org_map_dict = load_org_map()
    exp2_map_dict = load_exp2_map()
    for i in range(top_n):
        for j in range(i+1, top_n):
#            print "doing exp words: %s %s " % (exp_words_dict[i], exp_words_dict[j])
            w1_cnt = word_dist[stem_func(exp_words_dict[i])]
            w2_cnt = word_dist[stem_func(exp_words_dict[j])]
            # f1/f11: log relative corpus frequency of each word (0.0 if absent)
            f1 = 0.0
            f11 = 0.0
            if w1_cnt > 0:
                f1 = math.log(float(w1_cnt)/words_total_num)
            if w2_cnt > 0:
                f11 = math.log(float(w2_cnt)/words_total_num)
            #feature 3
            coocur_cnt = 0
            min_dist = 0
            w1_poss = []
            w2_poss = []
            for m in re.finditer("\W"+exp_words_dict[i], text, re.IGNORECASE):
                w1_poss.append(m.start())
            for m in re.finditer("\W"+exp_words_dict[j], text, re.IGNORECASE):
                w2_poss.append(m.start())
            if len(w1_poss)!=0:
                co_num =  cnt_coocur_num(w1_poss, w2_poss)
                coocur_cnt += co_num
                min_dist = compMinDist(w1_poss, w2_poss)
            f3 = 0.0
            if coocur_cnt>0:
                f3 = math.log( (1.0*coocur_cnt)/words_total_num )
            f7 = 0.0
            if min_dist>0:
                f7 = math.log( min_dist )
            features.append([f1, f11, f3, f7])
            # label: did the pair's expansion MAP beat the original query's?
            exp2_map_v = exp2_map_dict[q_num][num]
            org_map_v = org_map_dict[q_num]
            label = 0
            if (org_map_v-0.0000005<0 and exp2_map_v-0.0000005>0) or (org_map_v-0.0000005>0 and (exp2_map_v-org_map_v)/org_map_v>0.005):
                label = 1
            labels.append(label)
            num += 1
    
    return exp2_cosine_dict, features, labels


def gen_exp1_feature(q_num, exp_words_dict, orgq_words, exp1_map, org_map):
    """Compute single-word (exp1) expansion features for query q_num and
    append one libsvm-style row per surviving candidate word to the
    trec7/trec8 train/all data files, plus the feature-words list file.

    Features (min-max normalized per query before writing):
      f1: log relative corpus frequency of the candidate word
      f3: log windowed co-occurrence rate with the original query words
      f5: log "strong" (3-way) co-occurrence rate with query-word pairs
      f7: log average co-occurrence-weighted minimum distance
      f9: log(#feedback docs containing candidate + all query words + 0.5)
    Label is 1 when expanding with the word improved MAP by >0.5% (or lifted
    a zero baseline).  Output files are opened in append mode -- run
    clear_it() first for a fresh build.
    """
    #q_num: 351
    #exp_words_dict: {0:"apple", 1:"orange"...}
    #orgq_words: ["fruit","health"...]
    #exp1_map: {0:0.25343, 1:0.1234, ...}
    #org_map : 0.0341
    corpus_fn = "%d_text.txt" % q_num
    corpus = PlaintextCorpusReader(DATA_DIR+"", corpus_fn)
    all_raw_words =  corpus.words()
    word_dist = FreqDist()
    words_total_num = len(corpus.words())
    text = open(DATA_DIR+"%d_text.txt" % q_num, "r").read()
    # stemmed, lower-cased term frequencies (FreqDist.inc() is pre-NLTK-3 API)
    for w in corpus.words():
        w = PorterStemmer().stem(w).lower()
        word_dist.inc(w)
    #positions of text separator:"============================"
    sep_poss = []
    for m in re.finditer("="*40, text):
        sep_poss.append(m.start())
    max_fs = [-sys.maxint]*5#5 is num of feature
    min_fs = [sys.maxint]*5
    features = []
    labels = []
    # positions of every original-query-word occurrence in the raw text
    # NOTE(review): the "\W" prefix misses a match at text position 0
    orgq_word_poss = []
    for qn,oq in enumerate(orgq_words):
        orgq_word_poss.append([])
        for m in re.finditer("\W"+oq, text, re.IGNORECASE):
            orgq_word_poss[qn].append(m.start())
    feature_words_list = []
    for rank,exp in exp_words_dict.items():
        #good:bad = 1:3(num of total good terms is 500)
        exp_word_poss = []
        for m in re.finditer("\W"+exp, text, re.IGNORECASE):
            exp_word_poss.append(m.start())
        #feature 1
        word_cnt = word_dist[PorterStemmer().stem(exp).lower()]
        if word_cnt < 3:
            #filter out noise
            continue
        f1 = 0.0
        if word_cnt>0:
            f1 = math.log(1.0*word_cnt/words_total_num)
        #feature 3
        orgq_num = len(orgq_words)
        coocur_cnt = 0
        dist_all = 0
        for wps in orgq_word_poss:
            if len(wps)!=0:
                co_num =  cnt_coocur_num(exp_word_poss, wps)
                coocur_cnt += co_num
                dist = compMinDist(exp_word_poss, wps)
                dist_all += dist*co_num
        f3 = 0.0
        if coocur_cnt>0:
            f3 = math.log( (1.0*coocur_cnt)/(orgq_num*words_total_num) )
        #feature 5
        # NOTE(review): this re-runs re.finditer for the query words on every
        # candidate even though orgq_word_poss above already holds those lists
        str_coocur_cnt = 0
        for i,oq1 in enumerate(orgq_words):
            for oq2 in orgq_words[i+1:]:
                orgq_word_poss1 = []
                orgq_word_poss2 = []
                for m in re.finditer("\W"+oq1, text, re.IGNORECASE):
                    orgq_word_poss1.append(m.start())
                for m in re.finditer("\W"+oq2, text, re.IGNORECASE):
                    orgq_word_poss2.append(m.start())
                str_coocur_cnt += cnt_strong_coocur_num(exp_word_poss, orgq_word_poss1, orgq_word_poss2)
#        print q_num, exp, rank, orgq_num, orgq_words
        f5 = 0.0
        if orgq_num>1:
            f5_val = (2.0*str_coocur_cnt)/(orgq_num*(orgq_num-1)*words_total_num)
            if f5_val-0.0 > 0.0000000001:
                f5 = math.log( f5_val )
        else:
            # single-word query: no word pairs exist, fall back to f3
            f5 = f3
        f7_val = 0.0
        if coocur_cnt>0:
            f7_val = dist_all/coocur_cnt
        f7 = 0.0
        if f7_val-0.0 > 0.0000000001:
            f7 = math.log( dist_all/coocur_cnt )
        all_occur_num = compAllOccurNum(orgq_word_poss+[exp_word_poss], sep_poss)
        f9 = math.log( all_occur_num+0.5 )
#        print f1,f3, f5, f7, f9
        # label: 1 when the expanded query's MAP beats the original by >0.5%
        label = 0
        if (org_map-0.0000005<0 and exp1_map[rank]-0.0000005>0) or (org_map-0.0000005>0 and (exp1_map[rank]-org_map)/org_map>0.005):
            label = 1
        temp_features = [f1, f3, f5, f7, f9]
        features.append(temp_features)
        feature_words_list.append((rank, exp))
        labels.append(label)
        # track per-query min/max for the normalization below
        for i,f in enumerate(temp_features):
            if f-max_fs[i]>0:
                max_fs[i] = f
            if f-min_fs[i]<0:
                min_fs[i] = f
    fn_prefix = "trec7"
    if q_num>400:
        fn_prefix = "trec8"
    # every 5th query goes to train split 1, the rest to split 2
    if q_num%5 ==0:
        train_data_f = open(DATA_DIR+"%s_exp1_traindata_1.txt" % fn_prefix, "a")
    else:
        train_data_f = open(DATA_DIR+"%s_exp1_traindata_2.txt" % fn_prefix, "a")
    total_data_f = open(DATA_DIR+"%s_exp1_alldata.txt" % fn_prefix, "a")
    for j,f in enumerate(features):
        train_data_f.write("%d " % labels[j])
        total_data_f.write("%d " % labels[j])
        for i,f_n in enumerate(f):
            # write 0 for exact-zero features or a degenerate (max==min) range
            value = 0.0
            if (f_n-0.0000001>0 or f_n+0.0000001<0) and (max_fs[i]-min_fs[i]-0.0000001>0 or max_fs[i]-min_fs[i]+0.0000001<0):
                value = (f_n-min_fs[i])/(max_fs[i]-min_fs[i])
#            print value,
            train_data_f.write("%d:%f " % (i+1, value))
            total_data_f.write("%d:%f " % (i+1, value))
        train_data_f.write("\n")
        total_data_f.write("\n")
#        print labels[j],""
    fn = "feature_words_wt10g.txt"
    if DATA_DIR=="data\\":
        fn = "feature_words_trec78.txt"
    fw_f = open(DATA_DIR+fn, 'a')
    for rank,exp in feature_words_list:
        fw_f.write("%d\t%d\t%s\n" % (q_num, rank, exp))
    fw_f.close()
    train_data_f.close()
    total_data_f.close()

def clear_it():
    """Delete previously generated training/feature files so a new run starts
    clean (gen_exp1_feature and friends open their outputs in append mode)."""
    import os
    data_files = [
        "trec7_exp1_traindata_1.txt",
        "trec7_exp1_traindata_2.txt",
        "trec8_exp1_traindata_1.txt",
        "trec8_exp1_traindata_2.txt",
        "trec7_exp1_alldata.txt",
        "trec8_exp1_alldata.txt",
        "trec7_exp2_alldata.txt",
        # BUG FIX: the original tested trec8_exp1_alldata.txt here but removed
        # trec8_exp2_alldata.txt, so the exp2 file could survive a clear (or
        # the remove could raise if only the exp1 file existed).
        "trec8_exp2_alldata.txt",
    ]
    for fn in data_files:
        path = DATA_DIR + fn
        if os.path.exists(path):
            os.remove(path)

    # original elif semantics preserved: at most one feature-words file is removed
    if os.path.exists("data_wt10g\\feature_words_wt10g.txt"):
        os.remove("data_wt10g\\feature_words_wt10g.txt")
    elif os.path.exists("data\\feature_words_trec78.txt"):
        os.remove("data\\feature_words_trec78.txt")
  
def exp1_evaluate(start_r, end_r):
    """Generate exp1 (single-word expansion) training data for queries
    start_r..end_r inclusive.  Clears old output files first."""
    org_map_dict = load_org_map()
    exp_words_dict, exp1_map_dict = load_exp1_words()
    orgqs_words = load_orgq_words(start_r)
    clear_it()
    for i in range(start_r, end_r+1):
        # some query numbers have no expansion candidates at all
        if i not in exp_words_dict:   # has_key() is Python-2 only
            continue
        gen_exp1_feature(i, exp_words_dict[i], orgqs_words[i], exp1_map_dict[i], org_map_dict[i])
        print("Done %d query!!" % i)
                
def exp2_evaluate(start_r, end_r):
    """Build exp2 (word-pair expansion) feature files for queries
    start_r..end_r inclusive.

    Writes libsvm-style rows "label 1:cos 2:f1 ... 5:f4"; features 2-5 are
    min-max normalized over all queries, feature 1 (cosine) is written as-is.
    TREC output is split by query number (>400 -> trec8) and into train
    splits 1 (q_num %% 5 == 0) and 2 (the rest).
    """
    TOP_N = 40
    exp_words_dict, exp1_map_dict = load_exp1_words()
    trec7_exp2_features_f = open(DATA_DIR+"trec7_exp2_alldata.txt", "w")
    trec7_exp2_traindata1_f = open(DATA_DIR+"trec7_exp2_traindata_1.txt", "w")
    trec7_exp2_traindata2_f = open(DATA_DIR+"trec7_exp2_traindata_2.txt", "w")
    trec8_exp2_features_f = open(DATA_DIR+"trec8_exp2_alldata.txt", "w")
    trec8_exp2_traindata1_f = open(DATA_DIR+"trec8_exp2_traindata_1.txt", "w")
    trec8_exp2_traindata2_f = open(DATA_DIR+"trec8_exp2_traindata_2.txt", "w")
    wt10g_exp2_features_f = None
    if DATA_DIR=="data_wt10g\\":
        wt10g_exp2_features_f = open(DATA_DIR+"wt10g_exp2_alldata.txt", "w")

    exp2_features = dict()
    labels = []
    # infinities instead of sys.maxint (removed in Python 3)
    min_fs = [float("inf")]*4
    max_fs = [-float("inf")]*4
    for q_num in range(start_r, end_r+1):
        if q_num not in exp_words_dict:   # has_key() is Python-2 only
            continue
        cos_features, features, tmp_labels = gen_exp2_feature(q_num, exp_words_dict[q_num], top_n=TOP_N)
        labels.extend(tmp_labels)
        exp2_features[q_num] = []
        # one feature row per unordered pair of expansion words
        for num in range(TOP_N*(TOP_N-1)//2):
            temp_ftr = [cos_features[num]] + features[num]
            exp2_features[q_num].append(temp_ftr)
        print("Done %d query!!" % q_num)

    # min & max of features 1..4 (index 0 is the cosine, never normalized)
    for q_exp2_features in exp2_features.values():
        for ftrs in q_exp2_features:
            min_fs = [min(ftrs[i+1],min_fs[i]) for i in range(4)]
            max_fs = [max(ftrs[i+1],max_fs[i]) for i in range(4)]
    # normalize and write out
    num = 0
    # BUG FIX: labels were collected in ascending q_num order, so iterate the
    # queries in that same order -- plain dict iteration order was arbitrary
    # in Python 2 and could pair labels with the wrong feature rows.
    for q_num in sorted(exp2_features):
        q_exp2_features = exp2_features[q_num]
        for ftrs in q_exp2_features:
            temp_norm_ftrs = [(ftrs[j+1]-min_fs[j])/(max_fs[j]-min_fs[j]) for j in range(4)]
            fs = "%d %d:%f %d:%f %d:%f %d:%f %d:%f" % \
                (labels[num],1,ftrs[0],2,temp_norm_ftrs[0],3,temp_norm_ftrs[1],4,temp_norm_ftrs[2],5,temp_norm_ftrs[3])
            if DATA_DIR=="data\\" and q_num>400:
                trec8_exp2_features_f.write(fs+"\n")
            elif DATA_DIR=="data\\":
                trec7_exp2_features_f.write(fs+"\n")
            elif DATA_DIR=="data_wt10g\\":
                wt10g_exp2_features_f.write(fs+"\n")
            # every 5th query goes to train split 1, the rest to split 2
            if DATA_DIR=="data\\" and q_num%5 == 0:
                if q_num>400:
                    trec8_exp2_traindata1_f.write(fs+"\n")
                else:
                    trec7_exp2_traindata1_f.write(fs+"\n")
            elif DATA_DIR=="data\\":
                if q_num>400:
                    trec8_exp2_traindata2_f.write(fs+"\n")
                else:
                    trec7_exp2_traindata2_f.write(fs+"\n")
            print(fs)
            num += 1
    trec8_exp2_features_f.close()
    trec7_exp2_features_f.close()
    trec8_exp2_traindata1_f.close()
    trec7_exp2_traindata1_f.close()
    trec8_exp2_traindata2_f.close()
    trec7_exp2_traindata2_f.close()
    # BUG FIX: the original closed this handle unconditionally, raising
    # NameError whenever DATA_DIR was not the wt10g directory.
    if wt10g_exp2_features_f is not None:
        wt10g_exp2_features_f.close()
    
    
def test1():
    """Smoke-test cnt_coocur_num on a tiny pair of position lists."""
    left_positions = [1, 6, 12, 20]
    right_positions = [5, 17, 22]
    print(cnt_coocur_num(left_positions, right_positions))

def test2():
    """Smoke-test cnt_strong_coocur_num on three tiny position lists."""
    first = [1, 3, 10, 12]
    second = [2, 3, 15, 17]
    third = [3, 4, 11, 12]
    print(cnt_strong_coocur_num(first, second, third))
    
def test3():
    """Smoke-test compMinDist on a tiny pair of position lists."""
    left_positions = [1, 3, 11]
    right_positions = [7, 15, 17]
    print(compMinDist(left_positions, right_positions))

def test4():
    """Smoke-test compAllOccurNum with four segments and three words."""
    separators = [100, 200, 300, 400]
    word_positions = [[50, 70, 90, 250, 350], [110, 210, 310], [11, 250, 350]]
    print(compAllOccurNum(word_positions, separators))
    
def test5():
    """Smoke-test gen_exp2_feature on topic 351 with the default top-20 words."""
    words_by_query, _maps_by_query = load_exp1_words()
    result = gen_exp2_feature(351, words_by_query[351], top_n=20)
    print(result)
    
if __name__=="__main__":
    # Entry point: build exp1 training data for TREC topics 351-450
    # (trec7: 351-400, trec8: 401-450).
    exp1_evaluate(351, 450)

    
#    test4()
#    print load_exp_words()
