#!/bin/env pypy

# the standalone version of new WordRank, not map-reduce style, anything in memory
# it has word segmenter and evaluater
# it can also evaluate the resulting 'word_rank.txt' from simulated_map_reduce or hadoop version
# type './word_rank_standalone -h' to see usage

import math
import re
import sys

# corpus and model file locations
CORPUS_PATH = "../data/brent.raw.txt"    # unsegmented input text
ORIGIN_PATH = "../data/brent.txt"        # gold-standard segmented text
MODEL_PATH = "output/brent.model.txt"    # default model trained by this script
MODEL_PATH2 = "../simulated_map_reduce/output/word_rank.txt"  # -y option
MODEL_PATH3 = "../hadoop/output/word_rank.txt"                # -z option
TEST_RESULT_PATH = "output/result.txt"   # per-line test output (see test())

# max len of word hypothesis, in characters
WINSIZE = 11

# ibv parameters: interior boundary value assigned to one-character words,
# which have no internal character bigram (see ibv_word)
IBV_SINGLE = 0.5

# ebv parameters (see exterior_boundary_values)
EBV_ITERATIONS = 30       # number of propagation rounds
EBV_LEARNING_RATE = 0.3   # fraction of mass passed to neighbours each round
EBV_SHARP_ADDITION = 0.001  # bonus for words seen at a line start/end
EBV_EPS = 1e-5            # scale of the lbv*rbv pruning threshold

# word rank parameters: formula selector passed to wr_type_set
WR_POLY = 0
WR_EXP = 1

def wr_type_set(t):
    """Select the word-rank formula family and install its parameters.

    t is WR_POLY or WR_EXP. Sets the module-level SCALE/ALPHA/DELTA,
    the cross-validation grids SCALES/ALPHAS/DELTAS, and rebinds the
    global word_rank(lbv, rbv, ibv) scoring function. The nested
    functions read SCALE/ALPHA/DELTA from module globals at call time,
    so the cross-validation loops in main() can rebind them directly.
    An unrecognized t records WR_TYPE but leaves everything else alone.
    """
    global WR_TYPE
    global SCALE, ALPHA, DELTA
    global SCALES, ALPHAS, DELTAS
    global word_rank

    WR_TYPE = t
    if t == WR_POLY:
        SCALE, ALPHA, DELTA = 32.0, 6.3, 6.8
        SCALES = [16.0, 32.0, 64.0]
        ALPHAS = [6.2, 6.3, 6.4, 6.5, 6.6]
        DELTAS = [6.6, 6.7, 6.8, 6.9, 7.0]

        def word_rank(lbv, rbv, ibv):
            # polynomial in the shifted, clamped interior value
            shifted = max(ibv + DELTA, 0.0)
            return SCALE * lbv * rbv * (shifted ** ALPHA)

    elif t == WR_EXP:
        SCALE, ALPHA, DELTA = 64.0, 2.1, 15.6
        SCALES = [64.0, 128.0, 256.0]
        ALPHAS = [1.8, 1.9, 2.0, 2.1, 2.2]
        DELTAS = [15.2, 15.3, 15.4, 15.5, 15.6, 15.7, 15.8, 15.9]

        def word_rank(lbv, rbv, ibv):
            # exponential in the shifted, clamped interior value
            shifted = max(ibv + DELTA, 0.0)
            return SCALE * lbv * rbv * (ALPHA ** shifted)

wr_type_set(WR_POLY)  # module default; -p / -e flags in main() can override

def is_word(word):
    """Heuristic filter for word hypotheses.

    Accepts strings of two or more characters containing at least one
    character from a fixed symbol class, plus a whitelist of
    single-character words. (The symbols look like a phoneme alphabet
    for the Brent corpus -- TODO confirm against the data files.)
    """
    has_marker = re.search("[iI6R3uUaA#O%E&e97Qo()*]", word) is not None
    if has_marker and len(word) > 1:
        return True
    return word in ("6", "9", "e", "o", "i", "%", "*", "(", "#")

def main():
    """Command-line driver: parse single-letter flags, train or load the
    word-rank model, then segment the corpus and report an F-score."""
    # ******************** arguments ********************
    # every lowercase letter anywhere in argv acts as a flag, so
    # '-fc', '-f -c' and 'fc' all behave the same
    options = filter(str.islower, ''.join(sys.argv[1: ]))
    forced_training = 'f' in options
    cross_validation = 'c' in options
    debug = 'd' in options
    need_help = 'h' in options
    global MODEL_PATH, MODEL_PATH2, MODEL_PATH3
    # -y / -z evaluate the simulated-map-reduce / hadoop model instead
    if 'y' in options:
        MODEL_PATH = MODEL_PATH2
    elif 'z' in options:
        MODEL_PATH = MODEL_PATH3
    global WR_TYPE
    if 'p' in options:
        print "use WR_POLY"
        wr_type_set(WR_POLY)
    if 'e' in options:
        print "use WR_EXP"
        wr_type_set(WR_EXP)

    if need_help:
        print 'Usage: %s [OPTIONS]' % sys.argv[0]
        print 'Options:'
        print '  -f     forced training'
        print '  -c     cross validation'
        print '  -d     debug mode'
        print '  -h     help'
        print '  -p     word rank poly'
        print '  -e     word rank exp'
        print '  -y     test %s' % MODEL_PATH2
        print '  -z     test %s' % MODEL_PATH3
        return

    # ******************** train ********************
    corpus = open(CORPUS_PATH).readlines()
    print 'corpus loaded: %d lines' % len(corpus)

    model = model_read(MODEL_PATH)

    # external model files (-y/-z) are only evaluated, never trained here
    if ('y' in options or 'z' in options) and model is None:
        print "model `%s' doesn't exist" % MODEL_PATH
        return

    if forced_training or model is None:
        print 'building link structure..'
        lstruct = build_links(corpus)
        print 'link structure built'
        print '%d word hypothesis retrieved' % len(lstruct.words)

        # exterior values propagate over the adjacency graph and prune words
        (words, lbv, rbv) = exterior_boundary_values(lstruct)
        print 'exterior boundary values calculation completed'
        print 'remaining words:', len(words)

        # interior value of a word = weakest character-bigram PMI inside it
        mi_model = mutual_information_model(corpus)
        ibv = [ibv_word(mi_model, w) for w in words]
        print 'interior boundary values calculated'

        model_write(MODEL_PATH, words, lbv, rbv, ibv)
        print "trained model saved to `%s'" % MODEL_PATH
    else:
        (words, lbv, rbv, ibv) = model
        print "trained model `%s' loaded" % MODEL_PATH

    # debug: dump the 30 highest-ranked words with their component values
    if debug:
        wr = [word_rank(*vs) for vs in zip(lbv, rbv, ibv)]
        arr = zip(words, wr, lbv, rbv, ibv)
        arr.sort(lambda a, b: cmp(b[1], a[1]))
        print '%11s  %10s %11s %11s %7s' % ('', 'wr', 'lbv', 'rbv', 'ibv')
        for w, v, l, r, i in arr[: 30]:
            print '%11s: %10.2lf %11.6lf %11.6lf %7.2lf' % (w, v, l, r, i)
        print
        raw_input('press enter to continue...')

    # ******************** test ********************
    origin = open(ORIGIN_PATH).readlines()
    if not cross_validation:
        print 'testing...'
        wr = [word_rank(*vs) for vs in zip(lbv, rbv, ibv)]
        f_score = test(corpus, origin, words, wr, TEST_RESULT_PATH)
        print 'F-Score: %.4f' % f_score
    else:
        # grid search over SCALE/ALPHA/DELTA; word_rank reads these as
        # module globals, so rebinding the loop variables re-tunes it
        print 'testing... (cross validation)'
        global SCALE, ALPHA, DELTA
        fmax = (0.0, 0.0, 0.0, 0.0)
        for SCALE in SCALES:
            for ALPHA in ALPHAS:
                for DELTA in DELTAS:
                    wr = [word_rank(*vs) for vs in zip(lbv, rbv, ibv)]
                    f_score = test(corpus, origin, words, wr)
                    fmax = max(fmax, (f_score, SCALE, ALPHA, DELTA))
                    print 'SCALE:%9.1f    ALPHA:%5.2f    DELTA:%5.2f    F-Score:%7.4f' % (SCALE, ALPHA, DELTA, f_score)
        print 'Max F-Score:', fmax

def retrieve_words(corpus):
    """Collect word hypotheses from the corpus lines.

    Counts every substring of length 1..WINSIZE, then discards a
    candidate when it is a proper substring of some candidate occurring
    at least as often, or when it occurs only once. Survivors are
    additionally filtered through is_word. Returns a sorted list.
    """
    freq = {}
    for line in corpus:
        line = line.strip()
        for size in range(1, WINSIZE + 1):
            for start in range(len(line) - size + 1):
                sub = line[start: start + size]
                freq[sub] = freq.get(sub, 0) + 1

    dominated = set()
    for w in freq:
        length = len(w)
        for i in range(length):
            for j in range(i + 1, length + 1):
                if j - i == length:
                    continue  # skip w itself; only proper substrings
                piece = w[i: j]
                if freq[piece] <= freq[w]:
                    dominated.add(piece)
        if freq[w] == 1:
            dominated.add(w)  # hapax hypotheses carry no evidence

    return sorted(w for w in freq if is_word(w) and w not in dominated)

def model_read(filename):
    """Load a trained model file.

    Each line holds four whitespace-separated tokens: the word and its
    lbv/rbv/ibv values. Numeric tokens may carry a tag ("lbv:0.25", as
    in the map-reduce outputs) or be plain numbers (as written by
    model_write). Returns (words, lbv, rbv, ibv) parallel lists, or
    None when the file cannot be opened.
    """
    def value(token):
        # Drop an optional "name:" tag before the number. The original
        # used token.lstrip('lbv:'), but lstrip strips a *character set*
        # rather than a prefix, which silently eats any leading 'l', 'b',
        # 'v' or ':' characters of an untagged token.
        return float(token.split(':')[-1])

    try:
        fin = open(filename, 'r')
    except IOError:
        return None
    words = []
    lbv = []
    rbv = []
    ibv = []
    with fin:  # guarantee the handle is closed even if a line is malformed
        for line in fin:
            tokens = line.strip().split()
            words.append(tokens[0])
            lbv.append(value(tokens[1]))
            rbv.append(value(tokens[2]))
            ibv.append(value(tokens[3]))
    return (words, lbv, rbv, ibv)

def model_write(filename, words, lbv, rbv, ibv):
    """Persist the model: one line per word with its lbv/rbv/ibv values."""
    with open(filename, 'w') as fout:
        for entry in zip(words, lbv, rbv, ibv):
            fout.write('%-11s %16.12f %16.12f %16.12f\n' % entry)

def mutual_information_model(corpus):
    """Pointwise mutual information for every adjacent character pair.

    Returns a dict mapping each observed bigram "ab" to
    log2 P(ab) - log2 P(a) - log2 P(b), where the probabilities are
    relative frequencies over all stripped corpus lines.
    """
    unigram = {}
    bigram = {}
    for line in corpus:
        line = line.strip()
        for ch in line:
            unigram[ch] = unigram.get(ch, 0) + 1
        for pos in range(len(line) - 1):
            pair = line[pos: pos + 2]
            bigram[pair] = bigram.get(pair, 0) + 1

    total1 = float(sum(unigram.values()))
    total2 = float(sum(bigram.values()))

    # log-probabilities of single characters
    logp1 = dict((ch, math.log(cnt / total1, 2.0))
                 for ch, cnt in unigram.items())

    # overwrite counts with PMI values in a single pass
    for pair in bigram:
        logp2 = math.log(bigram[pair] / total2, 2.0)
        bigram[pair] = logp2 - logp1[pair[0]] - logp1[pair[1]]
    return bigram

def ibv_word(mi_model, w):
    """Interior boundary value of word *w*: its weakest internal bigram PMI.

    A single character has no interior bigram and gets the fixed
    IBV_SINGLE constant instead.
    """
    if len(w) == 1:
        return IBV_SINGLE
    interior = (mi_model[w[i: i + 2]] for i in range(len(w) - 1))
    return min(interior)

def uniq(a):
    """Return the distinct elements of *a* as a list (order unspecified)."""
    distinct = set()
    for item in a:
        distinct.add(item)
    return list(distinct)

class LinkStructure:
    """Plain record for the word-adjacency graph produced by build_links.

    build_links sets: words (list of hypotheses), linkl/linkr (per-word
    sets of left/right neighbour indices), sharpl/sharpr (per-word flags
    for line-initial/line-final occurrence).
    """
    pass

def build_links(corpus):
    """Build the word-adjacency graph used by exterior_boundary_values.

    For every pair of word hypotheses occurring back-to-back in a line
    (x immediately followed by y), records y in linkr[x] and x in
    linkl[y]. Words aligned with a line start (end) are flagged in
    sharpl (sharpr). Returns a populated LinkStructure.
    """
    words = retrieve_words(corpus)
    n = len(words)
    index = { words[i]: i for i in range(n) }
    linkl = [[] for i in range(n)]
    linkr = [[] for i in range(n)]
    sharpl = [False] * n
    sharpr = [False] * n
    for line in corpus:
        line = line.strip()
        l = len(line)
        for i in range(l):
            # x = line[i:j] (left word), y = line[j:k] (right word);
            # both spans are at most WINSIZE characters long
            for j in range(i + 1, min(i + WINSIZE + 1, l)):
                if line[i: j] not in index: continue
                x = index[line[i: j]]
                for k in range(j + 1, min(j + WINSIZE + 1, l + 1)):
                    if line[j: k] not in index: continue
                    y = index[line[j: k]]
                    linkl[y].append(x)
                    linkr[x].append(y)
        # flag words aligned with the start of this line
        for i in range(1, min(WINSIZE + 1, l + 1)):
            if line[: i] not in index: continue
            x = index[line[: i]]
            sharpl[x] = True
        # flag words aligned with the end of this line
        for i in range(l - 1, max(l - WINSIZE - 1, -1), -1):
            if line[i: ] not in index: continue
            x = index[line[i: ]]
            sharpr[x] = True
    # deduplicate the neighbour lists
    for x in range(n):
        linkl[x] = set(linkl[x])
        linkr[x] = set(linkr[x])

    lstruct = LinkStructure()
    lstruct.words = words
    lstruct.linkl = linkl
    lstruct.linkr = linkr
    lstruct.sharpl = sharpl
    lstruct.sharpr = sharpr
    return lstruct

def normalize(a):
    """Rescale list *a* in place to unit Euclidean (L2) length."""
    length = math.sqrt(sum(v * v for v in a))
    for idx, v in enumerate(a):
        a[idx] = v / length

def normalize1(a):
    """Rescale list *a* in place so its entries sum to one (L1)."""
    total = sum(a)
    for idx, v in enumerate(a):
        a[idx] = v / total

def exterior_boundary_values(lstruct):
    """Iteratively propagate boundary mass over the word-adjacency graph.

    lbv[x]/rbv[x] measure how strongly word x acts as a left/right
    boundary unit. Each round a live word passes EBV_LEARNING_RATE of
    its mass evenly to its neighbours; words whose lbv*rbv product
    falls below EBV_EPS/n^2 are killed and unlinked from the graph.
    Line-initial/final words receive a small EBV_SHARP_ADDITION bonus.
    Returns (words, lbv, rbv) restricted to the surviving words.
    """
    words = lstruct.words
    linkl = lstruct.linkl
    linkr = lstruct.linkr
    sharpl = lstruct.sharpl
    sharpr = lstruct.sharpr
    # counts of line-start / line-end words, used to rescale the bonus
    sl = len(filter(None, sharpl))
    sr = len(filter(None, sharpr))
    n = len(words)
    lbv = [1.0 / n] * n
    rbv = [1.0 / n] * n
    kill = [0] * n
    for i in range(1, EBV_ITERATIONS + 1):
        lbv1 = lbv
        rbv1 = rbv
        lbv = [0.0] * n
        rbv = [0.0] * n
        # dl/dr collect dead neighbours; unlinked only after the sweep
        # so this iteration still sees a consistent graph
        dl = [set() for x in range(n)]
        dr = [set() for x in range(n)]
        for x in range(n):
            if kill[x]:
                continue
            # distribute a fraction of x's mass evenly over its neighbours
            for y in linkl[x]:
                rbv[y] += lbv1[x] / len(linkl[x]) * EBV_LEARNING_RATE
            for y in linkr[x]:
                lbv[y] += rbv1[x] / len(linkr[x]) * EBV_LEARNING_RATE
            # prune words whose combined boundary mass became negligible
            if lbv1[x] * rbv1[x] < EBV_EPS / n ** 2:
                kill[x] = 1
                for y in linkl[x]:
                    dr[y].add(x)
#                     linkr[y].remove(x)
                for y in linkr[x]:
                    dl[y].add(x)
#                     linkl[y].remove(x)
                continue
            # keep the untransferred fraction (all of it when unlinked)
            lbv[x] += (1.0 - EBV_LEARNING_RATE) * lbv1[x] if linkl[x] else lbv1[x]
            rbv[x] += (1.0 - EBV_LEARNING_RATE) * rbv1[x] if linkr[x] else rbv1[x]
            if sharpl[x]:
                lbv[x] += EBV_SHARP_ADDITION
            if sharpr[x]:
                rbv[x] += EBV_SHARP_ADDITION
        # scale everything down to offset the sharp bonuses added above
        for x in range(n):
            lbv[x] /= (1.0 + EBV_SHARP_ADDITION * sl)
            rbv[x] /= (1.0 + EBV_SHARP_ADDITION * sr)
        for x in range(n):
            linkl[x] -= dl[x]
            linkr[x] -= dr[x]
        print 'iteration %d..' % i
    words = [words[i] for i in range(n) if not kill[i]]
    lbv = [lbv[i] for i in range(n) if not kill[i]]
    rbv = [rbv[i] for i in range(n) if not kill[i]]
    return (words, lbv, rbv)

def test(corpus, origin, words, wr, fout_path = None):
    """Segment every corpus line and score it against the gold standard.

    A word counts as matched when it lies on the longest common
    subsequence of the output and answer token lists; the F-score is
    2*matches / (output tokens + answer tokens), aggregated over all
    lines. When fout_path is given, per-line answer/output pairs and
    the final score are also written there. Returns the F-score.
    """
    fout = open(fout_path, 'w') if fout_path is not None else None
    wr_dict = dict(zip(words, wr))
    total_matches = total_receives = total_answer = 0
    for raw, ans in zip(corpus, origin):
        outs = segmentation(raw.strip(), wr_dict)
        anss = ans.split()
        total_matches += longest_common_subsequence(outs, anss)
        total_receives += len(outs)
        total_answer += len(anss)
        if fout is not None:
            print >> fout, 'Answer:', ans.strip()
            print >> fout, 'Output:', ' '.join(outs)
            print >> fout
    f_score = (total_matches * 2.0) / (total_receives + total_answer)
    if fout is not None:
        print >> fout, 'F-Score: %.4f' % f_score
        fout.close()
        print "test result saved to `%s'" % TEST_RESULT_PATH
    return f_score

def segmentation(raw, wr_dict):
    """Segment *raw* into the word sequence maximizing the rank product.

    Dynamic program over cut positions: best[p] is the highest product
    of word ranks over any segmentation of raw[:p] (unreachable
    positions stay at -1, unknown words rank 0.0). Words are at most
    WINSIZE characters. Returns the word list, or [] when no cut
    sequence reaches the end of the string.
    """
    l = len(raw)
    best = [-1.0] * (l + 1)
    best[0] = 1.0
    prev = [-1] * (l + 1)
    for start in range(l):
        limit = min(start + WINSIZE, l)
        for end in range(start + 1, limit + 1):
            score = best[start] * wr_dict.get(raw[start: end], 0.0)
            if score > best[end]:
                best[end] = score
                prev[end] = start
    # walk the back-pointers from the end, then flip into reading order
    pieces = []
    pos = l
    while prev[pos] != -1:
        pieces.append(raw[prev[pos]: pos])
        pos = prev[pos]
    pieces.reverse()
    return pieces

def longest_common_subsequence(a, b):
    """Return the length of the longest common subsequence of a and b.

    Identical sequences short-circuit; otherwise a standard rolling-row
    dynamic program is used (O(len(a)*len(b)) time, O(len(b)) space).
    """
    if len(a) == len(b) and a == b:
        return len(a)
    previous = [0] * (len(b) + 1)
    for x in a:
        current = [0]
        for j, y in enumerate(b):
            score = max(previous[j + 1], current[j])
            if x == y:
                score = max(score, previous[j] + 1)
            current.append(score)
        previous = current
    return previous[-1]

# script entry point
if __name__ == '__main__':
    main()
