import champollion, text_crawl, EnglishToken, ChineseToken
from collections import defaultdict
from laserembeddings import Laser
import math
import numpy as np

# --- Tunable constants ---
ENCODING = "utf8"  # encoding for the output alignment file
xtoyc = 0.78  # English-to-Chinese length ratio used in the length penalty — NOTE(review): presumably empirical, confirm
MINS = -10  # sentinel "minus infinity" score for impossible DP transitions
penalty01 = 0.8  # NOTE(review): defined but never used anywhere in this file
laser = Laser()  # LASER multilingual sentence encoder, loaded once at import time


# DP score table keyed by "x,y" strings; filled via set_score()/get_score().
score = {}
# Stopword table and EN->ZH translation dictionary, both keyed by token.
stop_dic, trans_dic = defaultdict(str), defaultdict(str)
stop_filename = "./lib/E.stoplist.txt"
dict_filename = "./lib/ecdict.gb.txt"

# Sentence lists, per-sentence length tables and token-frequency counters per language.
zh_st, en_st, len_zh, len_en = [], [], {}, {}
zh_token, zh_token2stat, en_token, en_token2stat = defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(
    int)

# Load stopwords first so the translation dictionary can skip them.
stop_dic = champollion.load_stop(stop_filename, stop_dic)
trans_dic = champollion.load_dict(dict_filename, stop_dic, trans_dic)

"""
    Given the HTML files
"""

# Crawl every file under ./html_file, clean the text, and split it into
# parallel Chinese / English sentence lists.
text = text_crawl.crawl_all_text("./html_file")
clear_text = text_crawl.clear_txt(text)
zh_list, en_list = text_crawl.convert_zh_en(clear_text)

tokenize_zh = ChineseToken.tokenize_zh(zh_list)
tokenize_en = EnglishToken.tokenize_en(en_list)

# Populate sentence axes, per-sentence lengths and token-frequency counters per side.
zh_st, len_zh, zh_token = champollion.load_axis(tokenize_zh, zh_st, len_zh, zh_token, zh_token2stat, stop_dic)
en_st, len_en, en_token = champollion.load_axis(tokenize_en, en_st, len_en, en_token, en_token2stat, stop_dic)
n_zh, n_en = len(zh_st), len(en_st)

# NOTE(review): raises ZeroDivisionError if no Chinese sentences were extracted.
zh_en_ratio = n_en / n_zh

# Sizing of the diagonal search band used by align(): roughly win_per_100 % of the
# corpus, at least min_win_size and at most max_win_size cells wide.
win_per_100 = 8
min_win_size = 10
max_win_size = 600
# NOTE(review): zh_en_ratio * n_en equals n_en**2 / n_zh; possibly intended
# zh_en_ratio * n_zh (== n_en, i.e. 8% of the English axis) — confirm.
w1_size = int(zh_en_ratio * n_en * win_per_100 / 100)
w2_size = int(abs(n_en - n_zh) * 3 / 4)
window_size = min(max(min_win_size, max(w1_size, w2_size)), max_win_size)

"""
    Set a score
"""


def set_score(x, y, d):
    """Record DP score ``d`` for alignment cell (x, y) in the global score table."""
    score[f"{x},{y}"] = d


"""
    Get a score
"""


def get_score(x, y):
    """Return the DP score stored for alignment cell (x, y), or 0 if absent.

    Uses dict.get instead of the original membership test + lookup, which
    computed the key string twice and looked the dictionary up twice.
    """
    return score.get(f"{x},{y}", 0)


def match_sentences_lex(x, y, x_sentence, y_sentence, token_stat):
    """Lexical match score between an English and a Chinese sentence.

    Tokens shared verbatim (and not stopwords) score by inverse corpus
    frequency; otherwise each token's dictionary translations (trans_dic)
    are tried against the target side. Returns the accumulated log score.
    """
    total = 0
    src_counts, tgt_counts = defaultdict(int), defaultdict(int)
    for tok in x_sentence.split():
        src_counts[tok] += 1
    for tok in y_sentence.split():
        tgt_counts[tok] += 1
    n_corpus = token_stat["total"]
    for tok in src_counts:
        if tok in tgt_counts and tok not in stop_dic:
            # Token appears verbatim on both sides: weight by rarity.
            total += math.log((n_corpus / token_stat[tok]) * min(src_counts[tok], tgt_counts[tok] + 1))
        else:
            # Fall back to dictionary translations; first hit wins.
            for candidate in trans_dic[tok].split(" "):
                if candidate in tgt_counts:
                    paired = min(src_counts[tok], tgt_counts[candidate])
                    if paired == 0:
                        continue
                    total += math.log(n_corpus / token_stat[tok] * paired + 1)
                    # Consume the paired occurrences on both sides.
                    src_counts[tok] -= paired
                    tgt_counts[candidate] -= paired
                    break
    return total


"""
    Sentence matching
"""


def match_sentences(pair):
    """Score one candidate alignment step for the DP.

    ``pair`` is "en_idx - zh_idx" for a 1-1 substitution, "en_idx -" or
    "- zh_idx" when one side is omitted. Omissions get a flat -0.1 penalty.
    A 1-1 pair is scored lexically via match_sentences_lex, damped by a
    length penalty when either sentence is long (> 60 length units).

    Renamed the parameter from ``map`` (shadowed the builtin); every call
    site in this file passes it positionally.
    """
    x_part, y_part = pair.split("-")
    x_ids, y_ids = x_part.split(), y_part.split()

    # One side missing -> omission/insertion penalty.
    if not x_ids or not y_ids or y_ids[0] == "":
        return -0.1

    x_sentence = en_st[int(x_ids[0])]
    y_sentence = zh_st[int(y_ids[0])]
    lex_score = match_sentences_lex(x_ids, y_ids, x_sentence, y_sentence, en_token)

    x_len = len_en[int(x_ids[0])]
    y_len = len_zh[int(y_ids[0])]
    length_penalty = 1
    if max(x_len, y_len / xtoyc) > 60:
        # Damp long sentences; closer length ratios are penalized less.
        length_penalty = math.log10(6 + 4 * min(x_len * xtoyc, y_len) / max(x_len * xtoyc, y_len))
    return lex_score * length_penalty


"""
    Alignment
"""


def align(n_en, n_zh):
    """
    Banded dynamic-programming alignment of English sentences (axis i, 0..n_en)
    against Chinese sentences (axis j, 0..n_zh).

    Only cells within ``window_size`` of the expected diagonal are scored.
    Transitions per cell: 1-1 substitution (s1), English sentence omitted
    (s2), Chinese sentence omitted (s3).

    Returns a dict mapping a backtrace step counter (largest key = first
    sentence pair) to strings "i <=> j", "i <=> omitted" or "omitted <=> j",
    where i/j are 1-based positions on the DP axes.
    """
    ralign = defaultdict(str)
    # Predecessor cell of (i, j). defaultdict(int) makes missing entries read
    # as 0, which is what eventually terminates the backtrace loop below.
    path_x, path_y = defaultdict(int), defaultdict(int)
    zh_en_ratio = n_en / n_zh
    for j in range(n_zh + 1):
        # Center the English-axis search band on the expected diagonal.
        center = int(j * zh_en_ratio)
        window_start = center - window_size if center - window_size > 0 else 0
        window_end = center + window_size if center + window_size < n_en else n_en
        for i in range(window_start, window_end + 1):
            # im1/jm1 are 0-based sentence indices passed to match_sentences.
            im1, jm1 = i - 1, j - 1
            s1 = get_score(i - 1, j - 1) + match_sentences(str(im1) + " - " + str(jm1)) if i > 0 and j > 0 else MINS
            s2 = get_score(i - 1, j) + match_sentences(str(im1) + " -") if i > 0 else MINS
            s3 = get_score(i, j - 1) + match_sentences("- " + str(jm1)) if j > 0 else MINS
            smax = max(s1, s2, s3)

            # NOTE(review): this also fires if a *computed* score happens to
            # equal MINS (-10) exactly — confirm that is acceptable.
            if smax == MINS:
                set_score(i, j, 0)
            elif smax == s1:
                # 1-1 match; ties favour s1 over s2 over s3 by elif order.
                set_score(i, j, s1)
                path_x[str(i) + "," + str(j)] = i - 1
                path_y[str(i) + "," + str(j)] = j - 1
            elif smax == s2:
                set_score(i, j, s2)
                path_x[str(i) + "," + str(j)] = i - 1
                path_y[str(i) + "," + str(j)] = j
            elif smax == s3:
                set_score(i, j, s3)
                path_x[str(i) + "," + str(j)] = i
                path_y[str(i) + "," + str(j)] = j - 1

    # Backtrace from the bottom-right corner; n counts steps so iterating the
    # keys in reverse order later replays the alignment front-to-back.
    n = 0
    i, j = n_en, n_zh
    while i > 0 or j > 0:
        oi, oj = path_x[str(i) + "," + str(j)], path_y[str(i) + "," + str(j)]
        n += 1
        si, sj = i - oi, j - oj
        if si == 1 and sj == 1:
            ralign[n] = str(i) + " <=> " + str(j)
        elif si == 1 and sj == 0:
            ralign[n] = str(i) + " <=> omitted"
        elif si == 0 and sj == 1:
            ralign[n] = "omitted <=> " + str(j)
        i, j = oi, oj
    return ralign


def write_align(ralign, sim_threshold=0.65):
    """Write the aligned sentence pairs to ./result/align.txt.

    Each non-omitted pair from ``ralign`` (1-based indices, converted to
    0-based list positions) is kept only if the cosine similarity of its
    LASER embeddings exceeds ``sim_threshold`` (generalized from the
    previously hard-coded 0.65). Output format: "english\\tchinese\\n".

    Fixes: the file handle leaked on any exception before close() — now a
    ``with`` block; the local variable ``align`` shadowed the module-level
    align() function; lang code 'zn' is not valid ISO 639-1 — Chinese is 'zh'.
    """
    with open("./result/align.txt", "w", encoding=ENCODING) as out_file:
        # Reverse key order replays the backtrace front-to-back (see align()).
        for step in sorted(ralign, reverse=True):
            if "omitted" in ralign[step]:
                continue
            parts = ralign[step].split()  # ["i", "<=>", "j"]
            zh_sentence = zh_list[int(parts[-1]) - 1]
            en_sentence = en_list[int(parts[0]) - 1]
            # BUG FIX: 'zn' -> 'zh' (ISO 639-1 code for Chinese).
            embeddings = laser.embed_sentences(
                [zh_sentence, en_sentence],
                lang=['zh', 'en'])
            # Cosine similarity between the two sentence embeddings.
            denom = np.linalg.norm(embeddings[0]) * np.linalg.norm(embeddings[1])
            sim = float(np.dot(embeddings[0], embeddings[1]) / denom)
            if sim > sim_threshold:
                out_file.write(en_sentence + "\t" + zh_sentence + "\n")


# Run the full alignment and write the filtered sentence pairs to disk.
ralign = align(n_en, n_zh)
write_align(ralign)

