import io

import jieba
import langid
import spacy
from langid import langid


nlp_en = spacy.load('en_core_web_sm')
nlp_zh = spacy.load('zh_core_web_sm')


def normalize_space(s):
    """Return *s* with ideographic (U+3000) and no-break (U+00A0) spaces
    replaced by plain ASCII spaces.

    Bug fix: the original discarded the results of ``str.replace`` (strings
    are immutable) and implicitly returned None, so it had no effect.
    """
    return s.replace("\u3000", " ").replace("\xa0", " ")


def lemma(s):
    """Return the list of lemmas for every token in *s* (English pipeline)."""
    return [token.lemma_ for token in nlp_en(s)]


def setences(txt, lang="en"):
    """Split *txt* into sentences using the language-appropriate spaCy pipeline.

    Parameters:
        txt: text to segment.
        lang: "en" (default) or "zh".

    Returns a list of spaCy ``Span`` sentence objects.

    Bug fix: the ``lang == "zh"`` branch previously selected ``nlp_en``
    again, so the Chinese pipeline (``nlp_zh``) was never used.
    """
    nlp = nlp_zh if lang == "zh" else nlp_en
    doc = nlp(txt)
    return list(doc.sents)


def is_zh_char(s):
    """True iff *s* is a single character in the CJK range U+4E00..U+9FA5."""
    if len(s) != 1:
        return False
    return '\u4e00' <= s <= '\u9fa5'


def is_en_char(s):
    """True iff *s* is one character with code point strictly between
    U+0000 and U+00FF.

    NOTE(review): despite the name this accepts most of Latin-1, and the
    strict comparisons exclude NUL and U+00FF themselves — preserved as-is.
    """
    if len(s) != 1:
        return False
    return 0 < ord(s) < 0xFF


def is_en(s):
    """True iff every character of *s* satisfies ``is_en_char``.

    NOTE(review): this function is redefined later in the file with
    ASCII-only semantics; that later definition wins at import time.
    """
    for ch in s:
        if not is_en_char(ch):
            return False
    return True


def load_dict(dict_fn="ecdict.gb.txt"):
    """Load an EN->ZH dictionary from a GBK-encoded ``word=translation`` file.

    Lines that do not contain exactly one ``=`` are skipped. A word may
    appear on several lines, so each key maps to a *list* of translations.

    Fix: the original opened the file without ever closing it; ``with``
    guarantees the handle is released.
    """
    dic = {}
    with io.open(dict_fn, encoding="gbk") as f:
        for line in f:
            pair = line.split("=")
            if len(pair) != 2:
                continue
            en_word = pair[0].strip()
            zh_word = pair[1].strip()
            # setdefault collapses the original get/None branch pair
            dic.setdefault(en_word, []).append(zh_word)
    return dic


def match(en_w, zh_p, ez_dict):
    """"Do the translations of English word exist in Chinese paragraph?"""
    if ez_dict.get(en_w) is None:  # en word not exists in dict
        return False

    zh_trans = ez_dict[en_w]
    for c in zh_trans:
        if zh_p.find(c) >= 0:  # translation of en word exists in Chinese paragraph
            return True
    return False


def is_bilingual(p_list):
    """Judge whether a document (list of paragraphs) is EN-ZH bilingual.

    Each paragraph is classified with langid; English length is counted in
    whitespace tokens, Chinese length in jieba tokens. The doc is bilingual
    when both the token-length ratio and the paragraph-count ratio fall in
    loose bands around 1.

    Fix: ``langid.classify`` was previously called twice per paragraph;
    classify once and reuse the result.
    """
    en_len = 0.0001  # tiny epsilon keeps the ratio division safe
    en_num = 0
    zh_len = 0.0001
    zh_num = 0
    for p in p_list:
        lang = langid.classify(p)[0]
        if lang == "en":
            en_len += len(p.split())
            en_num += 1
        elif lang == "zh":
            zh_len += len(list(jieba.cut(p)))
            zh_num += 1

    if en_num == 0 or zh_num == 0:
        return False

    ratio = en_len / float(zh_len)
    ratio2 = en_num / float(zh_num)

    return 3 > ratio > 0.25 and 2 > ratio2 > 0.25


def not_alnum(s):
    """True iff *s* contains no alphanumeric character (vacuously True for "").

    Fix: the parameter was named ``str``, shadowing the builtin; renamed to
    ``s`` (one-argument helper, callers pass it positionally).
    """
    return not any(c.isalnum() for c in s)


def has_no_enchar(s):
    """True iff *s* contains no alphabetic character.

    NOTE(review): ``str.isalpha`` is True for non-ASCII letters (including
    CJK characters), so despite the name this rejects Chinese text too —
    behavior preserved as-is.
    """
    return not any(ch.isalpha() for ch in s)


def too_short(s):
    """True iff *s* segments (via jieba) into fewer than 4 non-empty tokens.

    Bug fix: the original returned ``len(tokens) >= 4`` — the exact opposite
    of what the function name promises.
    """
    tokens = [t.strip() for t in jieba.cut(s) if t.strip()]
    return len(tokens) < 4


def has_zh(s):
    """Does it contain Chinese char?"""
    return any(is_zh_char(ch) for ch in s)


def is_en(s):
    """True iff every character of *s* is ASCII (vacuously True for "").

    NOTE(review): this redefines the earlier ``is_en`` in this file, which
    used a Latin-1 range test; this ASCII version is the one in effect.
    """
    return all(map(str.isascii, s))


def is_lang(s, lang=None):
    """True if langid classifies *s* as *lang*; trivially True when lang is None."""
    if lang is None:
        return True
    return langid.classify(s)[0] == lang


def split_enzh(p_list):
    """Partition paragraphs into (English, non-English) lists via is_lang."""
    en_paras = []
    other_paras = []
    for para in p_list:
        target = en_paras if is_lang(para, "en") else other_paras
        target.append(para)
    return en_paras, other_paras


if __name__ == "__main__":
    ecdict = load_dict()
    print(ecdict)

    doc1 = ["这只是一个测试，这是中文", "这也是中文。"]
    doc2 = ["This is an English sentences. It is just a test.", "Is it English?"]
    doc3 = ["这只是一个测试，这是中文", "Is it English?"]
    print(is_bilingual(doc1), is_bilingual(doc2), is_bilingual(doc3))

    print(lemma("I am looking at the books."))

    print(match("work", "我喜欢工作。", ecdict))

    txt1 = "What can I do for you? Thank you!"
    sents = setences(txt1, "en")
    print(len(sents), sents)

    txt1 = "中国是个好地方？你觉得呢？"
    sents = setences(txt1, "zh")
    print(len(sents), sents)
