from javalang import tokenizer
import re
from config.Config import MAX_CODE_LENGTH, MIN_NL_LENGTH, MAX_NL_LENGTH
from typing import List

# Token classes from javalang's lexer that represent numeric literals.
# Every one of these is collapsed to the single placeholder word "num".
NUM_TYPE = {
    tokenizer.BinaryInteger,
    tokenizer.DecimalFloatingPoint,
    tokenizer.DecimalInteger,
    tokenizer.FloatingPoint,
    tokenizer.HexFloatingPoint,
    tokenizer.HexInteger,
    tokenizer.Integer,
    tokenizer.OctalInteger,
}

# Token classes whose textual value is kept but split into sub-words
# (strings, characters, generic literals and identifiers).
LITERAL_TYPE = {
    tokenizer.Character,
    tokenizer.Identifier,
    tokenizer.Literal,
    tokenizer.String,
}


def node_split(s: str, split_token: bool = True) -> List[str]:
    """Split the text of an AST node into normalized lowercase words.

    Numeric literals are replaced by the placeholder word ``num`` and all
    punctuation is stripped.  When *split_token* is true, compound
    identifiers are additionally broken apart (e.g. ``javaLang`` ->
    ``java lang``).

    :param s: textual content of an AST node
    :param split_token: whether to break camelCase identifiers into sub-words
    :return: list of lowercase word tokens
    """
    # Normalization applied unconditionally, in order: float/hex/binary/
    # octal/suffixed literals -> "num"; format specifiers and punctuation
    # -> space; bare integers -> "num".
    normalize = [
        (r"\d+\.\d+\S*|0[box]\w*|\b\d+[lLfF]\b", " num "),
        (r"%\S*|[^A-Za-z0-9\s]", " "),
        (r"\b\d+\b", " num "),
    ]
    for pattern, repl in normalize:
        s = re.sub(pattern, repl, s)

    if split_token:
        # camelCase boundary: space between a lowercase/uppercase pair.
        s = re.sub(r"[a-z][A-Z]", lambda m: m.group()[0] + " " + m.group()[1], s)
        # Acronym boundary, e.g. "HTMLParser" -> "HTML Parser".
        s = re.sub(r"[A-Z]{2}[a-z]", lambda m: m.group()[0] + " " + m.group()[1:], s)
        # Discard hash-like blobs (MD5 digests etc.).
        s = re.sub(r"\w{32,}", " ", s)
        # Strip trailing digits from identifiers ("count3" -> "count").
        s = re.sub(r"[A-Za-z]\d+", lambda m: m.group()[0] + " ", s)

    # Collapse consecutive "num" placeholders into a single one.
    s = re.sub(r"\s(num\s+){2,}", " num ", s)
    return s.lower().split()


def num2wordlist(token, split_token: bool = True) -> List[str]:
    """Map any numeric token to the single placeholder word ``num``.

    :param token: a numeric token from javalang.tokenizer (value is ignored)
    :param split_token: accepted for interface symmetry with the other
        ``*2wordlist`` helpers; unused here
    :return: the one-element list ``["num"]``
    """
    return ["num"]


def literal2wordlist(token, split_token: bool = True) -> List[str]:
    """Tokenize a literal token (string/char/identifier) into words.

    :param token: a literal token from javalang.tokenizer
    :param split_token: whether to break camelCase identifiers into sub-words
    :return: word list produced by :func:`node_split` on the token's value
    """
    value = token.value
    return node_split(value, split_token)


def code_split(code: str, split_token: bool = True, max_tokens=MAX_CODE_LENGTH) -> List[str]:
    """Tokenize a snippet of Java source code into a word list.

    Numeric literal tokens become the placeholder word ``num``; string/
    char/identifier tokens are split into sub-words; every other token
    (keywords, operators, separators) is kept verbatim.

    :param code: Java source code as a string
    :param split_token: whether to break camelCase identifiers into sub-words
    :param max_tokens: truncate the result to at most this many words
    :return: list of word tokens, at most *max_tokens* long
    """
    result: List[str] = []
    # Iterate the token generator directly; no need to materialize a list.
    for token in tokenizer.tokenize(code):
        token_cls = token.__class__
        if token_cls in NUM_TYPE:
            # Forward split_token for consistency with the literal branch
            # (num2wordlist currently ignores it, so behavior is unchanged).
            result += num2wordlist(token, split_token)
        elif token_cls in LITERAL_TYPE:
            # Delegate through the dedicated helper instead of calling
            # node_split directly; the helper is a pure pass-through.
            result += literal2wordlist(token, split_token)
        else:
            result.append(token.value)
    return result[:max_tokens]


def nl_split(s: str, split_token: bool = True, min_tokens=MIN_NL_LENGTH, max_tokens=MAX_NL_LENGTH) -> List[str]:
    """Tokenize a natural-language description into a word list.

    Keeps only the first sentence, maps numbers to ``num`` and URLs to
    ``url``, strips markup and parentheticals, and optionally splits
    camelCase words into sub-words.

    :param s: natural-language text
    :param split_token: whether to break camelCase words into sub-words
    :param min_tokens: inputs shorter than this are rejected.
        NOTE(review): this is compared against the *character* count of the
        raw string, not the token count — confirm that is the intent.
    :param max_tokens: truncate the result to at most this many words
    :return: list of lowercase word tokens (possibly empty)
    """
    if len(s) < min_tokens:
        return []
    # Drop parentheticals, "e.g."/"i.e." tails and HTML-like tags.
    s = re.sub(r"\([^\)]*\)|(([eE]\.[gG])|([iI]\.[eE]))\..+|<\S[^>]*>", " ", s)
    # Float / hex / binary / octal / suffixed integer literals -> "num".
    s = re.sub(r"\d+\.\d+\S*|0[box]\w*|\b\d+[lLfF]\b", " num ", s)
    # Keep only the text up to the first sentence terminator.
    sentence_end = re.search(r"[\.\?\!]+(\s|$)", s)
    if sentence_end is not None:
        s = s[:sentence_end.start()]
    # URLs -> "url"; then drop TODO tails, stray symbols and ellipses.
    s = re.sub(r"https:\S*|http:\S*|www\.\S*", " url ", s)
    s = re.sub(r"\b(todo|TODO)\b.*|[^A-Za-z0-9\.,\s]|\.{2,}", " ", s)
    # Bare integers -> "num".
    s = re.sub(r"\b\d+\b", " num ", s)
    # Normalize punctuation runs to a single spaced '.' or ','.
    s = re.sub(r"([\.,]\s*)+", lambda m: " " + m.group()[0] + " ", s)

    if split_token:
        # camelCase boundary: space between a lowercase/uppercase pair.
        s = re.sub(r"[a-z][A-Z]", lambda m: m.group()[0] + " " + m.group()[1], s)
        # Acronym boundary, e.g. "HTMLParser" -> "HTML Parser".
        s = re.sub(r"[A-Z]{2}[a-z]", lambda m: m.group()[0] + " " + m.group()[1:], s)
        # Discard hash-like blobs (MD5 digests etc.).
        s = re.sub(r"\w{32,}", " ", s)
        # Strip trailing digits from words.
        s = re.sub(r"[A-Za-z]\d+", lambda m: m.group()[0] + " ", s)
    # Collapse consecutive "num" placeholders into a single one.
    s = re.sub(r"\s(num\s+){2,}", " num ", s)
    return s.lower().split()[:max_tokens]


if __name__ == '__main__':
    # Quick smoke test of the two public splitters.
    nl_tokens = nl_split("I'm a student from ecust", True)
    code_tokens = code_split("Int javaLang = int 3;", False)
    print(nl_tokens)
    print(code_tokens)
