# -*- coding: utf-8 -*-
"""
Created on Fri Apr  8 21:19:56 2022

@author: Zhou
"""

import re
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from itertools import chain

# Default tokenization settings — presumably intended as defaults for callers
# of this module (the functions below take them as explicit parameters instead);
# TODO confirm against callers.
subtoken = True
text_minlen = 3
text_maxlen = 20
# English stopword vocabulary; consulted by is_english() to recognize common words.
stopvocab = set(stopwords.words('english'))


def nl_filter(s, subtoken=True, min_tokens=0, max_tokens=1000):
    """
    Split an input sentence to words.

    :param s: input sentence
    :param subtoken: whether to split words to sub words, such as split "JavaDoc" to ["java", "doc"]
    :param min_tokens: min tokens to keep
    :param max_tokens: max tokens to keep
    :return: a list of lower-cased words, or [] when fewer than min_tokens
        tokens remain or the sentence starts with punctuation
    """
    # Remove HTML tags and drop everything after an "e.g."/"i.e." marker.
    s = re.sub(r"(([eE]\.[gG])|([iI]\.[eE]))\..+|<\S[^>]*>", " ", s)
    # Normalize floats, 0x/0o/0b literals and typed ints (1L, 2f) to "num".
    s = re.sub(r"\d+\.\d+\S*|0[box]\w*|\b\d+[lLfF]\b", " num ", s)
    # Keep only the first sentence: cut at the first terminal punctuation run.
    first_p = re.search(r"[\.\?\!]+(\s|$)", s)
    if first_p is not None:
        s = s[:first_p.start()]
    s = re.sub(r"https:\S*|http:\S*|www\.\S*", " url ", s)
    # Drop @mentions, any char outside [A-Za-z0-9:.,\s], and ".." runs.
    s = re.sub(r"@\w+|[^A-Za-z0-9:\.,\s]|\.{2,}", " ", s)
    s = re.sub(r"\b\d+\b", " num ", s)
    # Surround remaining punctuation with spaces so it tokenizes separately.
    s = re.sub(r"([:\.,]\s*)+", lambda x: " " + x.group()[0] + " ", s)

    if subtoken:
        # Split camelCase ("JavaDoc" -> "Java Doc") and ACRONYMWord boundaries.
        s = re.sub(r"[a-z][A-Z]", lambda x: x.group()[0] + " " + x.group()[1], s)
        s = re.sub(r"[A-Z]{2}[a-z]", lambda x: x.group()[0] + " " + x.group()[1:], s)
        s = re.sub(r"\w{32,}", " ", s)  # drop hash-like tokens (e.g. MD5)
        # Strip trailing digits from identifiers like "var1".
        s = re.sub(r"[A-Za-z]\d+", lambda x: x.group()[0] + " ", s)
    # Collapse consecutive "num" placeholders into a single one.
    s = re.sub(r"\s(num\s+){2,}", " num ", s)
    s = s.lower().split()
    # Bug fix: guard against an empty token list before peeking at s[0] —
    # the original raised IndexError on empty input when min_tokens == 0.
    if not s or len(s) < min_tokens or s[0] in {':', '.', ','}:
        return []
    return s[:max_tokens]


def identify_sent(raw_doc):  # input original document
    """
    Break a raw Java doc string into individual sentences.

    :param raw_doc: a java doc string.
    :return: list of sentence strings.
    """
    # Strip parenthesized content first so it cannot confuse splitting.
    cleaned = re.sub(r"\([^\)]*\)", " ", raw_doc)
    # Rewrite "e.g." / "i.e." so their final dot is not a sentence break.
    cleaned = re.sub(r"[eE]\.[gG]\.\s", "e.g., ", cleaned)
    cleaned = re.sub(r"[iI]\.[eE]\.\s", "i.e., ", cleaned)
    # Detach a sentence-final "." from an immediately following HTML tag.
    cleaned = re.sub(r"\.<", ". <", cleaned)
    # A period counts as a boundary only when followed by whitespace.
    return re.split(r"\.\s+", cleaned)


def is_others(raw_sent):  # input original sentence
    """Return True for sentences to be skipped: cross references, example
    code, long {@code ...} snippets, and typical SATD remarks."""
    # Example-code heuristic: (<pre> AND <code>) together, or <blockquote> alone.
    code_like = ('<pre>' in raw_sent and '<code>' in raw_sent) or '<blockquote>' in raw_sent
    return bool(
        code_like
        # cross-reference sentences ("see ...")
        or re.search(r"\b(SEE|See|see)\b", raw_sent)
        or re.search(r"\{@code [^\}]{30,}", raw_sent)
        or re.search('For (instance|example)|[Ee]xample[:|,]', raw_sent)
        # typical self-admitted technical debt markers
        or re.search(r"\b(TODO|todo|FIXME|fixme|BUG|bug|compatibility|hack)\b|xxx", raw_sent)
    )


def is_return(tok_sent):  # expect tokenized sentence
    """Return True when the tokenized sentence describes a return value."""
    described = (
        tok_sent.startswith('return')
        or re.search('(this( method| function)?|,) returns? ', tok_sent) is not None
        or re.search('(will|will not|won t)( be)? return', tok_sent) is not None
    )
    return described


def is_exception(tok_sent):  # expect tokenized sentence
    """Return True when the tokenized sentence describes a thrown exception."""
    if tok_sent.startswith('throw'):
        return True
    throw_patterns = ('(this( method| function)?|,) throws? ',
                      '(will|will not|won t)( be)? throw')
    return any(re.search(p, tok_sent) for p in throw_patterns)


def is_usage(tok_sent):  # expect tokenized sentence
    """Return True when the tokenized sentence describes usage constraints."""
    # Sentence-initial usage verbs.
    if re.search('^(used|called|invoked)', tok_sent):
        return True
    # Mid-sentence usage / constraint phrases.
    usage_pattern = ('be used |be called |be invoked |'
                     'works only|only works|cannot|'
                     'do not|is only valid|is valid only')
    return re.search(usage_pattern, tok_sent) is not None


def get_exception(tok_sent):
    """Trim a tokenized exception sentence to its condition clause —
    everything from the first "if "/"when " onward — or return the
    sentence unchanged when no condition marker is found."""
    condition = re.search('if |when ', tok_sent)
    return tok_sent[condition.start():] if condition else tok_sent


def get_return(tok_sent):
    """Strip a leading "return"/"returns " token from a tokenized sentence."""
    prefix = re.search('^returns? ', tok_sent)
    return tok_sent[prefix.end():] if prefix else tok_sent


def get_exception_name(raw_sent):  # input original sentence
    """Extract the first exception class name mentioned in the raw sentence,
    falling back to the generic 'Exception'."""
    found = re.search(r"\b\w*[Ee]xception\b", raw_sent)
    if found is None:
        return 'Exception'
    return found.group()


def tokenize_existing(doc, subtoken, min_tokens, max_tokens):
    """Input sdoc. Tokenize the already-tagged sentences of the document.

    :param doc: sdoc dict with 'returns', 'exceptions' and 'params' entries.
    :return: fresh result dict with tokenized fields; untagged flags start False.
    """
    result = {
        'function': [],
        'params': {},
        'return': [],
        'exceptions': {},
        'usage': [],
        'untagged_return': False,
        'untagged_exception': False,
    }

    if doc['returns']:
        result['return'] = nl_filter(doc['returns'], subtoken, min_tokens, max_tokens)

    # 'exceptions' and 'params' are both name -> description maps; keep only
    # entries whose description survives tokenization.
    for field in ('exceptions', 'params'):
        if not doc[field]:
            continue
        for name, desc in doc[field].items():
            tokens = nl_filter(desc, subtoken, min_tokens, max_tokens)
            if tokens:
                result[field][name] = tokens

    return result


def get_pos(tag):
    """Map a Penn Treebank POS tag prefix to a WordNet POS character
    (adjective / verb / adverb, defaulting to noun)."""
    for prefix, wn_pos in (('J', 'a'), ('V', 'v'), ('R', 'r')):
        if tag.startswith(prefix):
            return wn_pos
    return 'n'


def is_english(doc, k=0.5):
    """
    Heuristically decide whether a tokenized doc is written in English.

    :param doc: result dict as built by tokenize_existing/split_doc.
    :param k: minimum fraction of tokens that must be recognized English words.
    :return: True when at least k of the tokens (punctuation excluded) appear
        in WordNet or in the stopword vocabulary; False for an empty doc.
    """
    # Bug fix: the original chained doc['exceptions'] directly, which unpacks
    # the dict's *key names* (and chains their characters) instead of the
    # description token lists; .values() mirrors the params handling.
    tokens = doc['function'] + list(chain(*doc['params'].values())) + doc['return'] + \
             list(chain(*doc['exceptions'].values())) + doc['usage']
    N = len(tokens)
    if N == 0:
        return False

    n = len([token for token in tokens if token not in {',', ':', '.'} and
             (wordnet.synsets(token, lang='eng') or token in stopvocab)])
    return n / N >= k


def split_doc(doc, subtoken, min_tokens, max_tokens):
    """Input sdoc. Classify untagged sentences in the document, tokenize
    them, and merge the findings with the already-tagged parts.

    :return: tokenized result dict, or {} when the doc does not look English.
    """
    result = tokenize_existing(doc, subtoken, min_tokens, max_tokens)

    found_returns = []
    found_exceptions = {}
    found_usage = []
    for idx, raw_sent in enumerate(identify_sent(doc['description'])):
        # Skip cross references, example code and SATD remarks.
        if is_others(raw_sent):
            continue

        tokens = nl_filter(raw_sent, subtoken, min_tokens, max_tokens)
        joined = ' '.join(tokens)
        if is_return(joined):
            found_returns.append(get_return(joined).split())
        elif is_exception(joined):
            exc_tokens = get_exception(joined).split()
            if exc_tokens:
                found_exceptions[get_exception_name(raw_sent)] = exc_tokens
        elif is_usage(joined):
            found_usage.append(tokens)
        elif idx == 0:
            # The first sentence that fits no other category describes
            # the function itself.
            result['function'] = tokens

    # Untagged findings only fill fields that the tagged doc left empty.
    if found_usage:
        result['usage'] = found_usage[0]
    if found_returns and not result['return']:
        result['return'] = found_returns[0]
        result['untagged_return'] = True
    if found_exceptions and not result['exceptions']:
        result['exceptions'] = found_exceptions
        result['untagged_exception'] = True

    return result if is_english(result) else {}
