import nltk
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.chunk import RegexpParser

def split_sentence_to_phrases(sentence, integrate_single_words=False):
    '''
    Split a sentence into phrases using shallow syntactic (chunk) parsing.

    Parameters:
        sentence: str
            The input sentence.
        integrate_single_words: bool (default False)
            Whether to merge runs of consecutive single-word phrases
            into one phrase.

    Example:
        Input:  "I would like to play basketball rather than tennis"
        Output: ["I", "would like", "to", "play basketball",
                 "rather than", "tennis"]
    '''
    # Tokenize, then POS-tag each token.
    tokenized_sentence = word_tokenize(sentence)
    tagged_sentence = pos_tag(tokenized_sentence)

    # Noun-phrase chunk grammar: optional determiner, any number of
    # adjectives, one singular noun.
    grammar = "NP: {<DT>?<JJ>*<NN>}"
    chunk_parser = RegexpParser(grammar)
    chunked_sentence = chunk_parser.parse(tagged_sentence)

    phrases = []
    current_phrase = []  # words accumulated for the phrase being built

    def _flush():
        # Emit the buffered words as one phrase, if any are pending.
        if current_phrase:
            phrases.append(' '.join(current_phrase))
            current_phrase.clear()

    for word_pos in chunked_sentence:
        if isinstance(word_pos, nltk.Tree):  # a chunked noun phrase
            _flush()
            phrases.append(' '.join(word for word, _ in word_pos.leaves()))
        else:
            word, pos = word_pos
            # Verbs, prepositions and wh-pronouns act as phrase boundaries.
            # BUGFIX: the original tested for tag 'WHO', which is not a Penn
            # Treebank tag and can never be produced by pos_tag; the original
            # comment shows the intent was to split on "who", which is tagged
            # 'WP'.  'WP' is now accepted; 'WHO' is kept (harmlessly) for
            # backward compatibility.
            if pos.startswith('V') or pos in ('IN', 'WP', 'WHO'):
                _flush()
                phrases.append(word)  # the boundary word is its own phrase
            else:
                current_phrase.append(word)

    # Emit whatever is still buffered at the end of the sentence.
    _flush()

    if not integrate_single_words:
        return phrases

    # Merge runs of consecutive single-word phrases into one phrase.
    integrated_phrases = []
    run = []  # consecutive single-word phrases not yet emitted
    for phrase in phrases:
        if ' ' in phrase:  # multi-word phrase ends any pending run
            if run:
                integrated_phrases.append(' '.join(run))
                run = []
            integrated_phrases.append(phrase)
        else:
            run.append(phrase)
    if run:
        integrated_phrases.append(' '.join(run))
    return integrated_phrases

def count_words_in_phrases(phrases):
    '''
    Count the number of words in each phrase.

    Parameters:
        phrases: list
            A list of phrase strings.

    Example:
        Input:  ['I like cat', 'more than', 'dog']
        Output: [3, 2, 1]
    '''
    # Tokenize every phrase and record how many tokens each one yields.
    return [len(tokens) for tokens in map(word_tokenize, phrases)]

def cumulative_sum_with_skip(numbers,jump=2):
    '''
    功能：
        组成增量词位置列表，合并词汇量较少的短语到下一个短语中
    参数：
        numbers:list
            来自count_words_in_phrases的返回值，短语词汇数列表
        jump:int(default=2)
            被合并的短语最大长度，默认为2，即2个词以下的短语都会被合并
    样例：
        输入：[2, 3, 1, 2, 4, 2, 3]
        输出：[2, 5, 8, 12, 14, 17]
    '''
    result = []
    current_sum = 0
    skip_next = False  # 标记是否跳过下一个数字的累加

    for i, number in enumerate(numbers):
        if skip_next:
            skip_next = False  # 重置跳过标记
            continue
        current_sum += number
        if number < jump:
            # 如果当前数字小于2且不是最后一个元素，则累加下一个数字
            if i + 1 < len(numbers):
                current_sum += numbers[i + 1]
                skip_next = True  # 设置跳过下一个数字的累加
        result.append(current_sum)

    return result

def get_split_pos(sentence, jump=2):
    '''
    Compute the cumulative word-position split points directly from a
    sentence.

    Parameters:
        sentence: str
            The sentence to process.
        jump: int (default 2)
            Merge threshold passed through to cumulative_sum_with_skip:
            phrases with fewer than `jump` words are merged into the
            next phrase.

    Example:
        Input:  "we have indeed taken the best part of the meat so let 's look today at a set of photographs of a people who lost so that we could gain"
        Output: [4, 6, 8, 10, 16, 18, 21, 24, 27, 29, 30]
        Output (jump=3): [4, 8, 16, 21, 24, 27, 30]
    '''
    # Pipeline: sentence -> phrases -> per-phrase word counts -> positions.
    phrases = split_sentence_to_phrases(sentence=sentence, integrate_single_words=True)
    phrase_lengths = count_words_in_phrases(phrases)
    return cumulative_sum_with_skip(phrase_lengths, jump)