import re
from collections import defaultdict

def tokenize(text):
    """Lower-case *text* and return each word character as its own token.

    This is deliberately character-level tokenization (pattern ``\\w``,
    not ``\\w+``): downstream n-gram building joins tokens with ``''``
    to form character n-grams.
    """
    word_char = re.compile(r'\w')
    return [ch for ch in text.lower() if word_char.match(ch)]

def build_ngrams(tokens, n):
    """Join every run of *n* consecutive tokens into one string.

    Returns a list of len(tokens) - n + 1 strings, or an empty list
    when *tokens* is shorter than *n* (or *n* < 1).
    """
    if n < 1:
        # Mirrors the zip(*...) formulation: no windows are produced.
        return []
    last_start = len(tokens) - n + 1
    return [''.join(tokens[start:start + n]) for start in range(last_start)]

def find_new_words(corpus, n=7, min_frequency=2):
    """Find candidate "new words": character n-grams of length *n* that
    occur at least *min_frequency* times in *corpus*.

    Parameters
    ----------
    corpus : str
        Raw text to scan; non-word characters are stripped by tokenize().
    n : int
        Length of the character n-grams to count (default 7).
    min_frequency : int
        Minimum occurrence count for an n-gram to be reported (default 2).

    Returns
    -------
    list[str]
        Qualifying n-grams in first-seen order (dicts preserve insertion
        order).
    """
    tokens = tokenize(corpus)
    ngrams = build_ngrams(tokens, n)

    # Count occurrences of each n-gram.
    ngram_freq = defaultdict(int)
    for ngram in ngrams:
        ngram_freq[ngram] += 1

    return [ngram for ngram, freq in ngram_freq.items() if freq >= min_frequency]

if __name__ == "__main__":
    # Demo: "bbrf" and the 7-gram windows inside the repeated "message"
    # substring occur twice, so they are reported as candidate new words.
    corpus = "bbrfmessagemessagemsgbbrf"
    new_words = find_new_words(corpus, n=7, min_frequency=2)
    print(new_words)
