#!/usr/bin/python3

# Copyright (c) 2011 Michael Mol <mikemol@gmail.com>.
#
# This work is licensed under a
# Creative Commons Attribution 3.0 Unported License, viewable at
# http://creativecommons.org/licenses/by/3.0/
#

import random
import re
import string

#--Massagers
# Trying to be able to handle the weird symbols we'll frequently see in highly-
# technical channels like #gentoo, #linux, etc.
# Characters treated as punctuation when splitting symbols apart.
PUNCTUATION = ['!','@','#','$','%','^','&','*','(',')', # Number-row symbols.
               '0','9', # Numbers (lump 0s and 9s; they're the most likely to repeat).
               '-','_','=','+','{','}','[',']',';',"'",  # More symbols
               ':','"','<',',','>','.','?','/','|','\\', # More symbols
               '`','~'] # Almost forgot about these.

def punc_in(indata):
    """Split each symbol into alternating runs of punctuation and
    non-punctuation characters.

    For every input symbol, yields its runs in order, followed by a
    terminating empty string ("") so punc_out can reassemble the
    original symbol.

    Symbols of length 3 or less are passed through whole: they are very
    often sideways smileys like o_o, and breaking those up produces
    degenerate markov output such as o_o_o_o_o_o_o.
    """
    # frozenset gives O(1) membership tests instead of scanning the
    # PUNCTUATION list for every character. (Read-only access needs no
    # 'global' declaration.)
    punctuation = frozenset(PUNCTUATION)
    for sym in indata:
        pieces = []         # runs derived from this symbol, in order
        accum_punc = ''     # current run of punctuation characters
        accum_nonpunc = ''  # current run of non-punctuation characters

        if len(sym) <= 3:
            # Small symbol: emit unbroken (see docstring).
            pieces.append(sym)
        else:
            for char in sym:
                if char in punctuation:
                    accum_punc += char
                    # We just switched over to accumulating punctuation;
                    # flush any pending non-punctuation run.
                    if accum_nonpunc:
                        pieces.append(accum_nonpunc)
                        accum_nonpunc = ''
                else:
                    accum_nonpunc += char
                    # Switched back to non-punctuation; flush any pending
                    # punctuation run.
                    if accum_punc:
                        pieces.append(accum_punc)
                        accum_punc = ''

            # At most one of the accumulators is non-empty here; flush it.
            if accum_punc:
                pieces.append(accum_punc)
            else:
                pieces.append(accum_nonpunc)

        # Empty string marks the end of the processed symbol.
        pieces.append("")
        yield from pieces

def punc_out(indata):
    """Inverse of punc_in: reassemble symbols from their runs.

    Pieces are concatenated until the empty-string end-of-symbol marker
    arrives, at which point the rebuilt symbol is yielded.
    """
    # None means "no symbol in progress" -- distinct from an accumulated
    # empty string, so empty components round-trip correctly. (Testing
    # truthiness here conflated the two; the unused our_punctuation list
    # is also gone.)
    retsym = None
    for sym in indata:
        if retsym is None:
            # First piece of a new symbol (may legitimately be "").
            retsym = sym
        elif sym == "":
            # End-of-symbol marker: emit what we've built.
            yield retsym
            retsym = None
        else:
            retsym += sym

    # Flush a trailing symbol whose end marker never arrived.
    if retsym is not None:
        yield retsym

def url_filter_in(indata):
    """Replace URL-like symbols with canonical placeholders.

    URLs become 'http://www.example.com/', POSIX paths '/dev/null',
    Windows paths 'C:\\TEMP', times '23:59:59', and regex-looking
    symbols '(.){2,}'. Everything else passes through untouched. This
    collapses the huge variety of URLs/paths/times into a handful of
    symbols so they don't dilute the markov tables.
    """
    # Take a stab at catching things which end in 2-, 3- or 4-char TLDs,
    # by looking for ".tld" followed by end-of-string or something not
    # allowed in a TLD. NOTE: the classes use [A-Za-z]; the previous
    # [A-z] also matched the ASCII characters between 'Z' and 'a'
    # ([ \ ] ^ _ `), which was a bug.
    tail_re = re.compile(r'[^.]\.[A-Za-z]{2,4}($|[^A-Za-z].*)')
    # Characters commonly used in regexes but rare in POSIX paths.
    regex_re = re.compile(r'[,{}]')
    # Not matching {2} [0-9] at the end, because where we use it, we'll
    # be missing a trailing character.
    time_re = re.compile(r'[0-9]{1,2}:[0-9]{2}:[0-9]{1,2}')
    # Anything outside [A-Za-z0-9] disqualifies a bare domain name.
    nondomain_re = re.compile(r'[^A-Za-z0-9]')

    url_swap = 'http://www.example.com/'
    posix_swap = '/dev/null'
    windows_swap = 'C:\\TEMP'
    time_swap = '23:59:59'
    regex_swap = '(.){2,}'

    for sym in indata:
        # '://' appears in the vast majority of URLs we stand a chance
        # of reliably recognizing.
        if '://' in sym:
            yield url_swap
            continue

        if not tail_re.search(sym):
            # Not a symbol we care to switch in.
            yield sym
            continue

        # The TLD regex also catches things like /blah/whatever.tar.gz,
        # so inspect what came before the match. For a real domain name,
        # everything before the matched tail must be domain material.
        head = tail_re.sub('', sym)

        if not nondomain_re.search(head):
            # Nothing suspicious before the tail: assume a bare domain.
            yield url_swap
            continue

        # Not a valid domain name; figure out what it actually is.
        if time_re.search(head):
            yield time_swap
        elif '/' in head:
            # Generally POSIX paths -- but regexes use '/' too, so look
            # for regex-only characters to tell them apart.
            if regex_re.search(head):
                yield regex_swap
            else:
                yield posix_swap
        elif '\\' in head:
            # Generally Windows paths.
            yield windows_swap
        else:
            # Assuming it's a URL. (The old trailing 'yield sym' after
            # this if/elif/else was unreachable -- every branch yielded.)
            yield url_swap

# Domain-like things to substitute in.
SUBSTITUTE_URLS = [
        'dastoob.net',
        'rosettacode.org',
        'duckduckgo.com',
        'command.com'
    ]

# Substitute paths for POSIX-type paths. (Using '/' as a path delimiter.)
SUBSTITUTE_PATHS_POSIX = [
        '/dev/null',
        '/dev/sda',
        '/etc/fstab',
        '/bin/sh'
    ]

# Substitute paths for DOS/Windows-type paths. (Using '\' as a path delimiter.)
SUBSTITUTE_PATHS_WINDOWS = [
        'C:\\temp',
        '\\\\localhost\\C$',
        'A:\\CONFIG.SYS',
        'A:\\AUTOEXEC.BAT'
    ]

def url_filter_out(indata):
    """Swap placeholder URLs/paths for a random entry from the tables above.

    Assumes the input looks like url_filter_in's output: that filter does
    the heavy lifting and simplifies the kinds of symbols we're interested
    in, making them much more easily recognized here.
    """
    # Read-only access to the module-level tables needs no 'global'.
    for sym in indata:
        if '://' in sym:
            pool = SUBSTITUTE_URLS
        elif '/' in sym:
            # This looks like a POSIX path.
            pool = SUBSTITUTE_PATHS_POSIX
        elif '\\' in sym:
            # This looks like a Windows path.
            pool = SUBSTITUTE_PATHS_WINDOWS
        else:
            # Not a placeholder; pass the raw symbol through untouched.
            yield sym
            continue

        # random.choice is the idiomatic equivalent of indexing with
        # random.randint(0, len(pool) - 1).
        yield random.choice(pool)

# Interjections and silence fillers.
INTERJECTIONS = [
        'hey!',
        '*ping*',
        '*cough*',
        'hi.',
        'sigh...',
        'frell...'
    ]

def attention_filter_in(indata):
    """Replace leading IRC address prefixes with interjections.

    In IRC conversations people are often addressed as
    'username: message'. We don't care about the 'username:' part, so a
    leading symbol of that shape is swapped for a random interjection.
    Once any symbol has passed through unchanged, later 'name:' symbols
    are left alone.
    """
    # Raw string so \S reaches the regex engine un-mangled; compiled once
    # instead of per symbol.
    addressee_re = re.compile(r'\S+:')
    yielded = None  # last symbol passed through unchanged, or None
    for sym in indata:
        if yielded or not addressee_re.match(sym):
            yielded = sym
            yield sym
        else:
            # Leading 'username:' symbol: swap in an interjection.
            # (yielded deliberately stays None, so a run of leading
            # address prefixes is each replaced.)
            yield random.choice(INTERJECTIONS)

def de_stretch_speeeech_in(indata):
    """Contract emphatic character stretching.

    YEEEEAAAAHHHH!, HAHAHAHAHAHAHA! and lolololololol are, when a symbol
    is nothing but a repetitive emphatic extension of a smaller symbol,
    a bloat on our symbol and relationship counts that dilutes the
    tables. Rather than strip the emphasis entirely, any one- or
    two-character sequence repeated three or more times is contracted to
    exactly three repeats:

        YEEEEAAAAHHHH!  -> YEEEAAAHHH!
        HAHAHAHAHAHAHA! -> HAHAHA!
        lolololololol   -> lololol

    A reasonable compromise between preserving expressiveness and
    avoiding a pile of low-weight, low-relationship symbols.
    """
    # The alternation tries '.' before '..', so single-character runs win
    # when both would match. Compiled once, outside the loop; the outer
    # capture group the original pattern carried was redundant, so the
    # backreferences are \1 here.
    stretch_re = re.compile(r'(.|..)\1{2,}')
    for sym in indata:
        yield stretch_re.sub(r'\1\1\1', sym)

def case_tolower_in(indata):
    """Fold every symbol in the stream to lower case."""
    yield from (sym.lower() for sym in indata)

def numbers_reduce_in(indata):
    """Collapse every ASCII decimal digit to '9'.

    Digits are randomized again on the way out by numbers_reduce_out.
    str.translate only remaps the exact code points given in the table,
    so all other characters (Unicode included) pass through untouched.
    """
    # The translation table is loop-invariant: build it once instead of
    # on every symbol.
    nines = str.maketrans("0123456789", "9999999999")
    for sym in indata:
        yield sym.translate(nines)

def numbers_reduce_out_rand_num_replace(ingroup):
    """Return a random decimal digit as a string.

    Used as a re.sub replacement callback; the matched digit in
    *ingroup* is ignored -- any digit is replaced with a fresh random one.
    """
    digit = random.randint(0, 9)
    return str(digit)

def numbers_reduce_out(indata):
    """Replace every digit of every symbol with a random digit,
    undoing numbers_reduce_in's collapse-to-nines."""
    yield from (re.sub(r'[0-9]', numbers_reduce_out_rand_num_replace, sym)
                for sym in indata)

if __name__ == '__main__':
    # Doesn't appear to do much, but it exercises whatever filters you
    # feed it. The most useful tracing turns out to be done inside the
    # filters themselves.
    while True:
        try:
            line = input()
        except (EOFError, KeyboardInterrupt):
            # End of input (or Ctrl-C): stop reading. Catching only
            # these keeps real errors visible, unlike the former bare
            # 'except:'. (input() never returns None, so the old
            # 'line == None' check was dead code.)
            break

        # Initial input
        inset = line.split()

        # Materialize each stage into a list; if you want to do tracing,
        # you'll need to pull it out of inter_filtered.
        inter_filtered = list(url_filter_in(inset))
        inter_filtered = list(url_filter_out(inter_filtered))


