#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 by Nicolas Grandjean <ncgrandjean@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

def configure():
    """User-defined firewall rules -- edit this function to suit your needs."""

    # ADD YOUR RULES HERE!

    # let any outgoing flow through
    OUTPUT()

    # outgoing icmp and dns are allowed for everyone
    OUTPUT('icmp or udp dst port 53')

    # root (uid 0) can open any outgoing connection
    OUTPUT('ip', custom_condition='-m owner --uid-owner=0')

    # example: host a web server
    #INPUT('tcp dst port 80 or tcp dst port 443')
    # or, shorter but more permissive:
    #INPUT('port 80 or port 443')

    # example: receive a video stream from a given host
    #INPUT('udp and src host myvideo.stream.com')

    # example: forward traffic from eth0 to eth1
    #FORWARD('src dev eth0 and dst dev eth1')

    # example: redirect HTTP traffic to 1.2.3.4:8888
    #DNAT('tcp dst port 80', '1.2.3.4:8888')
    #FORWARD('dst host 1.2.3.4 and tcp dst port 8888')

    #

###############################################################################
###############################################################################

# imports

import logging
import logging.handlers
import os
import pprint
import pstats
import random
import re
import socket
import sys
import traceback

# optional DNS support: used by 'resolv()' below to turn hostnames into IP
# addresses; without python-dnspython, any non-IP hostname fails to resolve
try:
    from dns import resolver # need python-dnspython
    # rebind the module name to a ready-to-use resolver instance
    resolver = resolver.Resolver()
    resolver.lifetime = 3 # DNS query timeout, in seconds
except ImportError:
    pass
from pyparsing import (alphas, alphanums, Combine, Empty, Forward, Group,
                       Keyword, nestedExpr, NotAny, nums, oneOf, opAssoc,
                       operatorPrecedence, Optional, ParseException,
                       ParseResults, quotedString, StringEnd, StringStart,
                       Suppress, White, Word) # need python-pyparsing
# extend the character set accepted in hostnames by the BPF parser below
alphanums += '-._'
from threading import currentThread, Event, RLock, Thread

def cached(function):
    """Memoizes *function* by its positional arguments.

    The argument tuple is used directly as the cache key, so every
    argument must be hashable. Keyword arguments are not supported."""
    memo = {}
    def wrapper(*args):
        try:
            return memo[args]
        except KeyError:
            result = function(*args)
            memo[args] = result
            return result
        #
    return wrapper
    #

_not_concurrent_lock = RLock()
def not_concurrent(function):
    """Ensures that a given function can't be called concurrently.

    All decorated functions are serialized through a single module-level
    lock. The lock is released even if the wrapped function raises: the
    previous implementation acquired and released the lock without a
    try/finally, so any exception left the lock held forever and every
    other thread blocked on the next decorated call."""
    def wrapper(*args):
        # 'with' guarantees the release on both return and exception
        with _not_concurrent_lock:
            return function(*args)
        #
    return wrapper
    #

###############################################################################
# Cached versions of usual functions
###############################################################################

@cached
def re_compile(pattern, flags=0):
    """Caching wrapper around 're.compile()' (same signature)."""
    compiled = re.compile(pattern, flags)
    return compiled
    #

# shortcut
r = re_compile

###############################################################################
# Logging & History
###############################################################################

class ColorFormatter(logging.Formatter):
    """Implements colored output for the logging module."""
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
    COLORS = {
        'WARNING' : YELLOW,
        'INFO'    : WHITE,
        'DEBUG'   : BLUE,
        'CRITICAL': YELLOW,
        'ERROR'   : RED,
        'RED'     : RED,
        'GREEN'   : GREEN,
        'YELLOW'  : YELLOW,
        'BLUE'    : BLUE,
        'MAGENTA' : MAGENTA,
        'CYAN'    : CYAN,
        'WHITE'   : WHITE,}
    COLOR_SEQ = "\033[1;%dm"
    BOLD_SEQ = "\033[1m"
    RESET_SEQ = "\033[0m"
    #
    def __init__(self, *args, **kwargs):
        """Creates a new instance."""
        # logging.Formatter is an old-style class, so no super() here
        logging.Formatter.__init__(self, *args, **kwargs)
        #
    def format(self, record):
        """Formats a given record, expanding the '$...' color markers."""
        escape = lambda code: ColorFormatter.COLOR_SEQ % code
        # format the record first, then substitute the markers
        text = logging.Formatter.format(self, record)
        # $COLOR depends on the record's level
        text = text.replace(
            "$COLOR", escape(30 + ColorFormatter.COLORS[record.levelname]))
        # $<NAME> sets the foreground, $BG<NAME>/$BG-<NAME> the background
        for name, code in ColorFormatter.COLORS.items():
            text = text.replace("$" + name, escape(code + 30))
            text = text.replace("$BG" + name, escape(code + 40))
            text = text.replace("$BG-" + name, escape(code + 40))
        text = text.replace("$BOLD", ColorFormatter.BOLD_SEQ)
        text = text.replace("$RESET", ColorFormatter.RESET_SEQ)
        # always reset the terminal attributes at the end of the line
        return text + ColorFormatter.RESET_SEQ
        #
    #

@not_concurrent
def logging_by_lines(function):
    """Implements a decorator to log a given string line by line."""
    def wrapper(string):
        # split on newlines and log each non-empty stripped line separately
        for raw_line in str(string).split('\n'):
            stripped = raw_line.strip()
            if stripped:
                function(stripped)
        #
    return wrapper
    #

@not_concurrent
def _logging_exception(exception):
    """Prints an exception: a one-line summary followed by the backtrace."""
    # retrieve information about the current exception
    exc_type, _, exc_tb = sys.exc_info()
    # strip the Python 2 'exceptions.' prefix and the trailing "'>" from the
    # type's repr (e.g. "<type 'exceptions.ValueError'>" -> "ValueError")
    exc_type = str(exc_type).partition('exceptions.')[2].rpartition('\'>')[0]
    stack = traceback.extract_tb(exc_tb)
    stack.reverse()
    # prefer the innermost frame that belongs to this very file...
    for filename, lineno, function, _ in stack:
        if filename == __file__:
            break
    else:
        # ...and fall back to the innermost frame otherwise
        filename, lineno, function, _ = stack[0]
    logging_error("%s@%s: %s() => %s at line %s"
                  % (exc_type or 'Exception',
                     filename,
                     function,
                     str(exception),
                     lineno))
    # NOTE(review): the backtrace below is printed unconditionally, not
    # "only in debug mode" as the original comment claimed
    # pad the function names so the columns line up
    max_length = max([len(function) for _, _, function, _ in stack]) + 3
    fmt = "- %%s: %%-%ss%%s: %%s" % max_length
    for filename, lineno, function, line in stack:
        logging.error(fmt % (filename, function + '()', lineno, line))
    #

def _logging_print(string):
    """Prints a raw string to stderr (in light gray, with a reset)."""
    line = "\033[37m%s\033[0m\n" % string
    sys.stderr.write(line)
    #

# shortcuts
# the first four log line by line at the matching level; the last two are
# plain aliases for the helpers defined above
logging_debug     = logging_by_lines(logging.debug)
logging_info      = logging_by_lines(logging.info)
logging_warning   = logging_by_lines(logging.warning)
logging_error     = logging_by_lines(logging.error)
logging_exception = _logging_exception
logging_print     = _logging_print

def cleanup_log_line(string):
    """Removes unwanted parts of a log line."""
    # drop quoted '_dc' cache-buster parameters ("'_dc': '<digits>'")
    cleaned, _ = r(r'\'_dc\': \'\d+\'').subn('', string)
    return cleaned
    #

def one_line(obj):
    """Returns the 1-line string representation of an object."""
    # collapse every newline (and surrounding spaces) into a single space
    collapsed = r(r' *\r*\n+ *').sub(' ', str(obj))
    return collapsed.strip()
    #

def trunc(string, max_length=50):
    """Returns the truncated value of a string (at most 'max_length' chars,
    with a '...' marker when something was cut)."""
    text = str(string)
    if len(text) <= max_length:
        return text
    return '%s...' % text[:max_length-3].strip()
    #

def trunc_repr(string, max_length=50):
    """Returns the truncated representation of a string (the closing quote
    is re-appended after the '...' marker)."""
    text = repr(string)
    if len(text) <= max_length:
        return text
    return '%s...\'' % text[:max_length-4].strip()
    #

###############################################################################
# Utility functions
###############################################################################

#@cached
def check_tokens(tokens, types_and_values):
    """Checks if the given tokens are of a given type and value. Returns the
    number of tokens that match."""
    try:
        looks_like_type = r(r'\bclass\b|\btype\b').search
        for token, expected in zip(tokens, types_and_values):
            if looks_like_type(repr(expected)):
                # 'expected' is a type: check with isinstance()
                if not isinstance(token, expected):
                    return 0
            else:
                # 'expected' is a plain value: check for equality
                if token != expected:
                    return 0
        return len(tokens)
    except TypeError:
        # non-iterable arguments or unusable expectations never match
        return 0
    #

#@cached
def resolv(hostname):
    """Resolves a given hostname to a list of IPv4 addresses.

    A dotted-quad address is returned as-is (wrapped in a list) without any
    DNS query. Raises 'ValueError' when the name cannot be resolved,
    including when the optional dnspython resolver is unavailable."""
    if r(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$').match(hostname):
        return [hostname]
    try:
        logging_info("querying name %s..." % trunc_repr(hostname))
        values = resolver.query(hostname)
        if not values:
            raise ValueError("can't resolve %s" % trunc_repr(hostname))
        logging_debug("name %s resolved:" % trunc_repr(hostname))
        for value in values:
            logging_debug("- %s" % trunc_repr(value))
        return [str(ip) for ip in values]
    except ValueError:
        # keep the explicit "can't resolve" error as-is
        raise
    except Exception:
        # was a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt; only real resolution failures (including a
        # missing 'resolver' when dnspython isn't installed) are converted
        raise ValueError("can't resolve %s" % trunc_repr(hostname))
    #

###############################################################################
# Classes (capture filters)
###############################################################################

class Netfilter:
    """Provides static methods to manage Netfilter rules."""
    # prefix of every chain created by this class, so they can be found
    # (and removed) again later
    _chain_prefix = 'PY'
    # chains already created by '_apply_rules()', to avoid duplicate '-N'
    _existing_chains = []
    # Public methods ##########################################################
    @staticmethod
    def apply_capture_filter(capture_filter, queue_num):
        """Generates and applies Netfilter rules from a given BPF filter. A
        rule is composed of a chain, a condition (iptables syntax) and a
        target. Rules are added to the 3 main Netfilter chains INPUT, OUTPUT
        and FORWARD with target 'NFQUEUE --queue-num <queue_num>'."""
        # create rules from the bpf filter
        logging_info("parsing capture filter %s" % trunc_repr(capture_filter))
        parser = Netfilter._capture_filter_parser()
        tokens = parser.parseString(capture_filter)
        rules = Netfilter._process_boolean(tokens, Netfilter._process_keyword)
        # we must have at least one rule
        if not rules:
            chain = Netfilter._new_chain_id()
            target = Netfilter._new_chain_id()
            rules = [(chain, '', target)]
        # apply the rules
        table = 'filter'
        last_target = 'NFQUEUE --queue-num %s' % queue_num
        for first_chain in ('INPUT', 'OUTPUT', 'FORWARD'):
            Netfilter._apply_rules(table, first_chain, rules, last_target)
        #
    @staticmethod
    def remove_rules():
        """Removes all Netfilter rules and chains related to Proxyshark."""
        # bugfix: 'Popen' and 'PIPE' were used below but never imported
        # anywhere in the file, so this method always died with a NameError
        from subprocess import PIPE, Popen
        logging_info("removing custom Netfilter rules")
        # retrieve the proxyshark chains
        iptables = Popen(['iptables', '-L', '-n'],
                         bufsize=-1,
                         stdin=None,
                         stdout=PIPE,
                         stderr=None)
        regex = r'\n(?:Chain )?(%s\d+) ' % Netfilter._chain_prefix
        existing_chains = r(regex).findall(iptables.stdout.read())
        if not existing_chains:
            return
        # for each existing proxyshark chain
        for current_chain in tuple(set(existing_chains)):
            # remove the rules that have the current chain as a target
            Netfilter_raw_iptables = Netfilter._raw_iptables
            for main_chain in ('INPUT', 'OUTPUT', 'FORWARD'):
                args = '-t filter -D %s -j %s' % (main_chain, current_chain)
                Netfilter_raw_iptables(args)
            # flush the current chain
            Netfilter._raw_iptables('-t filter -F %s' % current_chain)
        # remove empty chains
        Netfilter_raw_iptables = Netfilter._raw_iptables
        for current_chain in existing_chains:
            Netfilter_raw_iptables('-t filter -X %s' % current_chain)
        #
    # Built-in methods ########################################################
    #
    # Private methods #########################################################
    @staticmethod
    @cached # the generator is cached, but not '_new_chain_id()' (see below)
    def __new_chain_id():
        """Returns a generator of random Netfilter chain identifiers."""
        chain_id = None
        existing_chains = []
        while 1:
            while not chain_id or chain_id in existing_chains:
                chain_id = ''.join((Netfilter._chain_prefix,
                                    str(random.randint(1000, 9999))))
                existing_chains.append(chain_id)
                yield chain_id
        #
    @staticmethod
    def _new_chain_id():
        """Generates a new unique and random Netfilter chain identifier."""
        return Netfilter.__new_chain_id().next()
        #
    @staticmethod
    @cached
    def _capture_filter_parser():
        """Creates a parser for the BPF language."""
        # handle boolean expressions ('not', 'and', 'or')
        def Boolean(clause):
            parser = Forward()
            clause = Group(clause) | nestedExpr(content=parser)
            parser = operatorPrecedence(clause, [
                (Keyword('not'), 1, opAssoc.RIGHT),
                (Keyword('and'), 2, opAssoc.LEFT ),
                (Keyword('or' ), 2, opAssoc.LEFT ),])
            return parser
        # handle custom keywords (a wrapper used below)
        def _Keyword(prefix, keyword, value):
            parser = Empty()
            if prefix:
                prefix  = Optional(oneOf(prefix) + White())
                parser += Combine(prefix + Keyword(keyword))
            else:
                parser += Keyword(keyword)
            if value:
                parser += value
            return parser
        # create custom values
        keywords = ('not and or in out src dst '
                    'dev host net port ip icmp tcp udp')
        # a name is any word that is not one of the reserved keywords above
        name     = NotAny(oneOf(keywords) + (White() | StringEnd()))
        name    += Word(initChars=alphas, bodyChars=alphanums+'-._')
        ip       = Combine(Word(nums, max=3) + '.' +
                           Word(nums, max=3) + '.' +
                           Word(nums, max=3) + '.' +
                           Word(nums, max=3))
        # either CIDR notation or 'a.b.c.d netmask e.f.g.h'
        network  = Combine(ip + '/' + (ip | Word(nums, max=2)) |
                           ip + White() + 'netmask' + White() + ip)
        number   = Word(nums, max=5)
        # create custom keywords
        dev      = _Keyword('in out' , 'dev' , Boolean(name))
        host     = _Keyword('src dst', 'host', Boolean(ip | name))
        net      = _Keyword('src dst', 'net' , Boolean(network))
        port     = _Keyword('src dst', 'port', Boolean(number))
        proto    = _Keyword(None     , 'any' , None)
        proto   |= _Keyword(None     , 'ip'  , None)
        proto   |= _Keyword(None     , 'icmp', None)
        proto   |= _Keyword(None     , 'tcp' , Optional(port))
        proto   |= _Keyword(None     , 'udp' , Optional(port))
        keyword  = dev | host | net | port | proto
        parser = Optional(Boolean(keyword))
        return StringStart() + parser + StringEnd()
        #
    @staticmethod
    def _process_boolean(tokens, callback_func, callback_args=None):
        """Handles tokens that describe a boolean expression. We must provide
        a callback function that handles the operands (keywords or values)."""
        # convert the token list into a tuple, remember that all the arguments
        # of a cached function must be hashable! the tokens are used in such
        # functions below (see 'check_tokens()')
        tokens = tuple(tokens)
        # shortcut for recursive calls
        recurse = lambda tokens: (
            Netfilter._process_boolean(tokens, callback_func, callback_args))
        # if we have a single list of tokens, process the elements recursively
        if check_tokens(tokens, (ParseResults,)) == 1:
            return recurse(tokens[0])
        # if we have a 'not' operator, apply a negation and process the result
        # recursively (De Morgan's laws push the negation inward)
        if check_tokens(tokens, ('not', ParseResults)) == 2:
            tokens = tokens[1]
            # not not x = x
            if check_tokens(tokens, ('not', ParseResults)) == 2:
                new_tokens = tokens[1]
                rules = recurse(new_tokens)
            # x and y = not x or not y
            elif check_tokens(tokens, (ParseResults, 'and', ParseResults)) > 2:
                new_tokens = []
                for token in tokens:
                    if token == 'and':
                        new_tokens.append('or')
                    else:
                        new_tokens.append(ParseResults(['not', token]))
                rules = recurse(new_tokens)
            # x or y = not x and not y
            elif check_tokens(tokens, (ParseResults, 'or', ParseResults)) > 2:
                new_tokens = []
                for token in tokens:
                    if token == 'or':
                        new_tokens.append('and')
                    else:
                        new_tokens.append(ParseResults(['not', token]))
                rules = recurse(new_tokens)
            # not port 1234 = port not 1234
            else:
                new_tokens = []
                for token in tokens:
                    if isinstance(token, basestring):
                        new_tokens.append(token)
                    else:
                        new_tokens.append(ParseResults(['not', token]))
                rules = recurse(new_tokens)
                # if we have a single value, negate the iptables condition
                # itself with a '!'
                if len(new_tokens) == 1:
                    new_rules = []
                    for chain, condition, target in rules:
                        if ' -' in condition:
                            prefix, _, suffix = condition.partition(' -')
                            condition = '%s ! -%s' % (prefix, suffix)
                        else:
                            condition = '! %s' % condition
                        new_rules.append((chain, condition, target))
                    rules = new_rules
            return rules
        # if we have a 'and' operator, process each operand recursively and
        # connect the results by modifying chains and targets properly
        if check_tokens(tokens, (ParseResults, 'and', ParseResults)) > 2:
            operand1 = recurse(tokens[0])
            operand2 = recurse(tokens[2:]) # the slice handles the case where
                                           # we have more than 2 operands
            rules = []
            for chain, condition, target in operand2:
                if chain == operand2[0][0]:
                    chain = operand1[-1][2]
                if target == operand2[0][0]:
                    target = operand1[-1][2]
                rules.append((chain, condition, target))
            rules = operand1 + rules
            return rules
        # if we have a 'or' operator, process each operand recursively and
        # connect the results by modifying chains and targets properly
        if check_tokens(tokens, (ParseResults, 'or', ParseResults)) > 2:
            operand1 = recurse(tokens[0])
            operand2 = recurse(tokens[2:]) # the slice handles the case where
                                           # we have more than 2 operands
            rules = []
            for chain, condition, target in operand2:
                if chain == operand2[0][0]:
                    chain = operand1[0][0]
                elif chain == operand2[-1][2]:
                    chain = operand1[-1][2]
                if target == operand2[0][0]:
                    target = operand1[0][0]
                elif target == operand2[-1][2]:
                    target = operand1[-1][2]
                rules.append((chain, condition, target))
            rules = operand1 + rules
            return rules
        # if we have a single operand, process it with the callback function,
        # with or without arguments
        elif callback_args:
            return callback_func(tokens, callback_args)
        else:
            return callback_func(tokens)
        #
    @staticmethod
    def _process_keyword(tokens):
        """Handles tokens that describe a custom keyword ('dev', 'host', 'net',
        'port', etc)."""
        # if we have a single string, it should be a protocol, it works also if
        # we have no filter at all
        if check_tokens(tokens, (basestring,)) == 1:
            if tokens[0] in ('ip', 'any'):
                condition = '' # nothing to do
            elif tokens[0] in ('icmp', 'tcp', 'udp'):
                condition = '-p %s' % tokens[0]
            else:
                raise ParseException(trunc_repr(tokens))
            chain = Netfilter._new_chain_id()
            target = Netfilter._new_chain_id()
            return [(chain, condition, target)]
        # if we have a direction+keyword and a value
        if check_tokens(tokens, (basestring, ParseResults)) == 2:
            protocol = '' # no protocol specified here
            direction, _, keyword = tokens[0].rpartition(' ')
            value = tokens[1]
        # if we have a protocol, a direction+keyword and a value
        elif check_tokens(tokens, (basestring, basestring, ParseResults)) == 3:
            protocol = tokens[0]
            direction, _, keyword = tokens[1].rpartition(' ')
            value = tokens[2]
        # if we have no filter at all
        elif not tokens:
            chain = Netfilter._new_chain_id()
            target = Netfilter._new_chain_id()
            return [(chain, '', target)]
        else:
            raise ParseException(trunc_repr(tokens))
        # now parse the value associated with the keyword, it can be a single
        # value or a boolean expression so we use '_process_boolean()' with
        # '_process_value()' as a callback function
        callback_func = Netfilter._process_value
        callback_args = {'protocol' : protocol,
                         'direction': direction,
                         'keyword'  : keyword,}
        return Netfilter._process_boolean(value, callback_func, callback_args)
        #
    @staticmethod
    def _process_value(tokens, context):
        """Handles tokens that describe a single value (IP, network, etc)."""
        # get the context required to choose the right iptables options
        protocol = context['protocol']
        direction = context['direction']
        keyword = context['keyword']
        # select the appropriate iptables options
        options_by_direction = {'in' : 0,
                                'out': 1,
                                'src': 0,
                                'dst': 1,}
        options_by_keyword   = {'dev' : ['-i', '-o'],
                                'host': ['-s', '-d'],
                                'net' : ['-s', '-d'],
                                'port': ['--sport', '--dport'],}
        options = options_by_keyword[keyword]
        if direction:
            options = [options[options_by_direction[direction]]]
        # if we have a host we need to resolve the hostname
        if keyword == 'host':
            values = resolv(tokens[0])
        else:
            values = [tokens[0]]
        # if we have a port we need to specify for which protocol
        if keyword == 'port':
            new_options = []
            for option in options:
                if protocol in ['', 'tcp']:
                    new_options.append('-p tcp %s' % option)
                if protocol in ['', 'udp']:
                    new_options.append('-p udp %s' % option)
            options = new_options
        # use the selected options to create netfilter rules, each rule is
        # composed of a chain, a condition (iptables syntax) and a target
        rules = []
        chain = Netfilter._new_chain_id()
        target = Netfilter._new_chain_id()
        for value in values:
            for option in options:
                condition = '%s %s' % (option, value)
                rules.append((chain, condition, target))
        return rules
        #
    @staticmethod
    def _chain_replace(src_chain, dst_chain, rules):
        """Replaces all the occurrences of a given chain by another one."""
        new_rules = []
        for chain, condition, target in rules:
            if chain == src_chain:
                chain = dst_chain
            if target == src_chain:
                target = dst_chain
            new_rules.append((chain, condition, target))
        return new_rules
        #
    @staticmethod
    def _apply_rules(table, first_chain, rules, last_target,
                     insert_in_first_position=True, custom_condition=''):
        """Applies a set of rules in a given Netfilter table. Rules must be
        generated by '_process_*()' handlers (see above). Also, remember that
        '_raw_iptables()' is cached, so a rule can't be added twice even if
        this function is called several times (typically, once for each of the
        3 Netfilter chains INPUT, OUTPUT and FORWARD)."""
        # insert a new rule with the custom condition in first position
        if custom_condition:
            chain = Netfilter._new_chain_id()
            rule = (chain, custom_condition, rules[0][0])
            rules.insert(0, rule)
        # remove rules with empty condition
        while 1:
            for chain, condition, target in rules:
                # empty condition found
                if not condition:
                    src_chain = chain
                    dst_chain = target
                    break
            else:
                # no more empty condition
                break
            if len(rules) > 1:
                # remove the current empty condition
                rules.remove((src_chain, '', dst_chain))
                rules = Netfilter._chain_replace(src_chain, dst_chain, rules)
            else:
                break
        # replace all the occurrences of rules[0][0] by first_chain
        nb_first_chain = [chain for chain, _, _ in rules].count(rules[0][0])
        if nb_first_chain == 1: #and not insert_in_first_position:
            rules = Netfilter._chain_replace(rules[0][0], first_chain, rules)
        # replace all the occurrences of rules[-1][-1] by last_target
        rules = Netfilter._chain_replace(rules[-1][-1], last_target, rules)
        # create the new chains
        new_chains = list(set([x[0] for x in rules] + [x[2] for x in rules]))
        Netfilter_raw_iptables = Netfilter._raw_iptables
        Netfilter_existing_chains = Netfilter._existing_chains
        Netfilter_existing_chains_append = Netfilter._existing_chains.append
        for new_chain in new_chains:
            if new_chain not in Netfilter_existing_chains:
                Netfilter_raw_iptables('-t %s -N %s' % (table, new_chain))
                Netfilter_existing_chains_append(new_chain)
        # remove doubles
        Netfilter._existing_chains = list(set(Netfilter._existing_chains))
        # fill the chains with the rules
        for chain, condition, target in rules:
            if condition:
                condition = ' %s' % condition
            if (insert_in_first_position and
                nb_first_chain == 1 and chain == first_chain
            ):
                args = table, chain, condition, target
                Netfilter._raw_iptables('-t %s -I %s 1%s -j %s' % args)
            else:
                args = table, chain, condition, target
                Netfilter._raw_iptables('-t %s -A %s%s -j %s' % args)
        # link the first chain with the new rules
        if nb_first_chain > 1:
            if insert_in_first_position:
                args = table, first_chain, rules[0][0]
                Netfilter._raw_iptables('-t %s -I %s 1 -j %s' % args)
            else:
                args = table, first_chain, rules[0][0]
                Netfilter._raw_iptables('-t %s -A %s -j %s' % args)
        #
    @staticmethod
    @cached # can't run the same iptables command more than once!
    def _raw_iptables(args):
        """Runs iptables with the given command line arguments."""
        # don't log -D, -F, -N and -X lines
        no_stderr = ''
        for no_display_arg in ('D', 'F', 'N', 'X'):
            if r(r'-\b%s\b' % no_display_arg).search(args):
                no_stderr = ' 2> /dev/null'
                break
        else:
            logging_debug("iptables %s" % args)
        os.system('iptables %s%s' % (args, no_stderr))
        #
    #

def init():
    """Enables forwarding, loads FTP helpers and hardens the ipv4 stack."""
    logging_info("# http://code.google.com/p/iptables-dot-py/")
    # enable ip forwarding, then load the FTP conntrack/NAT helper modules
    os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
    for module in ('ip_nat_ftp', 'ip_conntrack_ftp'):
        os.system('modprobe %s' % module)
    # sysctl hardening: (value, key) pairs written under /proc/sys/net/ipv4/
    settings = (
        (1, 'icmp_echo_ignore_broadcasts'),       # drop icmp broadcasts
        (1, 'tcp_syncookies'),                    # syn-flood protection
        (1, 'conf/all/log_martians'),             # log spoofed/redirected pkts
        (1, 'icmp_ignore_bogus_error_responses'), # ignore bogus icmp errors
        (1, 'conf/all/rp_filter'),                # reverse-path anti-spoofing
        (0, 'conf/all/accept_redirects'),         # no icmp redirects
        (0, 'conf/all/send_redirects'),
        (0, 'conf/eth0/send_redirects'),
        (0, 'conf/all/accept_source_route'),      # no source-routed packets
    )
    for value, key in settings:
        os.system('echo %d > /proc/sys/net/ipv4/%s' % (value, key))
    #

def defaults():
    """Flushes both tables and installs a default-drop policy: everything is
    dropped except loopback traffic and already-established connections."""
    commands = (
        '-t filter -F',
        '-t filter -X',
        '-t nat -F',
        '-t nat -X',
        '-t filter -P INPUT DROP',
        '-t filter -P FORWARD DROP',
        '-t filter -P OUTPUT DROP',
        '-t nat -P PREROUTING ACCEPT',
        '-t nat -P OUTPUT ACCEPT',
        '-t nat -P POSTROUTING ACCEPT',
        '-t nat -A POSTROUTING -j MASQUERADE',
        '-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT',
        '-A FORWARD -m state --state ESTABLISHED,RELATED -j ACCEPT',
        '-A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT',
        '-A INPUT -i lo -j ACCEPT',
        '-A OUTPUT -o lo -j ACCEPT',
    )
    for args in commands:
        Netfilter._raw_iptables(args)
    #

def ulog():
    """Logs via ULOG every packet that reaches the end of the main chains
    (i.e. everything that is about to be dropped by the default policy)."""
    for chain in ('INPUT', 'OUTPUT', 'FORWARD'):
        Netfilter._raw_iptables(
            '-A %s -j ULOG --ulog-prefix="[IPTABLES DROP]"' % chain)
    #

def iptables_bpf(table, first_chain, bpf_filter, last_target, custom_condition):
    """Translates a BPF filter into iptables rules and applies them in the
    given table, from 'first_chain' to 'last_target'."""
    tokens = Netfilter._capture_filter_parser().parseString(bpf_filter)
    rules = Netfilter._process_boolean(tokens, Netfilter._process_keyword)
    Netfilter._apply_rules(table, first_chain, rules, last_target,
                           False, custom_condition)
    #

def INPUT(bpf_filter='', custom_condition=''):
    """Accepts incoming packets matching the given BPF filter."""
    iptables_bpf(table='filter', first_chain='INPUT', bpf_filter=bpf_filter,
                 last_target='ACCEPT', custom_condition=custom_condition)
    #

def OUTPUT(bpf_filter='', custom_condition=''):
    """Accepts outgoing packets matching the given BPF filter."""
    iptables_bpf(table='filter', first_chain='OUTPUT', bpf_filter=bpf_filter,
                 last_target='ACCEPT', custom_condition=custom_condition)
    #

def FORWARD(bpf_filter='', custom_condition=''):
    """Accepts forwarded packets matching the given BPF filter."""
    iptables_bpf(table='filter', first_chain='FORWARD', bpf_filter=bpf_filter,
                 last_target='ACCEPT', custom_condition=custom_condition)
    #

def DNAT(bpf_filter, to_addr, custom_condition=''):
    """Rewrites the destination of matching packets to 'to_addr' (ip[:port])."""
    target = 'DNAT --to-destination %s' % to_addr
    iptables_bpf(table='nat', first_chain='PREROUTING', bpf_filter=bpf_filter,
                 last_target=target, custom_condition=custom_condition)
    #

def REDIRECT(bpf_filter, to_ports, custom_condition=''):
    """Redirects matching packets to the given local port(s)."""
    target = 'REDIRECT --to-ports %s' % to_ports
    iptables_bpf(table='nat', first_chain='PREROUTING', bpf_filter=bpf_filter,
                 last_target=target, custom_condition=custom_condition)
    #

###############################################################################
# Main entry point
###############################################################################

# configure the logging system
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# create a default console handler
# the '%%' escapes survive the outer %-formatting (which only injects the
# hostname); the resulting string is then used as the logging format,
# and the '$COLOR'/'$RESET' markers are expanded by ColorFormatter
console_logging_formatter = ColorFormatter(
    "%%(asctime)s %s "
    "Proxyshark(%%(process)s): "
    "%%(threadName)s: "
    "$COLOR[%%(levelname)s] %%(message)s$RESET" %
    socket.gethostname())
handler = logging.StreamHandler()
handler.setFormatter(console_logging_formatter)
logger.addHandler(handler)

# check if we have root permissions
# (required by iptables and by the /proc writes done in init())
if os.getuid() != 0:
    logging_error("permission denied")
    sys.exit(1)

# set up the system, install the default-drop policy, apply the user's
# rules, then log whatever would be dropped
init()
defaults()
configure()
ulog()
logging_info("Done.")

