import re
from django.template.defaultfilters import slugify
from BeautifulSoup import BeautifulSoup, Comment

def get_unique_slug(classname, titlestring):
    """Return a slug for *titlestring* that is unique among *classname* rows.

    classname -- a Django model class with a `slug` field
    titlestring -- the text to slugify

    If the slug is taken, a numeric suffix is appended (`foo` -> `foo-1`)
    or incremented (`foo-1` -> `foo-2`) until a free slug is found.
    """
    slug = slugify(titlestring)
    while classname.objects.filter(slug=slug).count() > 0:
        bits = slug.split('-')
        # isdigit() handles multi-digit counters; the old substring test
        # (`bits[-1] in '123456789'`) only worked for a single digit.
        if len(bits) > 1 and bits[-1].isdigit():
            bits[-1] = str(int(bits[-1]) + 1)
            # Rejoin with '-' — joining with '' produced slugs like 'foo2'.
            slug = '-'.join(bits)
        else:
            slug = slug + '-1'
    return slug
    
def strip_leading_and_trailing_p_tags(string):
    """If the whole string is wrapped in a single <p>...</p> pair
    (allowing surrounding whitespace), return the inner content;
    otherwise return the string unchanged. The string may contain
    line breaks and html.
    """
    wrapper = re.match(r'^\s*<p>(.*)</p>\s*$', string, re.DOTALL)
    if wrapper is None:
        return string
    return wrapper.group(1)
    
def sanitize(untrusted_html, tag_whitelist=None, attr_whitelist=None, tag_blacklist=None):
    """Strips potentially harmful tags and attributes from HTML.
    http://pastie.org/241799

    untrusted_html -- the HTML string to clean
    tag_whitelist -- tags to keep (pass [] to strip all HTML)
    attr_whitelist -- dict mapping tag name -> allowed attribute names
    tag_blacklist -- tags removed together with their contents

    Returns the sanitized markup as a unicode string.

    Based on the work of:
     - Tom Insam <http://jerakeen.org/blog/2008/05/sanitizing-comments-with-python/>
     - akaihola <http://www.djangosnippets.org/snippets/169/>
    """
    # NOTE: the import must come after the docstring, otherwise the string
    # above would be a plain expression statement, not the docstring.
    from copy import copy

    # Allow these tags. This can be changed to whatever you please, of course.
    if tag_whitelist is None: # ie you can pass empty list to remove all HTML
        tag_whitelist = [
            # 'a', 'abbr', 'address', 'b', 'blockquote',
            # 'br', 'code', 'cite', 'em', 'i', 'ins', 'kbd',
            # 'p', 'q', 'samp', 'small', 'strike', 'strong', 'sub',
            # 'sup', 'var'
            'a', 'b', 'em', 'i', 'strong', 'p', 'div', 'span',
        ]

    # Allow only these attributes on these tags. No other tags are allowed
    # any attributes.
    if attr_whitelist is None:
        attr_whitelist = {
            'a': ['href', 'title', 'hreflang'],
            'img': ['src', 'width', 'height', 'alt', 'title'],
        }

    # Remove these tags, complete with contents.
    if tag_blacklist is None:
        tag_blacklist = [ 'script', 'style' ]

    # Attributes whose values are URLs and therefore need scheme checking.
    attributes_with_urls = [ 'href', 'src' ]

    soup = BeautifulSoup(untrusted_html)
    # Remove HTML comments (they can hide conditional-comment payloads).
    for comment in soup.findAll(
        text=lambda text: isinstance(text, Comment)):
        comment.extract()
    # Remove unwanted tags
    for tag in soup.findAll():
        # Remove blacklisted tags and their contents.
        if tag.name.lower() in tag_blacklist:
            tag.extract()
        # Hide non-whitelisted tags (tag is dropped, contents are kept).
        elif tag.name.lower() not in tag_whitelist:
            tag.hidden = True
        else:
            # Iterate over a copy: we mutate tag.attrs while looping.
            # In BeautifulSoup 3, attrs is a list of (name, value) tuples.
            for attr in copy(tag.attrs):
                # Attributes in the attr_whitelist are considered, but on
                # a per-tag basis.
                if tag.name.lower() in attr_whitelist and attr[0].lower() in attr_whitelist[ tag.name.lower() ]:
                    # Some attributes contain urls..
                    if attr[0].lower() in attributes_with_urls:
                        # .. so make sure they're nice urls (no javascript: etc.)
                        if not re.match(r'(https?|ftp)://|^/|^(mailto):', attr[1].lower()):
                            tag.attrs.remove(attr)
                else:
                    # Non-whitelisted attributes are removed entirely.
                    tag.attrs.remove( attr )
    return unicode(soup)
    
    
def nicely_remove_all_html(html):
    """Strip every HTML tag from *html*, returning plain text.

    A space is inserted after each closing </p> first, so that text from
    adjacent paragraphs does not run together once the tags are gone.
    """
    spaced = html.replace('</p>', '</p> ') # paragraphs include implicit white space
    return sanitize(spaced, tag_whitelist=[])
