import sys, os, re, amara, posixpath
from datetime import datetime
from brightcontent.core.store import COMMON_PREFIXES, ATOM10_NS, flatfile_repository, ENVELOPE_XML_FILE, OMIT_FROM_SLUG_PAT
from brightcontent.util import fixup_namespaces
from amara.binderytools import quick_xml_scan

from optparse import OptionParser

# based on brightcontent.plugins.atomprotocol ENTRY_SKELETON
ENTRY_SKELETON = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
  <id></id>
  <title></title>
  <published></published>
  <updated></updated>
</entry>
"""

# Link descriptor templates, keyed by logical role.  get_links() supplies
# the concrete href values per entry.
_SELF_LINK = {u'rel': u'self', u'title': u'Atom Entry', u'href': u''}
_EDIT_LINK = {u'rel': u'edit', u'title': u'Edit Atom Entry', u'href': u''}
_PERMALINK = {u'rel': u'alternate', u'title': u'Permalink', u'href': u'',
              u'type': 'text/html'}

links = {
    'self': _SELF_LINK,
    'edit': _EDIT_LINK,
    'permalink': _PERMALINK,
}

def get_links(base_iri, published, slug):
    """Build the self/edit/permalink link descriptors for one entry.

    base_iri  -- base URL of the weblog collection (no trailing slash)
    published -- datetime whose date forms the YYYY/MM/DD path segment
    slug      -- URL-safe entry slug

    Returns a fresh dict keyed by 'self', 'edit' and 'permalink'; each
    value maps atom:link attribute names (rel/title/href[/type]) to
    unicode values.
    """
    link_date = published.strftime('%Y/%m/%d')
    perma_link = posixpath.join(link_date, slug)
    # The editable representation lives at the permalink path plus '.atom'.
    edit_iri = posixpath.join(base_iri, perma_link) + '.atom'
    # Self and edit currently point at the same resource.
    self_iri = edit_iri
    # Build a new dict on every call: the previous implementation mutated
    # a shared module-level template in place, so descriptors returned
    # from an earlier call were silently clobbered by the next one.
    return {
        'self': {u'rel': u'self', u'title': u'Atom Entry',
                 u'href': u'%s' % self_iri},
        'edit': {u'rel': u'edit', u'title': u'Edit Atom Entry',
                 u'href': u'%s' % edit_iri},
        'permalink': {u'rel': u'alternate', u'title': u'Permalink',
                      # preceding slash makes the permalink site-absolute
                      u'href': u'/%s' % perma_link,
                      u'type': 'text/html'},
    }

def create_link(doc, rel='', title='', href=''):
    """Create an atom:link element on *doc* with the given attributes."""
    attrs = {
        u'rel': unicode(rel),
        u'title': unicode(title),
        u'href': unicode(href),
    }
    return doc.xml_create_element(u'link', COMMON_PREFIXES[u'atom'],
                                  attributes=attrs)

def id_to_fname(id_):
    """Map an entry IRI/id to a filesystem-safe flat filename.

    Every non-word character (anything outside [A-Za-z0-9_]) is replaced
    with an underscore, e.g. 'http://x/a b' -> 'http___x_a_b'.
    """
    # raw string: the old '\W' relied on Python passing the unknown
    # escape through unchanged, which is deprecated behavior
    return re.sub(r'\W', '_', id_)

if __name__ == '__main__':
    usage = '%s [input file or url] [output directory] [OPTIONS]'
    parser = OptionParser()
    parser.add_option('-u', '--base-url', dest='base_url')

    (options, args) = parser.parse_args()
    if len(args) < 2:
        print
        print usage
        print
        sys.exit(1)

    input_file_or_url = args[0]
    output_dir = args[1]
    if not options.base_url:
        base_url = 'http://localhost:8080/blog'
    else:
        base_url = options.base_url.strip('/')
    print 'Using "%s" for the base url.' % base_url

    try:
        os.makedirs(output_dir)
    except OSError:
        pass            
        
    doc = amara.parse(input_file_or_url, prefixes=COMMON_PREFIXES)
    envelope = amara.create_document(u"atom:feed", ns=ATOM10_NS)
    for nonentry in doc.feed.xml_xpath(u'*[not(self::atom:entry)]'):
        envelope.feed.xml_append(nonentry)
    f = open(os.path.join(output_dir, ENVELOPE_XML_FILE), 'w')
    fixup_namespaces(envelope)
    f.write(envelope.xml(indent=u"yes"))
    f.close()
    store = flatfile_repository(flatfile_storedir=output_dir)

    authors = []
    for author in doc.feed.xml_xpath(u'/atom:feed/atom:author'):
        authors.append(author)
    for entry in doc.feed.entry:
        new_entry = amara.parse(ENTRY_SKELETON, prefixes=COMMON_PREFIXES)
        new_entry.entry.title = unicode(entry.title)
        orig_authors = entry.xml_xpath('atom:author')
        if orig_authors:
            for author in entry.xml_xpath('atom:author'):
                new_entry.entry.xml_insert_after(new_entry.entry.title, author)
        else:
            for author in authors:
                new_entry.entry.xml_insert_after(new_entry.entry.title, author)
        updated_date = datetime.now()
        if unicode(getattr(entry, 'published', '')):
            new_entry.entry.published = unicode(entry.published)
        else:
            new_entry.entry.published = unicode(entry.updated)
        new_entry.entry.updated = unicode(entry.updated)
        slug = str(entry.title)
        slug = OMIT_FROM_SLUG_PAT.sub('_', slug).decode('utf-8')
        # assume the base_iri contains the collection...
        pub_date = amara.binderytools.parse_isodate(str(new_entry.entry.published))
        iri_id = posixpath.join(base_url, pub_date.strftime('%Y/%m/%d'), slug) + '.atom'
        new_entry.entry.id = unicode(iri_id)
        entry_links = get_links(base_url, pub_date, slug)
        for rel, link in entry_links.items():
            link_xml = create_link(new_entry, link['rel'], link['title'], link['href'])
            new_entry.entry.xml_insert_before(new_entry.entry.id, link_xml)
        fixup_namespaces(new_entry)
        # adding content afterward to get better namespaces... is this a good idea?
        new_entry.entry.xml_append(entry.content)
        store.create_entry(new_entry.xml(indent=True), slug)
        print "Created: %s" % iri_id
                     
