#!/usr/bin/env python
# -*- encoding: utf8 -*-

from __future__ import print_function

import sys
import optparse
import operator

import gdata.blogger.client
import gdata.sample_util
import atom.data
from lxml import etree


__author__ = 'fcamel'  # fixed misspelled dunder (__autor__ -> __author__)


#-------------------------------------------------------------------------------
# Process XML
#-------------------------------------------------------------------------------
class Post(object):
    '''A single blog post parsed from the Google Reader XML export.'''

    def __init__(self, title, published, categories, content):
        self.title = title            # post title (may contain HTML)
        self.published = published    # RFC 3339 timestamp string
        self.categories = categories  # list of label strings
        self.content = content       # post body (HTML)

    def __unicode__(self):
        # Fixed-width labels keep the colons aligned; content is truncated
        # to the first 30 characters for a compact preview.
        fields = (
            (u'title     ', self.title),
            (u'published ', self.published),
            (u'categories', u', '.join(self.categories)),
            (u'content   ', self.content[:30]),
        )
        return u'\n'.join(u'%s: %s' % (label, value) for label, value in fields)

    def __str__(self):
        # Python 2: encode the unicode representation as UTF-8 bytes.
        return unicode(self).encode('utf8')


def read_xml(file_name):
    '''Return the entire contents of the XML export file as one string.'''
    with open(file_name) as source:
        return source.read()

def extract_tag(entry, tag, namespace):
    '''Return the text of every direct child <tag> element of entry.

    Returns a list (possibly empty); callers index into it, so the result
    must be a real list.  The original used map(), which only returns a
    list on Python 2 — a list comprehension keeps identical behavior and
    stays portable.
    '''
    es = entry.xpath('ns:%s' % tag, namespaces={'ns': namespace})
    return [e.text for e in es]

def get_categories(entry, namespace):
    '''Return the 'term' attribute of each real category on the entry.

    Categories filed under Google Reader's own scheme are internal state
    labels, not user tags, so they are filtered out by the XPath predicate.
    '''
    google_reader_url = 'http://www.google.com/reader/'
    xpath = 'ns:category[@scheme!="%s" or not(@scheme)]' % google_reader_url
    terms = []
    for element in entry.xpath(xpath, namespaces={'ns': namespace}):
        terms.append(element.attrib['term'])
    return terms

def extract_post(entry):
    '''Build a Post from one <entry> element of the feed.'''
    default_namespace = entry.nsmap[None]

    fields = {'category': get_categories(entry, default_namespace)}
    # title/published/content are single-valued; take the first match or
    # fall back to an empty string when the element is missing.
    for tag in ('title', 'published', 'content'):
        values = extract_tag(entry, tag, default_namespace)
        fields[tag] = values[0] if values else u''

    return Post(fields['title'], fields['published'],
                fields['category'], fields['content'])

def to_posts(xml_string):
    '''Parse the feed XML and convert every top-level <entry> into a Post.'''
    root = etree.fromstring(xml_string)
    ns = root.nsmap[None]
    return [extract_post(entry)
            for entry in root.xpath('ns:entry', namespaces={'ns': ns})]

#-------------------------------------------------------------------------------
# Blogger API.
#-------------------------------------------------------------------------------

class MyBloggerClient(gdata.blogger.client.BloggerClient):
    '''BloggerClient extended so imported posts keep their original timestamps.'''

    def add_post(self, blog_id, title, body, labels=None, published=None, draft=False,
                 auth_token=None, title_type='text', body_type='html', **kwargs):
        '''
        Construct an atom Entry for the blog post and send it to the server.

        This method is copied from gdata.blogger.client.BloggerClient.add_post
        with one extra argument, "published", which backdates the post to the
        given timestamp instead of "now".
        '''
        new_entry = gdata.blogger.data.BlogPost(
                title=atom.data.Title(text=title, type=title_type),
                content=atom.data.Content(text=body, type=body_type))
        if labels:
            for label in labels:
                new_entry.add_label(label)
        if published:
            # Fixed inconsistent 2-space indentation in the original.
            new_entry.published = atom.data.Published(text=published)
        if draft:
            new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
        return self.post(new_entry, gdata.blogger.client.BLOG_POST_URL % blog_id,
                         auth_token=auth_token, **kwargs)

    def get_blog_id(self, url):
        '''Return the id of the blog whose HTML link equals url, or None.'''
        blogs = self.get_blogs()
        for b in blogs.entry:
            if b.get_html_link().href == url:
                return b.get_blog_id()
        return None

    def submit(self, post, blog_id, verbose=True):
        '''Upload a single Post (title, content, labels, timestamp) to blog_id.'''
        if verbose:
            print('-' * 80)
            print(str(post))

        self.add_post(blog_id, post.title, post.content, labels=post.categories,
                      published=post.published, title_type='html')

        if verbose:
            print('>> DONE.')


class FakeMyBloggerClient(MyBloggerClient):
    '''No-op client used by --dry-run: accepts every post without network I/O.'''

    def add_post(self, *args, **kwargs):
        # Silently swallow the post.
        return None

    def get_blog_id(self, url):
        # Any dummy value works because add_post ignores it.
        return 0


def create_blogger_client(dry_run):
    '''Return a Blogger client; a non-authorizing stub when dry_run is set.'''
    if dry_run:
        return FakeMyBloggerClient()

    client = MyBloggerClient()
    # Interactively asks the user for credentials.
    gdata.sample_util.authorize_client(client, service='blogger')
    return client

#-------------------------------------------------------------------------------
# main.
#-------------------------------------------------------------------------------
class FailedPointRecorder(object):
    '''Persists the 1-based index of the first post that failed to upload,
    so an interrupted import can be resumed with --continue.
    '''

    # State file written in the current working directory.
    FAILED_POINT_FILE_NAME = '.blogger_import_posts'

    def read_failed_point(self):
        '''Return the recorded index, or exit(1) when nothing usable is stored.'''
        with open(self.FAILED_POINT_FILE_NAME) as fr:
            for line in fr:
                try:
                    return int(line)
                # int() only raises ValueError on bad text; the original
                # caught Exception with Py2-only "except E, e" syntax.
                except ValueError:
                    print('Cannot read the failed point.')
        print('Read no data from %s.' % self.FAILED_POINT_FILE_NAME)
        sys.exit(1)

    def write_failed_point(self, begin):
        '''Record begin (1-based post index) as the resume point.'''
        with open(self.FAILED_POINT_FILE_NAME, 'w') as fw:
            print('%d' % begin, file=fw)

    def clean(self):
        '''Reset the state file to "no failed point" (a single blank line).'''
        with open(self.FAILED_POINT_FILE_NAME, 'w') as fw:
            print('', file=fw)


def process(posts, begin, client, blog_id, recorder):
    '''Submit posts[begin-1:] in order; record and re-raise on failure.

    begin is 1-based.  When a submit fails, the index of the failing post is
    written via recorder so a later run with --continue resumes there, and
    the exception is propagated.  On full success the recorder is cleaned.
    '''
    for index, post in enumerate(posts, 1):
        if index < begin:
            continue

        try:
            client.submit(post, blog_id)
        # Broad on purpose (network/API errors vary), but the original used
        # Py2-only "except Exception, e" syntax with an unused binding.
        except Exception:
            recorder.write_failed_point(index)
            raise

    recorder.clean()

def main(input_file, to_url, options):
    '''\
    %prog [options] <input_file> <to_blog_url>

    Extract posts in <input_file> and then post them to <to_blog_url>.
    After being executed, the script will ask you the username and password
    for <to_blog_url>.

    input_file   : a XML file downloaded from Google Reader.
    to_blog_url  : it must be a Blogger site.
    '''
    # NOTE: the docstring doubles as the optparse usage text; keep it intact.
    client = create_blogger_client(options.dry_run)
    posts = to_posts(read_xml(input_file))

    blog_id = client.get_blog_id(to_url)
    if blog_id is None:
        print('Cannot find the blog %s' % to_url, file=sys.stderr)
        return 1

    recorder = FailedPointRecorder()
    if options.continue_:
        begin = recorder.read_failed_point()
    else:
        begin = options.begin
    process(posts, begin, client, blog_id, recorder)

    return 0


if __name__ == '__main__':
    # main.__doc__ carries the usage text shown by --help.
    parser = optparse.OptionParser(usage=main.__doc__)
    parser.add_option("-d", "--dry-run",
                      action="store_true", dest="dry_run", default=False,
                      # Fixed help text: "interacting to" -> "interacting with".
                      help="Run the whole process without interacting with Blogger.")
    parser.add_option("-b", "--begin",
                      action="store", dest="begin", default=1, type=int,
                      help="Submit the posts from the BEGINth post.")
    # dest is "continue_" because "continue" is a Python keyword.
    parser.add_option("-c", "--continue",
                      action="store_true", dest="continue_", default=False,
                      # Fixed help text: "supervior than" -> "superior to".
                      help=("Continue from the last failed point if exists. "
                            "This option is superior to --begin."))
    options, args = parser.parse_args()

    if len(args) != 2:
        parser.print_help()
        sys.exit(1)

    input_file, to_url = args
    sys.exit(main(input_file, to_url, options))
