#!/usr/bin/python
# -*- encoding:utf-8 -*-

import re
import httplib2
import time
from lxml.html import document_fromstring
import json
import logging
import socket
import random
import sys
logger = logging.getLogger('bokee.blog')

urlbase = "http://sibyl.bokee.com/"

def get_html_doc(url):
    """Fetch *url* with up to 10 attempts and return the parsed lxml document.

    Retries on socket timeouts, socket errors, AttributeError raised from
    inside httplib2, and HTTP 500 responses, sleeping between attempts so the
    crawl does not hammer the server.  The body is decoded as gb18030 (the
    site serves Chinese content).  Returns None when the final response is
    not HTTP 200 or every attempt failed before a response was received.
    """
    headers = {'user-agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)'}
    # Initialize so the post-loop check is safe even if every attempt raised
    # before h.request() assigned these (the old code hit NameError here).
    resp = None
    content = None
    for i in range(10):
        h = httplib2.Http(timeout=30)
        h.follow_redirects = False
        h.force_exception_to_status_code = False
        print('Attempt %s:  %s' % (i + 1, url))
        try:
            resp, content = h.request(url, headers=headers)
            # Random pause between requests to look less like a bot.
            time.sleep(random.randint(1, 10))
            # BUG FIX: resp.status is an int, so the old comparison against
            # the string '500' never matched and 500s were never retried.
            if resp.status == 500:
                print("Request got a 500 error: %s" % url)
                continue
            break
        except socket.timeout:
            # BUG FIX: the old prints passed args logger-style ("%s", x),
            # printing a tuple instead of the formatted message.
            print("Request timed out after %s seconds: %s" % (h.timeout, url))
            continue
        except socket.error as e:  # 'as' syntax works on Python 2.6+ and 3
            print("Got socket error: %s" % e)
            time.sleep(3)
            continue
        except AttributeError as e:
            print("Got Attribute error: %s" % e)
            time.sleep(60)
            continue

    if resp is not None and resp.status == 200:
        doc = document_fromstring(content.decode('gb18030'))
    else:
        doc = None
    return doc

def get_title_and_contents(url):
    """Fetch an article page and return its (title, contents) pair.

    Returns (None, None) when the page could not be fetched, or when the
    fetched page lacks the expected ``div.entity`` / ``h2`` markup — the old
    code raised IndexError on any title-less page and killed the whole crawl.
    ``contents`` is the list of text nodes of attribute-free <p> tags inside
    the entity div.
    """
    doc = get_html_doc(url)
    if doc is None:
        return None, None
    titles = doc.xpath("//div[@class='entity']/h2/text()")
    if not titles:
        # Page fetched but not shaped like an article; skip rather than crash.
        return None, None
    contents = doc.xpath("//div[@class='entity']//p[not(@*)]/text()")
    return titles[0], contents


# --- Script body: crawl every article linked from the blog index and dump
# --- the results as JSON to the file named by the first CLI argument.

# Collect per-catalog index pages from the front page.  Guard against the
# front page itself failing to load (the old code hit AttributeError on None).
index_doc = get_html_doc(urlbase)
catalogs = index_doc.xpath("//div[@id='blogIndex']//li//a[@href]/@href") if index_doc is not None else []
all_links = []
for catalog in catalogs:
    doc = get_html_doc("%s%s" % (urlbase, catalog))
    if doc is not None:
        # extend() instead of repeated list concatenation (was quadratic).
        all_links.extend(doc.xpath('//li//a[@href]/@href'))

# De-duplicate, then sort newest first (links look like '<numeric id>.html').
all_links = list(set(all_links))
all_links.sort(key=lambda x: int(x.split('.')[0]), reverse=True)

articles = []
for link in all_links:
    title, contents = get_title_and_contents("%s%s" % (urlbase, link))
    articles.append({
        'title': title,
        'contents': contents,
        })

# BUG FIX: the file() builtin is Python-2-only (removed in Python 3);
# open() behaves identically here and works on both.
with open(sys.argv[1], 'w') as fp:
    json.dump(articles, fp, ensure_ascii=False, indent=4)

