#!/usr/bin/python
#coding:utf-8

import urllib
import urllib2
import re

# from elasticsearch import Elasticsearch
# from elasticsearch import helpers
import json
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()

class Joke(object):
    """Plain data holder for one scraped joke record.

    Attributes:
        uid:    author's user id (path fragment from the profile link).
        name:   author's display name.
        joke:   the joke body as raw HTML text.
        praise: praise ("smile") count, as scraped text.
    """

    # Field order mirrors the regex capture groups in the scraper below.
    _FIELDS = ("uid", "name", "joke", "praise")

    def __init__(self, uid, name, joke, praise):
        for field, value in zip(self._FIELDS, (uid, name, joke, praise)):
            setattr(self, field, value)
    
def object2json(obj):
    """Convert a Joke-like object into a plain dict ready for json.dumps.

    Reads the four public attributes (uid, name, joke, praise) off *obj*
    and returns them keyed by the same names.
    """
    fields = ("uid", "name", "joke", "praise")
    return {field: getattr(obj, field) for field in fields}


for x in xrange(1,15):
    page = x
    url = 'http://www.qiushibaike.com/8hr/page/' + str(page)
    print url
    # url = 'http://www.qiushibaike.com/'
    # url = 'http://www.qiushibaike.com/8hr/page/' + str(page)
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = { 'User-Agent' : user_agent }

    esurl = ''
    try:
        # jokes 
        request = urllib2.Request(url,headers = headers)
        response = urllib2.urlopen(request)
        content =  response.read().decode("utf-8")
        # print content
        pattern = re.compile('<div.*?author clearfix">.*?<a href="/users/(.*?)".*?<img.*?alt="(.*?)"/>.*?</div>.*?<div.*?content">(.*?)<!-.*?</div>.*?<i.*?class="number">(.*?)</i>',re.S)
        items = re.findall(pattern,content)
        i=0
        jokes = []
        for item in items:
            joke=Joke(item[0],item[1],item[2],item[3].strip("\r\n"))
            jokes.append(joke)
        for joke in jokes:
            print object2json(joke)
            jokesJson = json.dumps(object2json(joke))
            res = es.index(index="joke-index", doc_type='joke',body=jokesJson)
            print jokesJson
            


    except urllib2.URLError, e:
        if hasattr(e,"code"):
            print e.code
        if hasattr(e,"reason"):
            print e.reason
