from BeautifulSoup import BeautifulSoup
from google.appengine.api import urlfetch
from google.appengine.api import taskqueue
from model import StoryLink
import logging
import re


class CrawlNuoqiuLink:
    """Scrapes story-listing pages from nuoqiu.com.

    Builds category/page URLs, fetches them via App Engine urlfetch,
    and parses pager totals and story links out of the HTML with
    BeautifulSoup (legacy BS3 API).
    """

    def __init__(self):
        # Site identifier stored on each StoryLink entity by the caller.
        self.site = 'nuoqiu.com'
        # First page of a category; %d is the category id.
        self.url = 'http://www.nuoqiu.com/column/%d.html'
        # Paginated category pages; %d are (category id, page number).
        self.page_url = 'http://www.nuoqiu.com/column/%d/0/%d.html'
        # Upper bound for the category loop (callers use range(1, MAXCAT)).
        self.MAXCAT = 15

    def get_content(self, url):
        """Fetch *url* and return the response body, or '' on failure.

        Known urlfetch errors are logged and swallowed so a single bad
        page does not abort the whole crawl (deliberate best-effort).
        """
        text = ''
        try:
            result = urlfetch.fetch(url, deadline=10)
            if result.status_code == 200:
                text = result.content
        except urlfetch.InvalidURLError:
            logging.info('InvalidURLError')
        except urlfetch.ResponseTooLargeError:
            logging.info('ResponseTooLargeErrorget_reslut() failure')
        except urlfetch.DownloadError:
            logging.info('DownloadError get_reslut() failure')
        return text

    def getPageNum(self, p):
        """Return the total page count of a category page as a string.

        Parses the "current/total" pager label (e.g. "1/12") and returns
        the total.  Returns '1' when the pager cannot be found, so that
        callers iterating range(1, int(result)) visit zero extra pages.
        """
        # Bug fix: the original used `continue` outside any loop here
        # (a SyntaxError); bail out with a safe default instead.
        soup = BeautifulSoup(p, fromEncoding="utf-8")
        if soup is None:
            return '1'
        pager = soup.find('div', {'class': 'pageviewp'})
        if pager is None:
            return '1'
        info = pager.find('li', {'class': 'info'})
        if info is None or info.string is None:
            return '1'
        m = re.compile(r'^\d{1}/(\d+)').search(str(info.string))
        if m is None:
            # Pager text did not match "N/M"; treat as a single page.
            return '1'
        return m.groups()[0]

    def getStoryList(self, p, i):
        """Extract {story_id: story_name} from a listing page.

        *i* is the category id; currently unused but kept so existing
        callers' signatures stay valid.
        """
        story = {}
        soup = BeautifulSoup(p, fromEncoding="utf-8")
        # Compile once outside the loop; also guard anchors that are
        # missing or whose href does not match /static/<id>/ — the
        # original raised AttributeError on the failed search().
        id_pattern = re.compile(r'^/static/(\d+)/$')
        for block in soup.findAll('ul'):  # renamed: `list` shadowed the builtin
            anchor = block.find('a')
            if anchor is None:
                continue
            m = id_pattern.search(str(anchor['href']))
            if m is None:
                continue
            story[m.groups()[0]] = anchor.string
        return story
    
def main():
    """Walk every category and pagination URL on nuoqiu.com and persist
    each page URL as a StoryLink datastore entity."""
    crawler = CrawlNuoqiuLink()
    for category in range(1, crawler.MAXCAT):
        first_page = crawler.get_content(crawler.url % category)
        total_pages = int(crawler.getPageNum(first_page))
        # NOTE(review): range(1, total_pages) never visits page
        # `total_pages` itself — confirm whether the last page is
        # intentionally excluded.
        for page_no in range(1, total_pages):
            page_link = crawler.page_url % (category, page_no)
            #page = crawl.get_content(newPageUrl)
            #story_list = crawl.getStoryList(page,i)
            logging.info(str(page_link))
            link_row = StoryLink()
            link_row.site = crawler.site
            link_row.link = page_link
            link_row.put()

#            taskqueue.add(url='/storylinkwork', 
#                          params={'url': newPageUrl})
                
# Script entry point. Fix: the call was indented with a tab while the
# rest of the file uses spaces — normalized to 4 spaces for consistency.
if __name__ == '__main__':
    main()

        
        
    
        
        