# coding:utf-8

import requests
import scrapy

from bs4 import BeautifulSoup
import requests,re
import sys
from settings import URLS

class JobSpider(scrapy.Spider):
    """Spider that enumerates every result-page URL for the 51job searches
    configured in settings.URLS and stores them in ``all_page_urls``."""

    name = "all_job"
    allowed_domains = ["51job.com"]
    # NOTE: class-level mutable list, shared by all instances; the __main__
    # smoke test below reads it via the class, so it must stay class-level.
    all_page_urls = []

    # Matches the "共N页" ("N pages total") marker in the GBK-decoded result
    # page. Compiled once at class level instead of on every call; the
    # backslash is escaped so `\d` is not an invalid string escape.
    PAGE_COUNT_RE = re.compile(u"共(\\d+)页")

    def get_all_page_urls(self, base_url, meta):
        """Fetch page 1 of a search, read the total page count from it, and
        append one {'url': ..., 'meta': ...} entry per result page to
        ``all_page_urls``.

        base_url -- listing URL prefix; ``<page>.html`` is appended to it.
        meta     -- dict carried alongside each URL (location/keyword).

        Raises IndexError if the page-count marker is absent from the page.
        """
        html_doc = requests.get(base_url + '1.html').content.decode('gbk')
        total_page = int(self.PAGE_COUNT_RE.findall(html_doc)[0])
        for page in range(1, total_page + 1):
            self.all_page_urls.append({'url': base_url + str(page) + '.html',
                                       'meta': meta})

    def start_requests(self):
        """Collect page URLs for every configured search.

        NOTE(review): a Scrapy ``start_requests`` is normally expected to
        yield Request objects; this one only fills ``all_page_urls`` and
        returns None, so the spider itself never schedules a crawl --
        confirm this is intentional.
        """
        for url in URLS:
            meta = {'location': url['location'], 'keyword': url['keyword']}
            self.get_all_page_urls(url['url'], meta=meta)

if __name__ == '__main__':
    job = JobSpider()
    job.start_requests()
    print requests.get(JobSpider.all_page_urls[2]['url']).content[:50]