# -*- coding: utf-8 -*-
import scrapy
from jobs.items import JobsItem
from jobs.settings import IS_DEBUG

# from scrapy.shell import inspect_response

# City name -> 51job numeric area code used in the search URL path.
Locations = {
    "北京": "010000",
    "上海": "020000",
    "广州": "030200",
    "深圳": "040000",
    "武汉": "180200",
    "杭州": "080200",
}

# Position keywords to search for; one start URL is built per keyword.
Positions = ["Web前端", "H5", "JavaScript"]

# CSS = {
#     "company": "html body div.dw_wp div#resultList.dw_table div.el span.t2 a::text",
#     "company_link": "html body div.dw_wp div#resultList.dw_table div.el span.t2 a::attr(href)",
#     "job": "html body div.dw_wp div#resultList.dw_table div.el p.t1 span a::text",
#     "job_link": "html body div.dw_wp div#resultList.dw_table div.el p.t1 span a::attr(href)",
#     "location": "html body div.dw_wp div#resultList.dw_table div.el span.t3::text",
#     "salary": "html body div.dw_wp div#resultList.dw_table div.el span.t4::text",
#     "time": "html body div.dw_wp div#resultList.dw_table div.el span.t5::text",
#     "next": "html body div.dw_wp div.dw_page div.p_box div.p_wp div.p_in ul li.bk a::attr(href)"
# }

# Row selector: one <div class="el"> per job posting in the result table.
XPATH_BASE = "/html/body/div[@class='dw_wp']/div[@id='resultList']/div[@class='el']"

# Field selectors evaluated relative to a single result row; "next" is an
# absolute path to the pagination "next page" link.
XPATHS = {
    "job": ".//p/span/a/text()",                     # posting title
    "job_link": ".//p/span/a/@href",                 # posting detail URL
    "company": ".//span[@class='t2']/a/text()",      # company name
    "company_link": ".//span[@class='t2']/a/@href",  # company page URL
    "location": ".//span[@class='t3']/text()",       # work location
    "salary": ".//span[@class='t4']/text()",         # may be absent on the page
    "time": ".//span[@class='t5']/text()",           # publish date
    "next": "/html/body/div[@class='dw_wp']/div[@class='dw_page']/div[@class='p_box']/div[@class='p_wp']/div[@class='p_in']/ul/li[@class='bk'][2]/a/@href",
}


class A51jobSpider(scrapy.Spider):
    """Crawl 51job.com search results for every keyword in ``Positions``.

    One start URL is generated per position keyword, all scoped to the
    city hard-coded below (Shanghai).  Each result page yields one
    ``JobsItem`` per posting row and, unless ``IS_DEBUG`` is set,
    follows the "next page" link until pagination ends.
    """

    name = "51job"
    allowed_domains = ["51job.com"]

    # Build the start URLs with a comprehension: unlike a class-body
    # ``for`` loop this leaks no ``job``/``search_url`` attributes onto
    # the class, and no debug ``print`` runs at import time.
    start_urls = [
        "http://search.51job.com/list/"
        + Locations["上海"]  # NOTE(review): city is hard-coded — confirm intended
        + ",000000,0000,00,9,99,"
        + job
        + ",2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
        for job in Positions
    ]

    def parse(self, response):
        """Parse one search-result page.

        Yields a ``JobsItem`` per result row; in non-debug mode also
        yields a ``scrapy.Request`` for the next result page when a
        pagination link exists.
        """
        divs = response.xpath(XPATH_BASE)

        if IS_DEBUG:
            # Debug run: parse only the first row and stop (no pagination).
            yield self.parseItem(divs[0])
        else:
            for div in divs:
                yield self.parseItem(div)

            # Follow the "next page" link if the pagination control has one.
            next_urls = response.xpath(XPATHS["next"]).extract()
            if next_urls:
                yield scrapy.Request(next_urls[0], callback=self.parse)

    def parseItem(self, div):
        """Extract one ``JobsItem`` from a single result-row selector.

        Raises ``IndexError`` when a mandatory field selector matches
        nothing (unchanged from the original behaviour); only the salary
        field is optional, defaulting to u"面议" ("negotiable").
        """

        def first(key):
            # First matched text for XPATHS[key], whitespace-stripped.
            return div.xpath(XPATHS[key]).extract()[0].strip()

        item = JobsItem()
        item["job"] = first("job")
        item["job_link"] = first("job_link")
        item["company"] = first("company")
        item["company_link"] = first("company_link")
        item["location"] = first("location")

        salarys = div.xpath(XPATHS["salary"]).extract()
        item["salary"] = salarys[0].strip() if salarys else u"面议"

        item["time"] = first("time")
        return item
