# encoding=utf-8
import cPickle as pickle
import json
import re
import time
from datetime import datetime, timedelta

import httplib2
from scrapy import Selector
from scrapy.http import Request
from scrapy.spiders import CrawlSpider

from jobspider.items import JobItem, CompanyInfoItem, JobDetailItem
from jobspider.spiders import Util


class Spider(CrawlSpider):
    name = "job58Spider"

    def start_requests(self):

        keyword = ""

        now = time.strftime("%Y%m%d", time.localtime())

        date_range = now + "_" + str(int(now) + 1)
        # date_range = "20170423_20170424"

        search_url = "http://gz.58.com/job/pn1?utm_source=link&spm=s-35088951609095-pe-f-801.psy_p1&PGTID=0d100000-0000-3059-8b06-491756665338&ClickID=1&postdate=" + date_range

        if keyword:
            search_url += "&key=" + keyword

        print "search_url", search_url
        yield Request(url=search_url, callback=self.parse_search)

    def parse_search(self, response):
        content = response.body
        sel = Selector(text=content)

        # print content

        dls = sel.xpath("//dl[@__addition='0']")

        for dl in dls:
            job_url = dl.xpath("dt/a/@href").extract_first()
            job_name = dl.xpath("dt/a/text()").extract_first()

            company_detail_url = dl.xpath("dd[@class='w271']/a[@class='fl']/@href").extract_first()
            company_name = dl.xpath("dd[@class='w271']/a[@class='fl']/text()").extract_first()

            company_level = dl.xpath("dd[@class='w271']/a[@class='famousCompanyIcon listPageIcon']/@title").extract_first()
            # print "company_level", company_level

            district = dl.xpath("dd[@class='w96']/text()").extract_first()
            time = dl.xpath("dd[@class='w68']/text()").extract_first()
            publish_time = Util.time_transfer(time)


            job = JobItem()

            job["job_url"] = job_url
            job["job_name"] = job_name

            job["company_name"] = company_name
            job["company_detail_url"] = company_detail_url

            companyinfo = CompanyInfoItem()
            companyinfo["company_name"] = company_name
            companyinfo["company_level"] = company_level
            companyinfo["company_detail_url"] = company_detail_url

            job["city"] = u"\u5e7f\u5dde"

            job["district"] = district

            job["job_detail_address"] = job["city"] + " " + job["district"]

            job["salary"] = None

            job["publish_time"] = publish_time

            job["query_time"] = datetime.now().strftime("%Y-%m-%d-%H-%M")

            job["query_type"] = "58同城"

            yield job
            yield companyinfo
            # yield Request(url=job_url, callback=self.parse_job_detail,
            #               meta={"job_url": job_url, "companyinfo": companyinfo})
            # 名企
            # if company_level == u"\u540d\u4f01":
            #     print u"\u540d\u4f01", company_detail_url
            #     yield Request(url=company_detail_url, callback=self.parse_famous_company_detail,
            #                   meta={"companyinfo": companyinfo})
            #     break
            # else:
            #     yield Request(url=company_detail_url, callback=self.parse_company_detail,
            #                   meta={"companyinfo": companyinfo})



        next_url = sel.xpath("//a[@class='next']/@href").extract_first()
        if next_url:
            print next_url
            yield Request(url=next_url, callback=self.parse_search)


    def parse_job_detail(self, response):
        """Parse a single job-detail page (currently disabled).

        The Request that would route here is commented out in parse_search,
        so this callback is effectively dead code right now; only the raw
        body is read.  The original extraction logic is preserved below as
        commented-out scaffolding for future re-enabling -- NOTE(review):
        its selectors should be re-verified against the live page layout
        before being restored.
        """
        content = response.body  # fetched but unused while the parser is disabled
        # print response.body
        # sel = Selector(text=content)
        #
        # job_url = response.meta["job_url"]
        # companyinfo = response.meta["companyinfo"]
        #
        # cn = sel.xpath("//p[@class='msg ltype']/text()").extract_first().strip()
        # cn_msg = cn.split("|")
        # try:
        #     conpany_type = cn_msg[0]
        #     conpany_size = cn_msg[1]
        #     company_orientation = cn_msg[2]
        # except:
        #     conpany_type = None
        #     conpany_size = None
        #     company_orientation = None
        #
        # sp4 = sel.xpath("//span[@class='sp4']")
        # sp4_em = {}
        # for em in sp4:
        #     em_id = em.xpath("em/@class").extract_first()
        #     sp4_em[em_id] = em.xpath("text()").extract_first()
        # if "i1" in sp4_em:
        #     experience = sp4_em["i1"]
        # else:
        #     experience = None
        # if "i2" in sp4_em:
        #     education_level = sp4_em["i2"]
        # else:
        #     education_level = None
        # if "i3" in sp4_em:
        #     recruit_num = sp4_em["i3"]
        # else:
        #     recruit_num = None
        # welfare = sel.xpath("//p[@class='t2']/span/text()").extract()
        #
        # work_welfare = ''.join(a + "," for a in welfare)[:-1]
        #
        # occupation_info = sel.xpath("//div[@class='bmsg job_msg inbox']").extract_first()
        # company_info = sel.xpath("//div[@class='tmsg inbox']").extract_first()
        #
        # job_contact_information = sel.xpath("//p[@class='fp']/text()").extract()[1]
        #
        # query_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        #
        # jobdetail = JobDetailItem()
        # jobdetail["job_url"] = job_url
        # # required years of working experience
        # jobdetail["experience"] = experience
        # # required education level
        # jobdetail["education_level"] = education_level
        # # number of openings
        # jobdetail["recruit_num"] = recruit_num
        # # company welfare / benefits
        # jobdetail["work_welfare"] = work_welfare
        # # job description
        # jobdetail["occupation_info"] = occupation_info
        # # contact information for the job
        # jobdetail["job_contact_information"] = job_contact_information
        # # time this query was made
        # jobdetail["query_time"] = query_time
        #
        # # company description
        # companyinfo["company_info"] = company_info
        # # company type
        # companyinfo["conpany_type"] = conpany_type
        # # company size (headcount)
        # companyinfo["conpany_size"] = conpany_size
        # # company positioning
        # companyinfo["company_orientation"] = company_orientation
        #
        # yield companyinfo
        # yield jobdetail

    def parse_company_detail(self, response):
        """No-op callback for regular company-detail pages.

        Company-page parsing has not been implemented yet; the response is
        discarded.
        """

    def parse_famous_company_detail(self, response):
        """Callback for "famous company" detail pages -- not implemented.

        The Request that would reach this callback is commented out in
        parse_search; the sketched extraction below is kept for reference.
        """
        pass
        # content = response.body
        # sel = Selector(text=content)
        #
        # companyinfo = response.meta["companyinfo"]
        # company_name = sel.xpath("//div[@class='intro_middle']/h3/text()").extract_first()
        #
        # company_info = sel.xpath("//p[@class='dis_non']/text()").extract_first()