# -*- coding: utf-8 -*-
import scrapy
from urllib import parse
from scrapy.http import  Request

from bjzph.items import BjzphItem

import pymongo

# MongoDB connection settings; the collection stores one document per
# crawled job-fair URL and is used to skip already-seen fairs.
MONGOD_URL = "localhost"
MONGOD_DB = 'bjzph_db'
MONGOD_TABLE = "bjzph"

# NOTE(review): the client connects at import time as a module-level side
# effect — presumably fine for a single-spider project, but confirm this
# module is not imported anywhere a connection is unwanted.
client = pymongo.MongoClient(MONGOD_URL)
db = client[MONGOD_DB]

class ZphSpider(scrapy.Spider):
    """Spider for www.bjzph.com (Beijing job-fair portal).

    Flow: the front page's navigation bar links to per-city listing
    pages; each listing page links to individual fair detail pages and
    (possibly) a "next page". Detail URLs already present in MongoDB
    are skipped.
    """

    name = 'zph'
    allowed_domains = ['www.bjzph.com']

    # Official Beijing job-fair website.
    start_urls = ['https://www.bjzph.com/']

    def parse(self, response):
        """Yield a request for each city's listing page.

        The city links are <a> tags inside the navigation block.
        """
        nav = response.css('div.daohang.jiange ul.youb2')
        for link in nav.css('a::attr(href)').extract():
            yield Request(
                url=parse.urljoin(response.url, link),
                callback=self.get_list_page,
            )

    def get_list_page(self, response):
        """Yield detail-page requests for a city's fairs, then paginate."""
        urls = response.css(".zphzt > a::attr(href)").extract()

        # URLs already stored in MongoDB. Use a set so each membership
        # test below is O(1) instead of scanning a list per URL.
        crawled_urls = {item['url'] for item in db[MONGOD_TABLE].find()}
        for url in urls:
            url = parse.urljoin(response.url, url)
            if url in crawled_urls:
                print('==数据库中已经存在==' * 20)
                continue
            crawled_urls.add(url)
            yield Request(
                url=url,
                callback=self.parse_detail,
            )

        # Follow the "下一页" (next page) link, if present.
        # BUG FIX: the original code joined first and tested the joined
        # URL, but urljoin(base, "") == base is always truthy, so with no
        # next link the spider re-requested the current page. Test the
        # raw href before joining.
        next_url = response.xpath('//*[text()[contains(.,"下一页")]]/@href').extract_first("")
        if next_url:
            yield Request(
                url=parse.urljoin(response.url, next_url),
                callback=self.get_list_page,
            )

    def parse_detail(self, response):
        """Extract one job-fair item; log extraction failures to error.log."""
        try:
            item = BjzphItem()
            item['title'] = response.css('.title::text').extract_first().strip()
            item['url'] = response.url
            # Field text looks like "label： value" (full-width colon plus
            # space); keep only the part after the separator.
            item['city'] = response.css('.cityname::text').extract_first().strip().split('： ')[-1]
            item['begin_date'] = response.css('.starttime::text').extract_first().strip().split('： ')[-1]
            item['end_date'] = response.css('.endtime::text').extract_first().strip().split('： ')[-1]
            item['address'] = response.css('.address::text').extract_first().strip().split('： ')[-1]
            yield item
        except Exception as e:
            # A missing selector makes extract_first() return None and the
            # .strip() raise AttributeError; record the failing URL rather
            # than crashing the crawl.
            with open(r'error.log', 'a', encoding='UTF8') as w:
                w.write(response.url + '-->' + str(e) + "\n")