# -*- coding: utf-8 -*-
import scrapy
import re
import dateparser
from chardet import detect

import json
from tinydb import TinyDB, Query
from furl import furl
from scrapy import Selector

from Jobs.items import JobsItem


class HuibospiderSpider(scrapy.Spider):
    """Spider that scrapes Python job listings from www.huibo.com.

    Flow: ``parse`` re-issues the search URL, ``parse_city`` extracts one
    ``JobsItem`` per posting on the results page.
    """

    name = 'HuiboSpider'
    allowed_domains = ['www.huibo.com']
    start_urls = ['http://www.huibo.com/jobsearch/?key=python']
    # cache_db = TinyDB('HuiboSpider-cache.json')  # caching database (disabled)

    def parse(self, response):
        """Re-request the page so that parse_city handles the listing.

        ``dont_filter=True`` is required because this exact URL was just
        fetched and would otherwise be dropped by the dupe filter.
        """
        yield scrapy.Request(response.url, callback=self.parse_city,
                             dont_filter=True)

    @staticmethod
    def _first(node, xpath):
        """Return the first match of *xpath* under *node*, stripped.

        Returns '' instead of raising when the field is missing
        (extract_first() yields None for an empty selection).
        """
        return (node.xpath(xpath).extract_first() or '').strip()

    def parse_city(self, response):
        """Parse one search-results page and yield a JobsItem per posting."""
        # Flatten the markup so text() nodes are not split across lines.
        # response.text replaces the deprecated body_as_unicode().
        resp = Selector(None,
                        response.text.replace('\r', '').replace('\n', ''),
                        'html')
        # [:-1]: the last postIntro div is skipped — presumably not a job
        # posting (pagination/footer); TODO confirm against live markup.
        for node in resp.xpath(
                '//div[@class="postSearchLeft"]/div[@class="postIntro"]')[:-1]:
            items = JobsItem()
            # Company name
            items["company"] = self._first(node, './/div[@class="title"]/a/text()')
            # Job title
            items["jobName"] = self._first(node, './/span[@class="name"]/a/text()')
            # URL of the job-detail page
            items["positionURL"] = self._first(node, './/span[@class="name"]/a/@href')
            # Salary
            items["salary"] = self._first(node, './/span[@class="money"]/text()')
            # Work location
            items["city"] = self._first(node, './/span[@class="address"]/text()')
            # Required working experience
            items["workingExp"] = self._first(node, './/span[@class="exp"]/text()')
            # Posting date, normalized to YY/MM/DD
            items["startDate"] = self.parse_date(
                self._first(node, './/span[@class="job_time"]/text()'))
            items["spiderName"] = self.name
            yield items

        # TODO: follow positionURL with a parse_detail callback to scrape
        # the full job description (previously stubbed out here).

    def parse_date(self, job_time):
        """Normalize a posting-date string to 'YY/MM/DD'.

        Fixes the original padding bug: month and day were zero-padded only
        when BOTH were single-digit, yielding inconsistent values such as
        '19/5/15' next to '19/05/07'. Both fields are now always two digits.

        Returns '' when dateparser cannot parse *job_time* (it returns None
        for unrecognized input), instead of crashing on d.year.
        """
        d = dateparser.parse(job_time)
        if d is None:
            return ''
        # year % 100 matches str(year)[-2:] for 4-digit years, zero-padded.
        return '{:02d}/{:02d}/{:02d}'.format(d.year % 100, d.month, d.day)
