# -*- coding: utf-8 -*-
# All page parsing below is done with regular expressions
import scrapy
import re
from Recruit.items import QianChengItem

class QianchengSpider(scrapy.Spider):
    """Scrape accountant ("会计") job postings in Beijing from 51job.com.

    Crawl flow: ``start_requests`` yields search-result pages, ``parse``
    extracts the detail-page links, and ``parse_detail`` regex-scrapes one
    posting into a :class:`QianChengItem`.
    """
    name = 'qiancheng'
    allowed_domains = ['51job.com']
    # start_urls = ['http://51job.com/']

    # Patterns are compiled once here instead of per response.  Raw strings
    # avoid invalid-escape warnings for sequences like \s and \u4E00.
    # The detail page exposes several title="..." attributes; by observed page
    # layout match 0 is the site name, 1 the job title, 2 the company and 3 a
    # "|"-separated summary line -- TODO confirm this still holds for 51job.
    _TITLE_RE = re.compile(r'title=".*?"')
    _SALARY_RE = re.compile(r'<strong>.*</strong>')
    _DESC_RE = re.compile(r'<div class="bmsg job_msg inbox">[\s\S]*?<div class="mt10">')
    _TAG_RE = re.compile(r'<[a-zA-Z0-9/" ]*>')
    _DIV_RE = re.compile(r'<div .*>')
    # Keeps CJK characters, upper-case latin letters, digits and '-'.
    _FIELD_RE = re.compile(r'[\u4E00-\u9FA5A-Z0-9-]+')

    def start_requests(self):
        """Yield requests for the search-result pages (currently page 1 only).

        ``%25E4%25BC%259A%25E8%25AE%25A1`` in the URL is the double
        URL-encoded keyword "会计" (accountant).
        """
        # base_url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,Linux,2,{}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
        # base_url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,UI,2,{}.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
        base_url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E4%25BC%259A%25E8%25AE%25A1,2,{}.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
        # range(1, 2) == pages [1]; widen the range to crawl more pages.
        for page in range(1, 2):
            yield scrapy.Request(url=base_url.format(page), callback=self.parse)

    def parse(self, response):
        """Extract every job-detail URL from a search-result page and follow it."""
        detail_urls = response.xpath("//div[@class='el']/p/span/a/@href").extract()
        for detail_url in detail_urls:
            yield scrapy.Request(url=detail_url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Regex-parse one job-detail page and yield a populated QianChengItem.

        Pages whose markup does not match the expected structure are skipped,
        but the failure is logged instead of being silently swallowed.
        """
        try:
            body = response.text
            titles = self._TITLE_RE.findall(body)
            # Job title, e.g. title="会计" -> 会计
            title = titles[1].split('"')[1]
            # Salary: text between the tags of the second <strong> match.
            salary = self._SALARY_RE.findall(body)[1]
            salary = salary.split('>')[1].split('<')[0]
            # Company name
            company = titles[2].split('"')[1]
            # Summary line: "addr | experience | education | ... | publish time"
            summary = titles[3]
            fields = [self._FIELD_RE.findall(part)[0] for part in summary.split('|')]
            addr = fields[0]                  # location
            experience = fields[1]            # required experience
            education = fields[2]             # required education
            publish_time = fields[4]          # posting date
            # Site name: first four characters of the page-level title attr.
            site_name = titles[0].split('"')[1][0:4]
            this_url = response.url

            # Job description: take the description <div> block, then strip
            # simple HTML tags (the regex approach is known to be lossy).
            first_desc = self._DESC_RE.findall(body)[0]
            stripped_desc = self._TAG_RE.sub('', first_desc)
            new_desc = self._DIV_RE.sub('', stripped_desc)

            item = QianChengItem()
            item['name'] = site_name
            item['title'] = title
            item['salary'] = salary
            item['company'] = company
            item['addr'] = addr
            item['experience'] = experience
            item['Education'] = education
            item['time'] = publish_time
            item['new_desc'] = new_desc
            item['this_url'] = this_url
            yield item
        except Exception:
            # Best-effort scraping: skip malformed pages, but leave a trace so
            # layout changes on 51job are noticed (the old bare `except: pass`
            # hid every failure).
            self.logger.exception('Failed to parse detail page %s', response.url)
