# -*- coding: utf-8 -*-
import scrapy
from myspider import items
from lxml import etree  # third-party XML/HTML parser; etree supports XPath, used here to query HTML documents


class MySpider(scrapy.Spider):
    """Spider that scrapes job-posting detail pages from 51job.com.

    Each page in ``start_urls`` is parsed into one ``MyspiderItem`` with
    the fields ``name``, ``wage``, ``company`` and ``info``.
    """
    name = 'job'
    allowed_domains = ['51job.com']
    start_urls = [
        'https://jobs.51job.com/xian-gxjs/130821422.html?s=sou_sou_soulb&t=0',
        'https://jobs.51job.com/xian-gxjs/130816711.html?s=sou_sou_soulb&t=0',
    ]

    @staticmethod
    def getlistdata(listtemp, num):
        """Safely pull element ``num`` out of an XPath result list.

        Returns the stripped string at index ``num``, or the fallback
        "-" when the input is not a list or too short (protects against
        empty XPath results).
        """
        # isinstance() must be tested FIRST: the original called
        # len(listtemp) before the type check, so a None input raised
        # TypeError instead of returning "-".
        if isinstance(listtemp, list) and len(listtemp) > num:
            # Bug fix: the original validated the length against `num`
            # but then always returned listtemp[0], silently ignoring
            # the requested index. All existing callers pass num=0, so
            # this fix is backward-compatible.
            return listtemp[num].strip()
        return "-"

    @staticmethod
    def datastrip(listtemp):
        """Strip whitespace from every string in the list and drop empties.

        Returns a new list; returns [] for non-list input (the original
        raised NameError there because `listresult` was never bound).
        """
        if not isinstance(listtemp, list):
            return []
        # One O(n) filtering pass replaces the original
        # `while '' in listresult: listresult.remove('')` loop, which
        # was accidentally O(n^2).
        return [stripped for stripped in (v.strip() for v in listtemp) if stripped]

    @staticmethod
    def cutdata(data):
        """Truncate overlong data to 255 chars (the DB column width).

        Returns "-" for non-string input so downstream storage never
        sees a raw non-str value.
        """
        return data[:255] if isinstance(data, str) else "-"

    def parse(self, response):
        """Parse one job-detail page into a ``MyspiderItem``."""
        data = items.MyspiderItem()

        htmldata = etree.HTML(response.text)

        # NOTE(review): absolute /html/body/... XPaths are brittle and
        # break on any page-layout change — consider anchoring on
        # class/id attributes instead.
        data['name'] = self.cutdata(
            self.getlistdata(htmldata.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/h1/@title'), 0))
        data['wage'] = self.cutdata(
            self.getlistdata(htmldata.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/strong/text()'), 0))
        data['company'] = self.cutdata(
            self.getlistdata(htmldata.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/p[1]/a/@title'), 0))
        data['info'] = self.cutdata(
            '-'.join(self.datastrip(htmldata.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/p[2]/text()'))))
        yield data
