# -*- coding: utf-8 -*-
import sys
from scrapy import Spider
from scrapy.http import Request
from scrapy.selector import Selector
from tutorial.items import ZhiLianItem
import re
reload(sys)
sys.setdefaultencoding('utf8')

class ZhilinaSpider(Spider):
    """Spider crawling zhaopin.com search results for data-analyst jobs.

    Starts from one search-result URL, yields a ZhiLianItem per job
    posting on each page, and follows the "next page" link until none
    remains.  (Python 2 code: field values are byte strings sanitised
    through a UTF-8 decode/encode round-trip.)
    """
    name = "zhilian"
    allowed_domains = ["zhaopin.com"]
    start_urls = ['http://zhaopin.com/']
    # NOTE(review): never read anywhere in this file -- presumably a page
    # counter used elsewhere; confirm before removing.
    list_num = 1

    def parse(self, response):
        """Build the first search-result URL and dispatch it to parse_get_list.

        Fix: the original hard-coded ``p=1`` in the URL while passing an
        unused ``page=1`` kwarg to ``str.format`` (extra kwargs are silently
        ignored); the page number is now a real template placeholder, so the
        produced URL is unchanged but the parameter actually takes effect.
        """
        baseurl = ("http://sou.zhaopin.com/jobs/searchresult.ashx"
                   "?jl={area}&sm=1&kw={keyword}&p={page}")
        href = baseurl.format(area=u'选择地区', keyword=u'数据分析师', page=1)
        yield Request(url=href, callback=self.parse_get_list)

    @staticmethod
    def _clean(text):
        """Round-trip *text* through UTF-8 with errors='ignore'.

        On Python 2 byte strings this drops any invalid UTF-8 byte
        sequences (same sanitising the original inlined at every use site).
        """
        return text.decode('utf-8', 'ignore').encode('utf-8', 'ignore')

    @staticmethod
    def _first(values, default=''):
        """First element of *values*, or *default* when the list is empty.

        Robustness fix: the original indexed ``[0]`` unconditionally and
        could IndexError when an xpath matched nothing.
        """
        return values[0] if values else default

    @staticmethod
    def _span_value(info, pattern, default):
        """Return group 1 of *pattern* searched in *info*, or *default*."""
        match = re.search(pattern, info)
        return match.groups()[0] if match else default

    @classmethod
    def _monthly_salary(cls, info):
        """Extract the monthly salary from *info*.

        A range like ``8000-12000`` yields the integer midpoint; a single
        value is returned as-is; missing salary returns ``'0'``.
        Robustness fix: non-numeric bounds (e.g. negotiable salaries) no
        longer raise ValueError -- the raw lower bound is returned instead.
        """
        match = re.search(r"<span>职位月薪：(.*?)元/月</span>", info)
        if not match:
            return '0'
        bounds = match.groups()[0].split('-')
        if len(bounds) > 1:
            try:
                # Py2 integer division: midpoint of the salary range.
                return (int(bounds[1]) + int(bounds[0])) / 2
            except ValueError:
                return bounds[0]
        return bounds[0]

    def parse_get_list(self, response):
        """Parse one search-result page.

        Yields one ZhiLianItem per job posting, then a Request for the
        next page when a pagination link is present.
        """
        if response.status > 300:
            return
        sel = Selector(response)
        table_list = sel.xpath('//div[@id="newlist_list_content_table"]'
                               '/div[@class="newlist_detail newlist"]')
        for table_item in table_list:
            zwmc = table_item.xpath('string(div/ul/li[@class="newlist_deatil_first clearfix zwmc"]/div/a)').extract()  # job title
            href = table_item.xpath('div/ul/li[@class="newlist_deatil_first clearfix zwmc"]/div/a/@href').extract()  # detail-page link
            gsmc = table_item.xpath('string(div/ul/li[@class="newlist_deatil_three gsmc"])').extract()  # company name
            info = table_item.xpath('div/ul/li[@class="newlist_deatil_two"]').extract()  # location/type/size/experience/degree/salary
            gxsj = table_item.xpath('string(div/dl/p)').extract()  # last-updated time
            # BUG FIX: the original predicate ``li[@newlist_deatil_last]``
            # tested for an *attribute* of that name; every sibling row
            # selects by class, so match the class attribute here too.
            ramark = table_item.xpath('string(div/ul/li[@class="newlist_deatil_last"])').extract()  # remark

            if not href:
                continue
            info_text = self._clean(info[0])

            # Fix: a FRESH item per posting.  The original mutated one
            # shared ZhiLianItem instance across iterations, so pipelines
            # holding a reference could see fields overwritten by later rows.
            item = ZhiLianItem()
            item['zwmc'] = self._clean(self._first(zwmc))
            item['href'] = href[0]
            item['zwyx'] = self._monthly_salary(info_text)
            item['gsmc'] = self._clean(self._first(gsmc))
            # City only: keep the part before the first '-' (district suffix).
            item['gzdd'] = self._clean(
                self._span_value(info_text, r"<span>地点：(.*?)</span>", '无').split('-')[0])
            item['gxsj'] = self._first(gxsj)
            item['gsxz'] = self._clean(
                self._span_value(info_text, r"<span>公司性质：(.*?)</span>", '无'))
            item['gsgm'] = self._clean(
                self._span_value(info_text, r"<span>公司规模：(.*?)</span>", '无'))
            item['gzjy'] = self._clean(
                self._span_value(info_text, r"<span>经验：(.*?)</span>", '无要求'))
            item['xl'] = self._span_value(info_text, r"<span>学历：(.*?)</span>", '无要求')
            item['remark'] = self._clean(self._first(ramark))
            yield item

        next_href = sel.xpath("//li[@class='pagesDown-pos']/a/@href").extract()
        if next_href:
            yield Request(url=next_href[0], callback=self.parse_get_list)