#!/usr/bin/env python
#-*-coding:utf-8-*-
__author__ = 'Mzx'

import requests,base64,time,re

from lxml import etree

# City codes selecting which cities' job listings are crawled.
city_nums = "749 530 538 763 765 653".split()

# Listing-page URL template; the trailing slot takes one of the city codes.
url = 'http://localhost:8080/page/MS1hbGljZQ==/12/{}'

# Browser-like request headers so the server treats us as a normal visitor.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'Referer': 'http://localhost:8080/page/MS1hbGljZQ==/12/749',
}
# Pattern hoisted out of the loops and compiled once: captures the job-requirement
# text that follows the "岗位要求" heading in a detail page's raw HTML.
_REQUIR_RE = re.compile(r'岗位要求</div>.*?<div class="layui-card-body">(.*?)</div>', re.S)


def _encode_segment(text):
    """Base64-encode *text* and return it as a str, for use in a URL path."""
    # base64 output is pure ASCII, so .decode() is safe and replaces the old
    # str(b64)[2:-1] repr-slicing hack.
    return base64.b64encode(text.encode()).decode()


def _strip_commas(text):
    """Remove ASCII and full-width commas so a field cannot break the CSV row."""
    return text.replace(",", "").replace("，", "")


def _fetch_detail(position_id):
    """Fetch one position's detail page.

    Returns a (crawl_time, requirement, address) tuple; the latter two fall
    back to placeholder strings when the page lacks the data.
    """
    detail_url = 'http://localhost:8080/getPosition?id={}'.format(_encode_segment(position_id))
    page_html = requests.get(url=detail_url, headers=headers).text
    page_tree = etree.HTML(page_html)

    # The site exposes no publish date; record the crawl timestamp instead.
    crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

    # Job requirements: first capture after the heading, else a placeholder.
    found = _REQUIR_RE.findall(page_html)
    requirement = found[0] if found and found[0] != '' else "无岗位要求"

    # Work address: 4th text node of the card body, else a placeholder.
    try:
        address = page_tree.xpath("//div[@class='layui-card-body']/text()")[3]
    except IndexError:
        address = "无工作地点"

    return crawl_time, requirement, address


with open("data.csv", "a", encoding='utf-8', newline='') as fp:
    for city in city_nums:
        listing = etree.HTML(requests.get(url=url.format(city), headers=headers).text)
        # The last badge reads "current/total"; take the total, +1 so the
        # 1-based range() below covers the final page.
        page_num = int(listing.xpath("//span[@class='layui-badge-rim']/text()")[-1].split("/", 1)[1]) + 1

        for page in range(1, page_num):
            try:
                time.sleep(0.5)  # throttle so the server is not hammered
                # Page token is "<page>-alice", base64-encoded into the path.
                page_url = ('http://localhost:8080/page/'
                            + _encode_segment(str(page) + '-alice')
                            + '/12/' + city)
                html_page = etree.HTML(requests.get(url=page_url, headers=headers).text)

                # Job title
                job_name = html_page.xpath("//legend[@class='center-to-head']/a/span/text()")
                # Salary
                job_salary = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md1'][1]/text()")
                # City of the position
                firm_city = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md1'][2]/span/text()")
                # Required years of experience
                job_time = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md1'][3]/text()")
                # Required education level
                job_city = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md1'][4]/text()")
                # Company name
                firm_name = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md6']/a/text()")
                # Company size
                firm_scala = html_page.xpath("//div[@class='layui-row']/div[@class='layui-col-md2']/span/text()")
                # Benefits, with commas stripped for CSV safety
                welfare = [_strip_commas(w) for w in
                           html_page.xpath("//div[@class='operation']/div[@class='tags']/span/i/text()")]

                # Per-position ids used to fetch each detail page.
                context_id = html_page.xpath("//legend[@class='center-to-head']/a/@id")
                details = [_fetch_detail(cid) for cid in context_id]

                for j, (crawl_time, requirement, address) in enumerate(details):
                    row = ",".join([
                        job_name[j], job_salary[j], firm_city[j], job_time[j],
                        job_city[j], firm_name[j], firm_scala[j], welfare[j],
                        crawl_time, _strip_commas(requirement), _strip_commas(address),
                    ]) + "\n"
                    fp.write(row)
                    print(row)

            except Exception as exc:
                # Was a bare "except:"; narrowed so Ctrl-C still works, and the
                # reason a page was skipped is no longer silently discarded.
                print("程序错误，跳过爬取:", exc)
                continue
