import requests
from lxml import etree
import re, csv
import time
import random


# Scrape job postings from yuanjisong.com (猿急送 freelance-job board)


class Myspider:
    """Scraper for freelance job postings on yuanjisong.com.

    Workflow: get() downloads the requested number of listing pages,
    parse() extracts one record per posting, save() writes them to CSV.
    """

    def __init__(self):
        # Desktop Chrome User-Agent so the site serves the normal HTML page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
        }

    def get(self):
        """Prompt for a page count and download that many listing pages.

        Returns:
            dict: maps "pageN" -> raw HTML text of listing page N.

        Raises:
            ValueError: if the user input is not an integer.
            requests.HTTPError: if the server returns an error status.
        """
        page = input("输入你要爬取的页数>>>")
        res = {}
        for i in range(1, int(page) + 1):
            url = f'https://www.yuanjisong.com/job/allcity/page{i}'
            # timeout so a stalled connection cannot hang the crawl forever
            response = requests.get(url=url, headers=self.headers, timeout=10)
            # Fail loudly on HTTP errors instead of parsing an error page.
            response.raise_for_status()
            print("第%s页数据爬取完成!" % i)
            res["page%s" % i] = response.text
            # Polite random delay between requests (time/random are imported
            # at file level for exactly this purpose).
            time.sleep(random.uniform(0.5, 1.5))
        return res

    def parse(self, res):
        """Extract job fields from each downloaded listing page.

        Args:
            res: dict of page-name -> HTML text, as produced by get().

        Returns:
            list[dict]: one dict per posting with keys
            'post', 'detail', 'salary', 'date'.
        """
        data = []
        for page in res:
            # Parse the raw HTML of this page into an element tree.
            html = etree.HTML(res[page])
            # Posting cards live under #db_adapt_id; the first two child
            # divs are header/filter rows, so skip them.
            list_post = html.xpath(
                '//div[@id="db_adapt_id"]/div[position()>2]'
            )
            for post in list_post:
                # Each xpath() returns a (possibly empty) list of text nodes.
                post_title = post.xpath('./a/div/div/text()')          # job title
                require = post.xpath('./div[1]/div/div/a/p/text()')    # requirements
                date = post.xpath('./div[1]/div[3]/div/p/span[3]/text()')    # working hours
                salary = post.xpath('./div[1]/div[4]/div/p/span[3]/text()')  # pay
                # Skip malformed cards instead of crashing on IndexError.
                if not (post_title and require and date and salary):
                    continue
                data.append({
                    'post': post_title[0],
                    'detail': require[0].strip(),
                    'salary': salary[0],
                    'date': date[0],
                })
        return data

    def save(self, data):
        """Write the parsed postings to data/猿急送岗位.csv.

        Args:
            data: list of posting dicts as produced by parse().
        """
        # newline='' is required by the csv module to avoid blank rows on
        # Windows; utf-8-sig adds a BOM so Excel displays the Chinese text.
        with open('data/猿急送岗位.csv', 'w', encoding='utf-8-sig', newline='') as f:
            fields = ['项目', '项目需求', '工时', '总价']
            writer = csv.DictWriter(f, fieldnames=fields)
            writer.writeheader()
            for post in data:
                # Map internal keys to the Chinese column headers.
                writer.writerow({
                    '项目': post['post'],
                    '项目需求': post['detail'],
                    '工时': post['date'],
                    '总价': post['salary'],
                })


if __name__ == '__main__':
    # Run the full pipeline: download -> parse -> write CSV.
    spider = Myspider()
    pages = spider.get()
    records = spider.parse(pages)
    spider.save(records)
