# -*- coding: utf-8 -*-
"""
Created on 2021-12-15 10:59:46
---------
@summary:
---------
@author: 74793
"""
import json
import random
import re

import time
import feapder
from utils.log import logger
from items import job_info_item
from feapder.db.mysqldb import MysqlDB

from utils.db.mongo_pool import MongoPool
from utils.proxy import Proxy


class ListSpider(feapder.AirSpider):
    """Lagou job-listing spider.

    Reads search keywords from the ``job_detail`` MySQL table, requests the
    rendered Lagou search-result pages through a rotating proxy, and yields
    one ``JobInfoItem`` per posting parsed from the page's embedded
    ``__NEXT_DATA__`` JSON payload.
    """

    def start_requests(self):
        """Yield rendered list-page requests for a window of stored keywords.

        For each keyword, pages 1-30 of the Lagou search results are
        requested with a small randomized delay between pages (and a longer
        one between keywords) to reduce the chance of being blocked.
        """
        db = MysqlDB()
        sql = """select detail_job from job_detail"""
        jobs = db.find(sql)
        # NOTE(review): the [51:70] slice restricts this run to a fixed
        # window of keywords — presumably resuming a previous crawl; confirm
        # before reuse.
        for row in jobs[51:70]:
            keyword = row[0]
            for page in range(1, 31):
                url = f"https://www.lagou.com/wn/jobs?px=new&pn={page}&cl=false&fromSearch=true&labelWords=sug&suginput={keyword}&kd={keyword}"
                time.sleep(random.randint(0, 2))  # per-page throttle
                try:
                    yield feapder.Request(url, render=True)
                except Exception as e:
                    # Was a silent `pass`; log so request-construction
                    # failures are visible instead of vanishing.
                    logger.error("failed to build request for %s: %s", url, e)
            time.sleep(random.randint(5, 13))  # longer pause between keywords

    def download_midware(self, request):
        """Attach a random proxy from the Mongo-backed pool to the request.

        @param request: outgoing feapder Request
        @result: the same request with ``proxies`` set for http and https
        """
        mongoPool = MongoPool()
        ip = mongoPool.random()
        logger.info(ip)
        request.proxies = {
            'http': 'http://{}'.format(ip),
            'https': 'https://{}'.format(ip)
        }
        return request

    def validate(self, request, response):
        """Validate a response before parsing.

        @result: False to drop the request (404), raises to retry on any
                 other non-200 status, None (fall-through) to parse.
        """
        # Check 404 FIRST. In the original order the generic non-200 check
        # raised before this branch could ever execute, so 404 pages were
        # retried forever instead of being dropped.
        if response.status_code == 404:
            return False  # drop the request; parse() will not run

        if response.status_code != 200:
            raise Exception("response code not 200")  # retry

    def parse(self, request, response):
        """Parse one rendered Lagou list page into JobInfoItem records.

        The page embeds its data as JSON inside the ``__NEXT_DATA__``
        <script> tag; every entry of ``positionResult.result`` becomes one
        yielded item.
        """
        try:
            props = response.xpath('//script[@id="__NEXT_DATA__"]/text()').extract()[0]
            result = json.loads(props)
            results = result["props"]["pageProps"]["initData"]["content"]["positionResult"]['result']

            # TODO(review): the original also regex-stripped HTML from
            # job["positionDetail"] into a `jobDetail` local but never stored
            # it anywhere; that dead computation was removed. Add a
            # "jobDetail" field to JobInfoItem if the description is wanted.
            for job in results:
                response_data = {
                    # job title
                    "job_name": job["positionName"],
                    # full company name
                    "company": job["companyFullName"],
                    # "city·metro station" location string
                    "address": str(job["city"]) + "·" + str(job["stationname"]),
                    # salary range
                    "salary": job["salary"],
                    # required work experience
                    "workYear": job["workYear"],
                    # required education level
                    "education": job["education"],
                    # skill/label tags for the position
                    "jobLables": job["positionLables"],
                    # industry sector
                    "industryField": job["industryField"],
                    # employment type (full-time / part-time / internship)
                    "jobNature": job["jobNature"],
                    # perks / benefits blurb
                    "advantage": job["positionAdvantage"],
                    # company financing stage
                    "financeStage": job["financeStage"],
                    # company headcount bracket
                    "companySize": job["companySize"],
                    # position category levels 1-3
                    "firstType": job["firstType"],
                    "secondType": job["secondType"],
                    "thirdType": job["thirdType"],
                    # posting creation time
                    "createTime": job["createTime"],
                }

                yield job_info_item.JobInfoItem(**response_data)
            print(request.url + "====>成功")
        except Exception as e:
            # Broad catch keeps one malformed page from killing the crawl;
            # the failing URL and error are printed for later inspection.
            print("爬虫异常 " + request.url, e)





if __name__ == "__main__":
    # Launch the crawl with 10 worker threads.
    spider = ListSpider(thread_count=10)
    spider.start()
