# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Scrape Lagou job listings and save them to a CSV file; fetching each
# individual job's description does not yet work reliably (anti-bot redirect).
import csv
import time, re, json
import requests
from urllib.parse import quote
from lxml import etree


class LG(object):
    """Scrape Lagou job listings for one city and write them to a CSV file.

    The site serves listing data from an Ajax endpoint that only answers
    requests carrying the cookies and Referer of the regular listing page,
    so every request after the first goes through one ``requests.Session``.
    """

    def __init__(self, city):
        # Company short names collected while parsing, for a summary print.
        self.data_list = []
        # Landing page: its HTML embeds the city-name -> city-number map.
        self.start_url = 'https://www.lagou.com/jobs/list_'
        # Ajax endpoint that returns the actual position data as JSON.
        self.url = 'https://www.lagou.com/jobs/positionAjax.json'
        self.default_headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
        }
        # Query-string parameters sent with every Ajax POST.
        self.query = dict(
            city=city,
            needAddtionalResult="false"
        )
        # One session so the anti-scraping cookies persist across requests.
        self.s = requests.Session()
        # Fetch through the session (was a plain requests.get) so any cookies
        # set by the landing page are kept for the later Ajax calls.
        self.get_response = self.s.get(url=self.start_url, headers=self.default_headers, timeout=3)
        csv_fileName = time.strftime("%Y-%m-%d", time.localtime())
        # utf-8-sig so Excel opens the Chinese text correctly; the previous
        # platform-default encoding could fail on non-GBK/UTF-8 locales.
        self.csv_fp = open(csv_fileName + 'lagou.csv', 'w', newline='', encoding='utf-8-sig')
        file_name = ['公司名字', '公司城市', '招聘职位',
                     '发布时间', '薪资待遇', '经验要求',
                     '公司规模', '公司福利', '公司地址', '地址', '职位详情url']
        self.writer = csv.writer(self.csv_fp)
        self.writer.writerow(file_name)

    def get_page(self, city, kd):
        """Resolve the city's numeric id and fetch its listing page.

        Returns a tuple ``(total_pages, headers)`` where ``headers`` carries
        the Referer the Ajax endpoint requires.

        Raises ValueError if the city-number map cannot be found in the
        landing page, and KeyError for an unknown city name.
        """
        kd = quote(kd)
        # The landing page embeds "global.cityNumMap = {...};" in a script tag.
        patt = re.compile(r'global.cityNumMap =(.*?);')
        matches = patt.findall(self.get_response.text)
        if not matches:
            raise ValueError("cityNumMap not found; Lagou page layout may have changed")
        # City name -> numeric id, used to build the city_url below.
        dic_cityNum = json.loads(matches[0])
        cityNum = dic_cityNum[city]
        city_url = f'https://www.lagou.com/jobs/list_{kd}/p-city_{cityNum}'
        # The Ajax endpoint rejects requests whose Referer is not the listing page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
            'Referer': city_url
        }
        # Visit the listing page to obtain the session cookies and page count.
        response = self.s.get(url=city_url, headers=self.headers, timeout=3)
        html = etree.HTML(response.text)
        pages = html.xpath('//div[@class="page-number"]/span[last()]/text()')[0]
        print(f"总共获取到{pages}页数据!")
        return (int(pages), self.headers)

    def get_sid(self, kd, headers):
        """POST the first-page query and return the 'showId' token.

        The site expects this token back (as ``sid``) with every page
        request after the first.
        """
        self.data = dict(
            first=True,
            pn=1,
            kd=kd,
        )
        # The session already carries its own cookies; no need to pass them.
        response = self.s.post(url=self.url, params=self.query, data=self.data,
                               headers=headers, timeout=3)
        content = json.loads(response.text)
        return content['content']['showId']

    def main(self, kd, page, sid):
        """Build the form payload for one result page.

        Page 1 is flagged ``first=True`` and omits the sid; later pages
        send ``first=False`` plus the sid token from :meth:`get_sid`.
        """
        self.data = dict(
            first=(page == 1),
            pn=page,
            kd=kd,
        )
        if page != 1:
            self.data['sid'] = sid
        return self.data

    def parse(self, data, headers, page, sid):
        """POST one page's payload, write the rows to CSV and print them.

        Sleeps before and after the request to stay under the site's
        rate limiting.
        """
        time.sleep(2)
        response = self.s.post(url=self.url, params=self.query, data=data,
                               headers=headers, timeout=3)
        time.sleep(5)
        content = json.loads(response.text)
        result = content['content']['positionResult']['result']
        # Write first: this also normalizes empty 'stationname' fields.
        self.write_csv(result)
        for i in result:
            s = [i['companyFullName'], i['city'], i['positionName'],
                 i['createTime'], i['salary'], i['workYear'],
                 i['companySize'], i['positionAdvantage'],
                 i['district'], i['stationname']]
            print(s)
            # self.job_info(i['positionId'], sid)  # detail fetch still unreliable
            self.data_list.append(i['companyShortName'])
        print("共获取到本页数据:%s条" % len(result))

    def job_info(self, positionId, show):
        """Fetch one job's detail page and print its description and address.

        Known issue: the site often redirects to an anti-bot tracking page
        (utrack/trackMid.js); in that case the raw HTML is dumped to a file
        named after the position id for inspection.
        """
        url = f'https://www.lagou.com/jobs/{positionId}.html'
        res = self.s.get(url, headers=self.default_headers, timeout=3)
        html = etree.HTML(res.text)
        job_detailss = html.xpath('//div[@class="job-detail"]//text()')
        job_detail = "".join(job_detailss)
        time.sleep(1)
        try:
            # [0] raises IndexError when the address input is absent,
            # i.e. when we were served the anti-bot page instead.
            job_address = html.xpath('//input[@name="positionAddress"]/@value')[0]
        except IndexError:
            print("///***", res.url)
            # Dump the unexpected page so the redirect can be inspected.
            with open(str(positionId) + ".html", 'w', encoding='utf-8') as f:
                f.write(res.text)
        else:
            print(job_detail.strip(), job_address)

    def write_csv(self, result):
        """Write one page of position dicts as CSV rows.

        Mutates each dict in place, replacing an empty 'stationname'
        with the placeholder "空值".
        """
        for i in result:
            print("写入数据：", i['companyFullName'])
            if not i['stationname']:
                i['stationname'] = "空值"
            job_url = 'https://www.lagou.com/jobs/{}.html'.format(i['positionId'])
            row = [i['companyFullName'], i['city'], i['positionName'],
                   i['createTime'], i['salary'], i['workYear'],
                   i['companySize'], i['positionAdvantage'],
                   i['district'], i['stationname'], job_url]
            self.writer.writerow(row)


if __name__ == '__main__':
    # Fall back to the defaults when the user just presses Enter.
    kd = input("请输入职位名称(默认python爬虫)：") or 'python爬虫'
    city = input("请输入城市(默认北京)：") or '北京'
    lg = LG(city)
    # Resolve the page count and the Referer headers the Ajax API needs.
    pages, headers = lg.get_page(city, kd)
    # Fetch the sid token once; pages after the first reuse it.
    sid = lg.get_sid(kd, headers)
    for page in range(1, pages + 1):
        print("******第%s页******" % page)
        payload = lg.main(kd, page, sid)
        lg.parse(payload, headers, page, sid)
    print("展示公司名字：", lg.data_list)
    lg.csv_fp.close()