# coding=utf-8
# Lagou.com job-listing crawler — hit anti-scraping measures, so browser-like headers are added.

import requests
from bs4 import BeautifulSoup
import json
import urllib.request

def main():
    """Fetch the Lagou Python job-search page and print the parsed HTML.

    Lagou blocks bare scripted requests, so the request carries a full set of
    browser-like headers (User-Agent, Referer, the X-Anit-Forge-* pair, and
    X-Requested-With) to get past the anti-scraping check.

    Side effects: performs one HTTP GET and prints the parsed document and its
    type to stdout. Returns None.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3664.3 Safari/537.36',
        'Host' : 'www.lagou.com',
        'Referer' : 'https://www.lagou.com/jobs/list_Python?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput=',
        'X-Anit-Forge-Code': '0',
        'X-Anit-Forge-Token': None,
        'X-Requested-With': 'XMLHttpRequest'
    }
    # Search arguments: first page ('pn': '1') of results for keyword 'Python'.
    query = {
        'first': 'true',
        'pn': '1',
        'kd': 'Python'
    }
    url = 'https://www.lagou.com/jobs/list_Python?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput='
    # Bug fix: the original passed `data=` to requests.get(), which puts the
    # arguments in the request *body* — servers ignore a GET body.  A GET's
    # arguments belong in the query string, i.e. `params=`.  A timeout is
    # added so a stalled connection cannot hang the script forever.
    result = requests.get(url, headers=headers, params=query, timeout=10)

    # NOTE(review): this endpoint returns an HTML search page, not JSON, so it
    # is parsed with BeautifulSoup.  The JSON-style handling
    # (result.json() -> json.dumps -> file) only applies to Lagou's Ajax
    # endpoint (positionAjax.json), which this URL is not.
    soup = BeautifulSoup(result.content, 'lxml')
    print(soup)
    print(type(soup))

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()