#!/usr/bin/env python
# -*-coding:UTF-8 -*-
'''
@Project ：爬虫-波波老师
@File：25-同步爬虫.py
@Author ：文非
@Date：2021/3/14 14:53
@Require： 同步爬虫的实现过程  本程序主要实现单线程同步串行的方式 遇到阻塞等待在执行后面的爬虫
'''

import requests


def get_content(url, headers, timeout=10):
    """Fetch a URL and return the raw response body.

    Args:
        url: Target URL to download.
        headers: Dict of HTTP headers sent with the request (e.g. User-Agent).
        timeout: Seconds to wait for connect/read before giving up; without
            this a stalled server would block the whole serial crawler forever.

    Returns:
        The response body as ``bytes`` on HTTP 200, otherwise ``None``.
    """
    # 1. Announce which URL is being crawled (kept for the demo's trace output).
    print("正在爬取", url)
    # 2. Send the request; `timeout` prevents an indefinite hang on a dead host.
    response = requests.get(url=url, headers=headers, timeout=timeout)
    # 3. Only a successful response yields data; every other status maps to None
    #    so the caller can detect the failed fetch explicitly.
    if response.status_code == 200:
        return response.content
    return None

def parse_content(content):
    """Report the size of a downloaded payload.

    Args:
        content: Response body in ``bytes``, or ``None`` when the fetch
            failed (``get_content`` returns None for non-200 responses).
            The original code called ``len(content)`` unconditionally and
            raised ``TypeError`` on None; a failed fetch now reports 0.
    """
    # Treat a missing payload as zero-length instead of crashing on len(None).
    size = 0 if content is None else len(content)
    print("响应数据的长度为：", size)

def main():
    """Entry point: crawl each resume archive one after another.

    Demonstrates the synchronous (blocking, single-threaded) model — each
    download must finish before the next one starts.
    """
    targets = (
        'https://downsc.chinaz.net/Files/DownLoad/jianli/202101/jianli14390.rar',
        'https://downsc.chinaz.net/Files/DownLoad/jianli/202012/jianli14173.rar',
        'https://downsc.chinaz.net/Files/DownLoad/jianli/202011/jianli14051.rar',
        'https://downsc.chinaz.net/Files/DownLoad/jianli/202011/jianli13928.rar',
    )
    request_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
    }
    # Fetch and report serially — each iteration blocks until its download ends.
    for target in targets:
        parse_content(get_content(target, request_headers))


if __name__ == "__main__":
    main()
