import re
import requests
from os import path


def get_index(url):
    """Fetch *url* and return its decoded HTML body.

    :param url: page URL to request and validate
    :return: the response text decoded as utf-8 on HTTP 200,
             otherwise ``None``
    """
    # timeout keeps the crawler from hanging forever on a dead host
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        response.encoding = 'utf-8'
        return response.text
    return None


def parse_index(res):
    """Extract image URLs from a fetched page's HTML.

    :param res: HTML text of a page (may be ``None`` if the fetch failed)
    :return: list of URL strings captured from ``class="mr-3" src="..."``
             attributes; an empty list when *res* is empty or ``None``
    """
    if not res:
        return []
    # re.S lets '.' span newlines so attributes broken across lines still match
    return re.findall(r'class="mr-3" src="(.*?)"', res, re.S)


def get_url(urls, page_id=None):
    """Download every image URL in *urls* and hand each one to ``save``.

    :param urls: iterable of image URLs (absolute, or site-relative paths)
    :param page_id: ID used to prefix saved filenames; defaults to the
        module-level loop counter ``i`` for backward compatibility
    :return: None
    """
    if page_id is None:
        # the original read the global loop variable ``i`` directly;
        # keep that as the fallback so existing callers behave the same
        page_id = i
    for url in urls:
        print(f'正在爬取--->[ {url} ]')
        # site-relative paths need the host prepended
        if not url.startswith('http'):
            url = f'http://www.xiaohuar.com{url}'
        # timeout keeps one dead URL from stalling the whole crawl
        result = requests.get(url, timeout=10)
        if result.status_code == 200:
            print(f'爬取--->[ {url} ]成功')
            save(url=url, i=page_id)


def save(url, i):
    """Download *url* and write it into the global ``paths`` directory.

    :param url: direct URL of the image/video file to download
    :param i: page ID prefixed onto the filename to keep sources distinct
    :return: None
    """
    video = requests.get(url, timeout=10)
    if video.status_code != 200:
        return
    print(f'[ {url} ]请求成功')
    url_name = url.split("/")[-1]  # original filename = last path segment of the URL
    file_name = f'{i}_{url_name}'  # prefix with page ID so files from different pages don't collide
    # path.join is portable; the original hard-coded a backslash separator
    save_name = path.join(paths, file_name)
    if path.exists(save_name):  # skip re-downloading files we already have
        print(f'文件[ {save_name} ]已存在')
        return
    try:
        with open(save_name, 'wb') as f:
            f.write(video.content)
    except IOError:
        print(f'写入文件[ {save_name} ]错误')


# Destination folder for downloads. Raw string so the backslashes are taken
# literally (the original relied on '\Y' / '\P' being invalid escapes).
paths = r'D:\Y\Pictures\爬虫'
if not path.isdir(paths):
    print(f'文件夹[ {paths} ]不存在，请重新设置')
    exit(1)

# Start and end page IDs for the crawl (end is exclusive).
qidian = 1234
zhongdian = 1235
for i in range(qidian, zhongdian):
    # NOTE(review): this URL is constant — it does not interpolate ``i``,
    # so every iteration fetches the same page; confirm the intended
    # per-page URL template.
    res1 = get_index('https://www.500d.me/newcvresume/edit/?itemid=255&resumeId=9131118')
    res2 = parse_index(res1)
    if res2:  # only crawl when the parser actually found image URLs
        get_url(res2)
