#!/usr/bin/env python
# Create: 2018/10/12
__author__ = '749B'
"""爬取汽车之家新网咨询"""

import os
import pickle
import requests
from bs4 import BeautifulSoup

from utils.check_cache import get_pk_name

# Derive a per-script pickle filename that serves as a local page cache.
pk_name = get_pk_name(__file__)
response = None
if os.path.exists(pk_name):
    print("已经爬取过了，获取缓存的内容...")
    # NOTE: unpickling is only safe here because this cache file was written
    # by this very script; never pickle.load() data from untrusted sources.
    with open(pk_name, 'rb') as f:
        response = pickle.load(f)

# Hit the network only when no cached page is available yet.
if not response:
    print("开始爬取页面...")
    # A timeout is essential: without it a stalled connection would make
    # requests.get() block forever and hang the whole script.
    response = requests.get('http://www.autohome.com.cn/news', timeout=10)
    # Persist the response so the next run can skip the network entirely.
    with open(pk_name, 'wb') as f:
        pickle.dump(response, f)

# Use the detected charset so response.text decodes without mojibake.
response.encoding = response.apparent_encoding
# print(response.text)

soup = BeautifulSoup(response.text, features='html.parser')
# The article list lives inside this lazily-loaded container div.
target = soup.find(id='auto-channel-lazyload-article')
# Guard: if the page layout changed and the container is missing,
# iterate over nothing instead of crashing on None.find_all().
li_list = target.find_all('li') if target else []
for item in li_list:
    a = item.find('a')
    # Some <li> elements carry no <a> tag; skip those to avoid AttributeError.
    if not a:
        continue
    # a.attrs is a plain dict, so .get() safely returns None when absent.
    print(a.attrs.get('href'))
    # Guard: an anchor without an <h3> headline would otherwise raise
    # AttributeError on .text (same failure mode as the missing <a> above).
    h3 = a.find('h3')
    title = h3.text if h3 else ''
    print(title, type(title))
    # Same guard for the thumbnail image.
    img = a.find('img')
    img_url = img.attrs.get('src') if img else None
    print(img_url)
    # The image URL is protocol-relative ("//..."); download code kept for reference:
    # img_response = requests.get("http:%s" % img_url)
    # safe_title = title.replace('/', '_')  # '/' in a title would break the path
    # file_name = "img/%s%s" % (safe_title, os.path.splitext(img_url)[1])
    # with open(file_name, 'wb') as f:
    #     f.write(img_response.content)




