# -*- coding: utf-8 -*-
"""
采集图片
"""
import requests
from bs4 import BeautifulSoup
from os import path

class Crawl:
    """Scrape image links from one page of a jandan.net board and download them.

    Usage is fluent: ``Crawl(content, page, path).http_get().html_parse().save()``.
    """

    def __init__(self, content, page, path):
        """
        Initialize the crawler.

        :param content: board to scrape: 'ooxx' (girls) or 'pic' (boring pictures)
        :param page: page number to scrape
        :param path: directory where downloaded images are written
        """
        self.url = "http://jandan.net/%s/page-%d#comments" % (content, page)
        self.content = None  # requests.Response, populated by http_get()
        self.pic = []        # absolute image URLs, populated by html_parse()
        self.savepath = path

    def http_get(self):
        """Fetch the board page and store the response; return self for chaining."""
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4',
            'Connection': 'keep-alive',
            'Host': 'jandan.net',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
        }
        self.content = requests.get(self.url, headers=headers, timeout=30)
        return self

    def html_parse(self):
        """Extract image URLs from the fetched page; return self for chaining.

        Safe to call before http_get() or after a failed fetch: self.pic is
        simply left empty.
        """
        # Guard: self.content is None until http_get() has run.
        if self.content is not None and self.content.status_code == 200:
            soup = BeautifulSoup(self.content.content, 'lxml')
            # hrefs are protocol-relative ("//wx1.sinaimg.cn/..."), so add a scheme.
            # NOTE: loop variable renamed from "path" so it no longer shadows os.path.
            self.pic = ["http:" + link['href']
                        for link in soup.select('a[class="view_img_link"]')]
        return self

    def save(self):
        """Download every collected image into self.savepath.

        Images that fail to download (non-200 response) are skipped silently,
        matching the original best-effort behavior.
        """
        for pic_url in self.pic:
            pic_name = path.basename(pic_url)
            # Close the response deterministically; timeout so a dead server
            # cannot hang the crawl forever.
            with requests.get(pic_url, stream=True, timeout=30) as response:
                if response.status_code != 200:
                    continue
                # os.path.join instead of string concatenation: works whether or
                # not savepath ends with a separator.
                with open(path.join(self.savepath, pic_name), 'wb') as pic_file:
                    # stream=True only helps if we actually consume in chunks;
                    # the original read .content, buffering the whole image.
                    for chunk in response.iter_content(chunk_size=8192):
                        pic_file.write(chunk)
                print(pic_name + '   保存成功')


if __name__ == '__main__':
    # Demo run: fetch page 1 of the "pic" board and parse its image links.
    crawler = Crawl('pic', 1, '')
    crawler.http_get().html_parse()
