#!/usr/bin/python
# -*- coding: utf-8 -*-

import urllib
import os
import requests
from bs4 import BeautifulSoup

# Site to scrape images from.
# NOTE(review): zhihu now redirects plain http to https — confirm this
# base URL still resolves for `requests`.
BASE_URL = "http://www.zhihu.com"
QUESTION_URL = BASE_URL+"/topic/20022251?page=%s"  # paged topic listing, %s = page number
BASE_DIR = 'F:/picture/'  # local root directory where images are saved

def is_unique(question_list, title):
    """Return True when no entry in question_list already carries this title."""
    seen_titles = (q['title'] for q in question_list)
    return title not in seen_titles

question_list = []
# Crawl the first 10 topic pages and collect "hot" questions
# (more than 500 comments), deduplicated by title.
for i in range(1, 11):
    # Fetch the page and parse its HTML.
    r = requests.get(QUESTION_URL % i)
    bs = BeautifulSoup(r.text, "html.parser")
    # Each question entry lives in a "div .content" node.
    content_list = bs.select("div .content")
    for content in content_list:
        comment_count = content.select(".toggle-comment")
        if len(comment_count) != 0:  # entry actually has a comment counter
            # Counter text looks like "<number> ..."; fall back to 0 when
            # the leading token is not a plain integer. Narrowed from a
            # bare except so real errors (e.g. KeyboardInterrupt) propagate.
            try:
                comment = int(comment_count[0].text.split(" ")[0])
            except ValueError:
                comment = 0

            if comment > 500:  # keep only hot questions (comments > 500)
                a_list = content.select("h2 > a")
                for a in a_list:
                    # Strip "?" so the title is safe to use as a folder name.
                    title = a.text.replace("?", "")
                    if is_unique(question_list, title):
                        question = dict(title=title, url=BASE_URL + a['href'])
                        question_list.append(question)
# 根据url一个一个打开网页获取里面的数据
for q in question_list:
    #创建文件夹
    path = BASE_DIR + q['title'] +"/"
    if not os.path.exists(path):
        os.mkdir(path)
    print q['title']
    #逐个打开网站然后抓取里面的图片
    r_item = requests.get(q['url'])
    bs_item = BeautifulSoup(r_item.text, "html.parser")
    img_list = bs_item.select(".origin_image .zh-lightbox-thumb .lazy")
    x = 0

    for img in img_list:
        print '%s%s.jpeg' % (path,x)
        urllib.urlretrieve(img['data-actualsrc'], '%s%s.jpeg' % (path, x))
        x += 1




