# -*- coding: utf-8 -*-
from selenium import webdriver
from bs4 import BeautifulSoup
import os
from urllib.request import urlretrieve
import time
import threading

# Path to the local PhantomJS headless-browser binary (adjust per machine).
browserPath = '/path/to/phantomjs/bin/phantomjs'
# Taobao model-search landing page that the scrape starts from.
homePage = 'https://mm.taobao.com/search_tstar_model.htm'
# Launch PhantomJS with image loading disabled to speed up page fetches.
driver = webdriver.PhantomJS(executable_path=browserPath, service_args=['--load-images=no'])

# Use a tall window so the JS-rendered list shows many items without scrolling.
driver.set_window_size(1920, 2000)
driver.get(homePage)
time.sleep(3)  # crude fixed wait for the JS-rendered page to finish loading

all_models = []  # BeautifulSoup "item" tags accumulated across result pages
NUM_PAGES = 3    # number of result pages to scrape
for page in range(NUM_PAGES):
    # Parse the currently rendered page and collect every model card on it.
    soup = BeautifulSoup(driver.page_source, "lxml")
    all_models.extend(soup.findAll(class_="item"))

    # Only advance when another page is still needed. The original code
    # clicked "next" even after the last collected page, which is wasted
    # work and raises NoSuchElementException when no next-page link exists.
    if page < NUM_PAGES - 1:
        print('点击下一页')
        driver.find_element_by_link_text('下一页 >').click()
        time.sleep(3)  # wait for the next page to render

print(len(all_models))
for item in all_models:
    # --- extract the fields of one model card ---
    link = item.find('a')
    img = item.find("img")
    name_tag = item.find(class_="name")
    city_tag = item.find(class_="city")
    if not (link and img and name_tag and city_tag):
        # Malformed card (e.g. an ad slot) — skip it instead of crashing
        # on a None lookup.
        continue
    detail_url = link['href']  # link to the model's profile page
    # List thumbnails are lazy-loaded: the real URL usually lives in
    # data-ks-lazyload, falling back to src for already-loaded images.
    img_url = img.get("data-ks-lazyload") or img.get('src')
    name = name_tag.get_text()
    city = city_tag.get_text()
    print(name, city)

    # Create photos/<city>/<name>/ (makedirs creates intermediate dirs too).
    dir_name = os.path.join('photos', city, name)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)

    # Protocol-relative URLs ("//host/...") need an explicit scheme.
    if detail_url.startswith("//"):
        detail_url = "http:" + detail_url
    if img_url and img_url.startswith("//"):
        img_url = "http:" + img_url
    print("detail_url=%s" % detail_url)
    print("img_url=%s" % img_url)

    # Save the list thumbnail once (img_url may be None for broken cards;
    # the original code crashed on startswith/urlretrieve in that case).
    if img_url:
        filename = os.path.join(dir_name, img_url.split('/')[-1])
        if not os.path.exists(filename):
            urlretrieve(img_url, filename)

    # --- fetch the profile page and download every photo on it ---
    driver.get(detail_url)
    time.sleep(3)  # wait for the JS-rendered profile page
    soup_detail = BeautifulSoup(driver.page_source, "lxml")
    content = soup_detail.find(class_="mm-aixiu-content")
    if content is None:
        # Layout changed or the page failed to load — the original code
        # raised AttributeError here; skip this profile instead.
        continue
    for img in content.findAll('img'):
        img_url = img.get('src')
        if not img_url:
            continue
        if img_url.startswith("//"):
            img_url = "http:" + img_url
        filename = os.path.join(dir_name, img_url.split('/')[-1])
        if not os.path.exists(filename):
            print('downloading', filename)
            # Download in a background thread so fetches overlap network I/O.
            threading.Thread(target=urlretrieve, args=(img_url, filename)).start()
            print(threading.active_count())
            # Throttle: block while more than 3 threads (incl. main) run.
            while threading.active_count() > 3:
                threading.Event().wait(3)
                print(threading.active_count())

driver.close()