#!/usr/bin/env python
# coding: utf-8

import re

import logging
logging.basicConfig(format='[%(asctime)s Line:%(lineno)d %(levelname)s] %(message)s', level=logging.DEBUG)

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import os
import sys
import urllib.request
import json
from IPython import embed
import socket

# Global socket timeout so urllib.request.urlretrieve cannot hang forever.
socket.setdefaulttimeout(20)


# CLI arguments: listing URL to crawl and output directory for images/JSON.
spider_url = sys.argv[1]
output_dir = sys.argv[2]
logging.info(spider_url)

# try:
#     browser = webdriver.PhantomJS()
#     print('Use PhantomJS as web driver')
# except:
#     browser = webdriver.Chrome()
#     print('Use Chrome as web driver')
browser = webdriver.Chrome()
# browser.set_window_size(1400, 900)

# Shared explicit wait: poll up to 10 s for elements to appear.
wait = WebDriverWait(browser, 10)

def search():
    """Open the listing page, scrape page 1, and return the pagination text.

    Returns the text of the page-skip form (contains the total page count,
    parsed later in main()). Retries indefinitely on TimeoutException.
    """
    # Loop instead of recursing on timeout: the original recursive retry
    # grew the call stack without bound on a flaky connection.
    while True:
        try:
            browser.get(spider_url)
            total = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#content > div.main > div.ui-page > div > b.ui-page-skip > form')))
            get_products()
            return total.text
        except TimeoutException:
            logging.warning('Timeout loading %s, retrying', spider_url)

def next_page(page_number):
    """Navigate to `page_number` via the page-skip form and scrape it.

    Waits until the current-page indicator shows `page_number` before
    parsing. Retries indefinitely on TimeoutException.
    """
    logging.info('Get next page:{}'.format(page_number))
    # Loop instead of recursing on timeout: the original recursive retry
    # grew the call stack without bound on a flaky connection.
    while True:
        try:
            input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#content > div.main > div.ui-page > div > b.ui-page-skip > form > input.ui-page-skipTo')))
            submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#content > div.main > div.ui-page > div > b.ui-page-skip > form > button')))
            input.clear()
            input.send_keys(page_number)
            submit.click()
            # Confirm navigation completed before scraping the page.
            wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#content > div.main > div.ui-page > div > b.ui-page-num > b.ui-page-cur'), str(page_number)))
            get_products()
            return
        except TimeoutException:
            logging.warning('Timeout on page %s, retrying', page_number)

def get_products():
    """Parse the currently loaded result page and download each product.

    Extracts url/price/title/shop/image urls for every item tile, then
    hands each product dict to _download_image().
    """
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_ItemList .product-iWrap')))
    doc = pq(browser.page_source)
    # Hoist the compiled pattern out of the loop; Tmall lazy-loads images,
    # so the real URL lives in the data-ks-lazyload attribute.
    lazyload_re = re.compile('data-ks-lazyload="(.*?)" src="data:', re.S)
    for item in doc('#J_ItemList .product-iWrap').items():
        try:
            img = item.find('.productImg-wrap .productImg')
            product = {
                'commodity_url': img.attr('href'),
                'price': item.find('.productPrice').text(),
                'title': item.find('.productTitle').text(),
                'shop': item.find('.productShop').text(),
                'image_url': re.findall(lazyload_re, img.html()),
            }
        except Exception as e:
            # Parsing failed — page layout likely replaced by a login wall.
            # NOTE: the original also printed type(product['image_url']) here,
            # but `product` is unbound when the dict build raised (NameError).
            print("需要登录", e)
            search()
            continue  # skip this item; `product` was never assigned
        print(product)
        try:
            _download_image(product, output_dir)
        except Exception as e:
            print(e)


image_count = 0
def _download_image(product, folder):
    global image_count
    os.makedirs(folder, exist_ok=True)
    for elem in product['image_url']:
        url = 'https:' + elem
    image_count += 1
    image_path = os.path.join(folder, '{:06d}.jpg'.format(image_count))
    json_path = os.path.join(folder, '{:06d}.json'.format(image_count))
    if os.path.exists(image_path) and os.path.exists(json_path):
        return
    logging.info('{}: Downloading {}'.format(image_count, url))
    for i in range(3):
        try:
            urllib.request.urlretrieve(url, image_path)
            with open(json_path, 'w') as f:
                json.dump(product, f, ensure_ascii=False, indent=4)
            break
        except:
            logging.warning('Retry {}'.format(i))
    logging.info('Download finished')


def main():
    """Scrape page 1, parse the total page count, then walk pages 2..N."""
    try:
        total = search()
        # Pagination text contains the page count as its first integer.
        total = int(re.search(r'(\d+)', total).group(1))
        for page in range(2, total + 1):
            next_page(page)
    finally:
        # quit() shuts down the whole driver process; close() only closes
        # the window and leaked the chromedriver. The pointless
        # `except Exception as e: raise e` was dropped — exceptions
        # propagate unchanged through the finally block.
        browser.quit()

# Entry point: run the scraper when executed as a script.
if __name__ == '__main__':
    main()

