# -*- coding: utf-8 -*-
# Scrape a dynamic website with Python, Selenium and BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By  # locate element
import os

# Bundled chromedriver binary package
import chromedriver_binary
# python-chromedriver-binary
# https://github.com/danielkaiser/python-chromedriver-binary

import time,datetime
import json


def create_headless_chrome(wait):
    """Launch a headless Chrome instance with an implicit wait.

    :param wait: implicit-wait timeout in seconds applied to element lookups
    :return: a configured ``selenium.webdriver.Chrome`` instance
    """
    options = webdriver.ChromeOptions()
    # Run without a visible window; --disable-gpu avoids rendering issues
    # in headless mode on some platforms.
    for flag in ('--headless', '--disable-gpu'):
        options.add_argument(flag)
    chrome = webdriver.Chrome(options=options)
    chrome.implicitly_wait(wait)  # seconds
    return chrome


def get_list_page_urls(base_url, end):
    """Build the paginated list-page URLs ``base_url?page=1`` .. ``?page=end``.

    :param base_url: category page URL without a query string
    :param end: last page number (inclusive); 0 or negative yields []
    :return: list of URL strings
    """
    urls = []
    for page in range(1, end + 1):
        urls.append("{0}?page={1}".format(base_url, page))
    return urls


def get_detail_page_urls(browser, urls_of_list_page, xpath_of_detail_link):
    """Visit every list page and collect the ``href`` of each matching anchor.

    :param browser: a webdriver-like object with ``get`` and
        ``find_elements_by_xpath``
    :param urls_of_list_page: iterable of list-page URLs to visit
    :param xpath_of_detail_link: XPath selecting the detail-page ``<a>`` tags
    :return: flat list of detail-page URLs, in visit order
    """
    detail_urls = []
    for list_url in urls_of_list_page:
        browser.get(list_url)
        anchors = browser.find_elements_by_xpath(xpath_of_detail_link)
        for anchor in anchors:
            detail_urls.append(anchor.get_attribute('href'))
    return detail_urls


def get_info(browser, urls_of_detail_page):
    """Scrape title, price and image URLs from each product detail page.

    :param browser: a webdriver-like object with ``get``,
        ``find_element_by_xpath`` and ``find_elements_by_xpath``
    :param urls_of_detail_page: iterable of product detail-page URLs
    :return: list of dicts with keys ``title``, ``price_sell``,
        ``imgs`` (list of image src URLs) and ``code`` (date-based id)
    """
    records = []
    for url in urls_of_detail_page:
        browser.get(url)
        record = dict()
        record['title'] = browser.find_element_by_xpath("//div[@class='product-detail-list-title']").text
        record['price_sell'] = browser.find_element_by_xpath("//span[@class='h-currency h-cny bold']").text
        # BUG FIX: the original read from the module-level global `driver`
        # instead of the `browser` parameter, coupling this function to
        # script state and breaking any other caller.
        ele_img_array = browser.find_elements_by_xpath("//div[@class='picture-viewer-scroll-content']/ul/li/img")
        record['imgs'] = [ele.get_attribute('src') for ele in ele_img_array]

        # Pseudo-unique record id: YYYYMMDD plus the last 5 characters of
        # the current Unix timestamp string (fractional-second digits).
        now = datetime.datetime.now()
        str_date = now.strftime("%Y%m%d")
        str_random = str(now.timestamp())[-5:]
        record['code'] = str_date + str_random
        records.append(record)
    return records


# Script entry point: start making requests
# url_example = 'https://www.uniqlo.cn/c/NVZHUANG.html?page=3'
# url_example = 'https://www.uniqlo.cn/product-detail.html?productCode=u0000000007483'

# Raw page source
# html = driver.page_source

# Locating elements and extracting data
# ele.get_attribute('innerHTML')
# ele.text
# xpath = "//div[@class='product-content']/p[2]"
# xpath = "//div[@class='product-content']"

# Scrape 5 list pages of the women's category, follow every product link,
# then persist the records as JSON.
driver = create_headless_chrome(10)
try:
    list_page_urls = get_list_page_urls('https://www.uniqlo.cn/c/NVZHUANG.html', 5)
    print(list_page_urls)
    detail_page_urls = get_detail_page_urls(driver, list_page_urls, "//div[@class='h-product']/a")
    print(detail_page_urls)
    records = get_info(driver, detail_page_urls)
finally:
    # BUG FIX: without try/finally a failure in any scrape step leaked the
    # headless Chrome / chromedriver processes. quit() shuts down every
    # window AND the chromedriver process, so a prior close() is redundant.
    driver.quit()

# Explicit utf-8 avoids platform-default encodings; ensure_ascii=False keeps
# Chinese titles human-readable in the output file.
with open('good_data_from_uniqlo.txt', 'w', encoding='utf-8') as file:
    file.write(json.dumps(records, ensure_ascii=False))



# Reference
# Python爬虫之谷歌浏览器无界面启动
# https://blog.csdn.net/u011304490/article/details/79955158
# selenium操作无界面chrome浏览器
# https://blog.csdn.net/qq_24499417/article/details/81408655
# python爬虫从入门到放弃（八）之 Selenium库的使用
# https://www.cnblogs.com/zhaof/p/6953241.html
# selenium with python
# https://selenium-python.readthedocs.io/
# 测试教程网
# http://www.testclass.net/selenium_python/webdriver-common-method

