"""
Python爬虫一般步骤：请求URL，下载对应网页 -> 解析网页 -> 数据存储
    常用于请求的库（网络）有urllib，urllib3，requests等等
    常用于解析网页的库有：re，lxml，BeautifulSoup等

爬虫难度递增：
    1. 下载一般的网页
    2. 下载js动态的网页
    3. 下载需要登录的网页
    4. 下载带有验证码的网页
"""
import logging

import requests
from lxml import etree

# Spoof a desktop Chrome browser in every request; Amazon may serve
# different (or blocked) content to the default python-requests UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
                  '71.0.3578.98 Safari/537.36'
}


def request():
    """Fetch the Amazon search-result page (query: 雀巢) and hand it to parse().

    Returns:
        requests.Response: the response of the search page, so callers can
        inspect it after the crawl kicks off.

    Raises:
        requests.HTTPError: if the search page returns an error status.
    """
    url = "https://www.amazon.cn/s?k=%E9%9B%80%E5%B7%A2&__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99" \
          "&ref=nb_sb_noss_1"
    # timeout prevents the crawler from hanging forever on a stalled connection
    res = requests.get(url=url, headers=headers, timeout=10)
    # fail fast on HTTP errors (e.g. an anti-bot 503) instead of parsing junk
    res.raise_for_status()
    parse(res)
    return res


def parse(response):
    """Parse one search-result page: fetch every product detail page on it,
    then follow the "next page" link recursively if one exists.

    Args:
        response: requests.Response for a search-result listing page.

    Returns:
        int: always 0 (kept for backward compatibility; value is unused).
    """
    tree = etree.HTML(response.text)
    # Relative hrefs of the product links on this result page.
    href = tree.xpath(
        '//*[@id="search"]/div[1]/div[2]/div/span[3]/div[1]/div/div/div/div/div[2]/div[1]/div/div/span/a/@href')
    detail_urls = ['https://www.amazon.cn' + x for x in href]
    for url in detail_urls:
        # timeout keeps one dead detail page from stalling the whole crawl
        res = requests.get(url=url, headers=headers, timeout=10)
        parse_detail(res)
    # <li class="a-last"> holds the relative link to the next result page.
    next_url = tree.xpath('//li[@class="a-last"]/a/@href')
    if next_url:
        print("下一页")
        # Follow the next result page by recursing into parse().
        # NOTE(review): very deep result sets could hit Python's recursion
        # limit — consider rewriting pagination as an iterative loop.
        res = requests.get(url='https://www.amazon.cn'+next_url[0], headers=headers, timeout=10)
        parse(res)
    return 0


def parse_detail(response):
    """Extract and clean product data from a detail page, then print it.

    Builds a dict with keys "name", "price" and "image_urls" and prints it.
    On any parse failure (missing node -> IndexError, layout change, ...)
    the page URL is logged with a traceback instead of crashing the crawl.

    Args:
        response: requests.Response for a product detail page.
    """
    tree = etree.HTML(response.text)
    sku = {}
    try:
        sku["name"] = tree.xpath('//span[@id="productTitle"]/text()')[0].strip()
        # TODO: the price can live under a different element on some pages
        sku["price"] = tree.xpath('//span[@id="priceblock_ourprice"]/text()')[0].lstrip('￥')
        # Thumbnail URLs look like:
        #   .../I/51aqbYZ1RsL._SX38_SY50_CR,0,0,38,50_.jpg
        # Dropping the last 28 chars strips the size suffix; appending
        # 'SX600.jpg' requests a larger rendition. The final thumbnail is
        # discarded (matches the original "最后一张舍去" behavior).
        image_urls = tree.xpath('//*[@id="altImages"]//img/@src')
        sku["image_urls"] = [u[:-28] + 'SX600.jpg' for u in image_urls[:-1]]
        print(sku)
    except Exception:
        # Log which page failed and why (full traceback) rather than
        # printing the bare exception and losing context.
        logging.exception("failed to parse detail page: %s", response.url)


def save(data):
    """Persist scraped data. Placeholder — not implemented yet."""
    pass


if __name__ == "__main__":
    request()
