# -*- coding: utf-8 -*-

import scrapy
from ymx.items import YmxItem
from time import sleep

class MySpider(scrapy.Spider):
    """Scrape Amazon.cn search-result pages.

    For each product link found on a result page, yields a ``YmxItem``
    carrying the product's 1-based rank on the page (``sort``), its display
    name (``name``) and its absolute URL (``url``).  After emitting the
    items it schedules additional keyword searches, re-entering ``parse``
    for each resulting page.
    """

    name = 'myspider'
    allowed_domains = ['amazon.cn']

    start_urls = [
        'https://www.amazon.cn/s?k=scissors&__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&crid=1Y65O5VMLGH51&qid=1555466054&sprefix=scissors%2Caps%2C149&ref=nb_sb_ss_i_1_8',
    ]

    def parse(self, response):
        """Extract ranked product items, then queue follow-up keyword searches.

        :param response: a Scrapy ``Response`` for an Amazon.cn search page.
        :returns: generator of ``YmxItem`` and ``scrapy.Request`` objects.
        """
        domain = 'https://www.amazon.cn'

        # Relative product links and their display names, in page order.
        urls = [domain + href.extract()
                for href in response.xpath('//div/h5/a/@href')]
        names = [span.extract()
                 for span in response.xpath('//div/h5/a/span/text()')]

        # BUG FIX: the original created ONE YmxItem before the loop and
        # mutated/yielded that same instance for every product, so any
        # pipeline or exporter holding references would observe only the
        # last product's values.  A fresh item is created per product.
        # zip() also guards against the two XPath lists differing in
        # length (the original indexed `names` by position and could
        # raise IndexError).
        for rank, (url, product_name) in enumerate(zip(urls, names), start=1):
            item = YmxItem()
            item['sort'] = rank          # 1-based position on this result page
            item['name'] = product_name
            item['url'] = url
            yield item

        # Queue a search for each fixed keyword.  Scrapy's built-in
        # duplicate filter stops the same search URL from being
        # re-crawled, so the self-recursive callback terminates.
        base_url = 'https://www.amazon.cn/s?k='
        last_url = '&__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&crid=1Y65O5VMLGH51&qid=1555466054%2Caps%2C149&ref=nb_sb_ss_i_1_8'
        keywords = ['T恤', '手机']
        for keyword in keywords:
            url = base_url + keyword + last_url
            print(url)
            yield scrapy.Request(url, callback=self.parse)



