#!/usr/bin/env python
# coding=utf-8

"""
@author: yaqiwe
@contact: 15338230927@163.com
@file: AmazonSearch.py
@date: 2020/10/27 20:37
@desc: 亚马逊搜索页面获取结果
"""

import time
from urllib.parse import urlencode

from bs4 import BeautifulSoup

from util.LogginUtil import logging
from util.RequestUtil import RequestUtil
from util.ProxyAccess import ProxyAccess


class AmazonSearch:
    """Fetch Amazon (UK) search result pages and filter product listings."""

    # Base site URL; also used to absolutize relative product hrefs.
    searchUrl = 'https://www.amazon.co.uk'

    # Default query parameters. k: search keyword, page: result page number.
    # Kept as a class attribute for backward compatibility, but searchInfo()
    # no longer mutates it in place — mutating a shared class-level dict
    # leaked state across instances and calls.
    parameter = {'k': 'socks',
                 'page': 1}

    def __init__(self):
        self.requestUtil = RequestUtil()
        self.proxyAccess = ProxyAccess()

    def searchInfo(self, k, page=1):
        """
        Request one search result page via the proxy client.
        :param k: search keyword
        :param page: result page number (1-based)
        :return: response object from the proxied request
        """
        # Build the query from a per-call copy so callers (and other
        # instances) never observe each other's parameters.
        params = dict(self.parameter, k=k, page=page)
        query = urlencode(params)
        url = self.searchUrl + '/s?' + query
        logging.debug('搜索关键词请求url:{}'.format(url))
        return self.proxyAccess.proxiesReq(url)

    def getShopUrlList(self, htmlText):
        """
        Extract absolute product URLs from a search result page.
        :param htmlText: HTML text of the search page
        :return: list of absolute product URLs
        """
        bs = BeautifulSoup(htmlText, 'html.parser')
        shopHtmlList = bs.select('.a-section.a-spacing-medium.a-text-center')
        urlList = []
        for shopHtml in shopHtmlList:
            aTable = shopHtml.select_one('.a-link-normal.a-text-normal')
            # Guard against result blocks without a product anchor, which
            # previously raised TypeError on aTable['href'].
            if aTable is not None and aTable.has_attr('href'):
                urlList.append(self.searchUrl + aTable['href'])
        logging.info('获取的url数目:{}'.format(len(urlList)))
        return urlList

    def isCurrentlyUnavailable(self, urlList):
        """
        Filter product URLs whose detail page shows "Currently unavailable.".
        :param urlList: iterable of product detail URLs; None is tolerated
        :return: list of URLs whose detail page contains the marker text
        """
        if urlList is None:
            logging.error('请输入正确的url列表')
            return []
        currentlyUnavailableUrlList = []
        for url in urlList:
            logging.debug('请求商品详情页url:{}'.format(url))
            resText = self.proxyAccess.proxiesReq(url).text.encode('utf8')
            bs = BeautifulSoup(resText, 'html5lib')
            try:
                infoHtml = bs.select_one('.a-column.a-span8.a-span-last')
                test = infoHtml.find(text='Currently unavailable.')
                logging.info('查找对比的字符串：{}'.format(test))
                if test is not None:
                    currentlyUnavailableUrlList.append(url)
                    logging.info('查找到的url：{}'.format(url))
            except Exception:
                # Layout differs or the selector matched nothing (infoHtml is
                # None): record the failure instead of swallowing it silently,
                # then keep scanning the remaining URLs.
                logging.debug('解析失败，跳过url:{}'.format(url), exc_info=True)
                continue
        return currentlyUnavailableUrlList
