# -*- coding: utf-8 -*-
# @Time    : 2021/3/17 10:52
# @Author  : YAQIWE
# @File    : amz_info.py
# @Software:  PyCharm
# @explain : 爬取亚马逊信息

from util import logging

import requests
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.DEBUG)

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
import time
import re


class amz_info:
    """Scrape Amazon product information (sales rank, review counts)
    by driving a local Chrome browser through Selenium.

    Typical flow:
        a = amz_info(); a.amzInfo('us'); a.seaAsin(asin)
        ranks = a.getRanking(); counts = a.getComment()
    """

    # Amazon storefront entry points keyed by country code.
    url = {'us': 'https://www.amazon.com', 'ca': 'https://www.amazon.ca'}

    # Headers for plain requests-based access (currently unused by the
    # Selenium path but kept for compatibility with earlier callers).
    headers = {'referer': '',
               'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
               'via': '1.1 04904401d608fcf25189f8fad65fe7a4.cloudfront.net (CloudFront)'}

    def amzInfo(self, country='us'):
        """
        Create a Chrome driver, open the Amazon site for ``country``
        and return the driver (also stored on ``self.chrom``).

        :param country: key into ``self.url`` ('us' or 'ca')
        :return: the Selenium Chrome WebDriver instance
        """
        logging.info(f'开始初始化浏览器，国家{country}')
        options = webdriver.ChromeOptions()
        # Make driver.get() return immediately instead of blocking until
        # the page fully loads; callers rely on sleeps / implicit waits.
        desired_capabilities = DesiredCapabilities.CHROME
        desired_capabilities["pageLoadStrategy"] = "none"

        if country == 'us':
            # Reuse the local Chrome profile so existing cookies and
            # session state (login, address) carry over.
            options.add_argument("--user-data-dir=" + "C:/Users/Administrator/AppData/Local/Google/Chrome/User Data/Default")
        # Strip the automation switches so sites are less likely to
        # detect that Chrome is driven by Selenium.
        options.add_experimental_option('excludeSwitches', ['enable-automation', 'enable-logging'])
        options.add_argument("--ignore-certificate-error")
        options.add_argument("--ignore-ssl-errors")

        # Fix: pass desired_capabilities to the driver — previously the
        # capabilities dict was built but never used, so the "none"
        # pageLoadStrategy had no effect.
        self.chrom = webdriver.Chrome(executable_path='./static/chromedriver.exe',
                                      chrome_options=options,
                                      desired_capabilities=desired_capabilities)
        self.chrom.get(self.url[country])
        # Implicit wait for element lookups, in seconds (was the string
        # '10'; the API expects a number).
        self.chrom.implicitly_wait(10)
        logging.info(f'浏览器初始化完成，国家{country}')
        return self.chrom

    def cleanAllCookie(self):
        """
        Clear all site data of the opened browser via Chrome's internal
        settings page.
        """
        self.chrom.get('chrome://settings/content/all')
        button = self.chrom.find_element_by_id('clearAllButton')
        # Fix: was ``button.cr - button.click()`` which raised
        # AttributeError (no ``cr`` attribute) before ever clicking.
        button.click()

    def setDeliver(self, deliverId='10001'):
        """
        Set the delivery ZIP code on the Amazon page.

        :param deliverId: ZIP code string to enter
        :return: True if the ZIP was accepted, False on any failure
        """
        try:
            logging.info('点击修改配送')
            delicerDiv = self.chrom.find_element_by_id('glow-ingress-block')
            delicerDiv.click()
            logging.info('输入配送地邮编')
            deliverInput = self.chrom.find_element_by_class_name('GLUX_Full_Width')
            deliverInput.send_keys(deliverId)
            time.sleep(2)
            logging.info('点击保存修改邮编')
            saveButton = self.chrom.find_element_by_xpath("//*[@aria-labelledby='GLUXZipUpdate-announce']")
            saveButton.click()
            try:
                # If the inline error element exists, the ZIP was rejected.
                self.chrom.find_element_by_class_name('a-alert-inline-error')
                logging.info('输入邮编错误')
                self.chrom.refresh()
                return False
            except Exception:
                # Element not found means no error was shown — ZIP accepted.
                logging.info('找不到该元素，输入邮编正常')
            time.sleep(5)
            logging.info('刷新页面')
            self.chrom.refresh()
            return True
        except Exception as e:
            logging.error(f'修改邮编错误:{e}')
            return False

    def seaAsin(self, asin: str):
        """
        Search for a product by ASIN and click through to its detail page.

        :param asin: the Amazon ASIN to search for
        :return: True if the first search result was opened, else False
        """
        logging.info(f'开始查找产品ASIN:{asin}')
        try:
            searchInput = self.chrom.find_element_by_id('twotabsearchtextbox')
            # Select-all + backspace clears any previous query text.
            searchInput.send_keys(Keys.CONTROL, 'a')
            searchInput.send_keys(Keys.BACK_SPACE)
            searchInput.send_keys(asin)
            self.chrom.find_element_by_id('nav-search-submit-button').click()
            # Open the first product in the result list.
            shoppingProject = self.chrom.find_element_by_class_name('a-link-normal.s-no-outline')
            shoppingProject.click()
            logging.info(f'查找产品ASIN:{asin}完成')
            return True
        except Exception as e:
            logging.error(f'未找到商品{asin},错误：{e}')
        return False

    def seaGood(self, goodName):
        """
        Submit a keyword search for ``goodName`` (stays on the result page).

        :param goodName: search keywords
        :return: None
        """
        logging.info(f'开始查找产品:{goodName}')
        try:
            searchInput = self.chrom.find_element_by_id('twotabsearchtextbox')
            # Clear any previous query before typing the new one.
            searchInput.send_keys(Keys.CONTROL, 'a')
            searchInput.send_keys(Keys.BACK_SPACE)
            searchInput.send_keys(goodName)
            self.chrom.find_element_by_id('nav-search-submit-button').click()
        except Exception as e:
            logging.error(f'查找产品{goodName},错误：{e}')

    def getAllGoodUrl(self):
        """
        Walk every result page (clicking "next" until it disappears) and
        collect the ASINs of the first 16 items on each page.

        :return: list of ASIN strings scraped from the result pages
        """
        urlList = []
        try:
            while True:
                # Wait for the page to render before scraping its source.
                time.sleep(4)
                htmlText = self.chrom.page_source
                urls = re.findall('<div data-asin="(.*?)" data-index', htmlText, flags=re.S)[0:16]
                urlList += urls
                # Raises when the "next" button is gone — ends the loop.
                nextButton = self.chrom.find_element_by_class_name('a-last')
                nextButton.click()
        except Exception as e:
            logging.error(f'查找所有产品url错误：{e}')
        return urlList

    def getRanking(self):
        """
        Scrape the "Best Sellers Rank" numbers from the current product page.

        :return: list of int ranks, or ['--', '--'] when no rank was found
        """
        logging.info(f'开始获取产品排名信息')
        try:
            # Wait for the page to finish rendering.
            time.sleep(4)
            html = self.chrom.page_source
            # The rank section lives in a table cell following the label.
            rankingsTest = re.findall('Best Sellers Rank(.*?)</td>', html, flags=re.S)[0]
            # Each rank is printed as "#<number> in <category>".
            rankIntList = re.findall('#(.*?)in', rankingsTest, flags=re.S)
            rankings = []
            for rankText in rankIntList:
                # Strip thousands separators and any other non-digits.
                rank = re.sub(r'\D', '', rankText)
                if len(rank) > 0:
                    rankings.append(int(rank))
            if len(rankings) > 0:
                return rankings
        except Exception as e:
            logging.error(f'获取商品排名信息错误:{e},无排名信息')
        logging.info(f'商品无排名信息')
        return ['--', '--']

    def getComment(self):
        """
        Scrape review counts from the current product page.

        :return: [total_review_count, image_review_count] as digit strings,
                 or ['0', '0'] when the page has no reviews
        """
        logging.info(f'开始获取商品评论信息')
        try:
            # Open the "see all reviews" page.
            allComment = self.chrom.find_element_by_class_name('a-link-emphasis.a-text-bold')
            allComment.click()
            # Wait for the review page to render.
            time.sleep(4)
            commentNText = self.chrom.find_element_by_class_name('a-row.a-spacing-base.a-size-base').text
            comNum = commentNText.split('|')[1]
            # Switch the filter to image-only reviews.
            imgComment = self.chrom.find_element_by_xpath("//*[@data-reftag='cm_cr_arp_d_viewopt_mdrvw']")
            imgComment.click()
            # Wait again so we don't re-read the unfiltered count.
            time.sleep(4)
            imgCommentNText = self.chrom.find_element_by_class_name('a-row.a-spacing-base.a-size-base').text
            imgComNum = imgCommentNText.split('|')[1]
            comNum = [re.sub(r'\D', '', comNum), re.sub(r'\D', '', imgComNum)]
            return comNum
        except Exception as e:
            logging.info(f'抓取评论出错无评论结果,错误信息{e}')
        logging.info('商品无评论信息')
        return ['0', '0']


if __name__ == '__main__':
    # Manual smoke test: open a US Amazon session and leave the browser
    # running for inspection.  (Removed: unused `info = {}`, a stray
    # no-op string literal, and ~30 lines of commented-out experiments.)
    scraper = amz_info()
    scraper.amzInfo('us')
    # Keep the process — and therefore the browser window — alive.
    time.sleep(100000)