#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------
# -- 亚马逊广告排名
# -- 加拿大
# ****************************
# Author: lmay.Zhou
# Blog: www.lmaye.com
# Date: 2021/9/6 18:45
# Email: lmay@lmaye.com
# ----------------------------------------------------------
import time

import scrapy
from selenium import webdriver

from amazon_reptile import LOG
from amazon_reptile.items import AmazonReptileItem
from amazon_reptile.utils import jdbc_util, file_utils, dingding_utils


class AmazonCaSpider(scrapy.Spider):
    """Amazon Canada sponsored-ad ranking spider.

    Reads the tracked keyword for the configured postal code from the
    database, drives a Chrome browser to set the delivery postal code on
    amazon.ca and run the keyword search, then parses sponsored-brand,
    brand-video and sponsored-product placements from the result pages.
    """

    name = "amazon-ca"
    start_urls = ["https://www.amazon.ca"]

    def __init__(self, **kwargs):
        # Initialize the Spider base class up front so the spider object is
        # fully constructed even when setup bails out early or fails below
        # (the original only called super() at the very end, skipping it on
        # the empty-records return and on any exception).
        super().__init__(**kwargs)
        try:
            post_code1 = "M2R"
            post_code2 = "1A9"
            self.post_code = post_code1 + post_code2
            # NOTE(review): the interpolated value is a hard-coded constant
            # here; switch to a parameterized query before this ever takes
            # external input.
            sql = "select id, keywords from t_ad_top where storee_code = '{}'".format(self.post_code)
            records = jdbc_util.sql_execute(sql, "select", jdbc_util.sql_connect())
            # No tracked keyword configured for this postal code: nothing to do.
            if not records:
                return
            self.keyword = records[0]["keywords"]
            options = webdriver.ChromeOptions()
            # Headless mode (left disabled so the session can be observed)
            # options.add_argument('--headless')
            # Launch Chrome; "options=" replaces the deprecated
            # "chrome_options=" keyword argument.
            self.driver = webdriver.Chrome(options=options)
            self.driver.get(self.start_urls[0])
            # Maximize so the page lays out consistently for the selectors below
            self.driver.maximize_window()
            # Set the delivery postal code through the location popover;
            # sleeps give the dialog time to render.
            self.driver.find_element_by_id("nav-global-location-popover-link").click()
            time.sleep(3)
            self.driver.find_element_by_id("GLUXZipUpdateInput_0").click()
            self.driver.find_element_by_id("GLUXZipUpdateInput_0").send_keys(post_code1)
            self.driver.find_element_by_id("GLUXZipUpdateInput_1").click()
            self.driver.find_element_by_id("GLUXZipUpdateInput_1").send_keys(post_code2)
            time.sleep(2)
            # Submit the postal code and dismiss the dialog
            self.driver.find_element_by_xpath("//div[@id='GLUXZipInputSection']//input[@type='submit']").click()
            time.sleep(2)
            # Run the keyword search
            self.driver.find_element_by_id("twotabsearchtextbox").click()
            self.driver.find_element_by_id("twotabsearchtextbox").clear()
            self.driver.find_element_by_id("twotabsearchtextbox").send_keys(self.keyword)
            self.driver.find_element_by_id("nav-search-submit-button").click()
            time.sleep(5)
        except Exception as e:
            # Deliberate best-effort boundary: log the failure and raise a
            # DingTalk alert instead of crashing spider construction.
            LOG.error("[{}] 数据爬取失败: {}".format(self.name, e))
            dingding_utils.send_msg(self.name, e)

    def parse(self, response, **kwargs):
        """
            Parse one search-result page, emit ranking items and click
            through to page 2.

            :param response: response to parse
            :param kwargs: extra arguments
            :return: yields AmazonReptileItem / scrapy.Request
        """
        page_index = 2 if "page=2" in self.driver.current_url else 1
        if 2 == page_index:
            return
        # Sponsored brand placement
        yield self.parse_jackery(response, page_index)
        # Sponsored brand video placement
        yield self.parse_jackery_video(response, page_index)
        # Sponsored product placements
        sponsored_products = self.parse_sponsored_products(response, page_index)
        for it in sponsored_products:
            yield it
        # Dump the raw page for offline analysis
        file_utils.write_file("amazon-ca-{}.html".format(int(time.time())), response.text, "resources")
        time.sleep(5)
        # Click through to page 2 (two pagination layouts observed in the wild)
        if response.xpath("//div[@class='a-section a-spacing-none a-padding-base']"):
            self.driver.find_element_by_xpath(
                "//div[@class='a-section a-spacing-none a-padding-base']//li[@class='a-normal']//a[text()='2']").click()
        elif response.xpath(
                "//div[@class='a-section a-spacing-large a-spacing-top-large a-text-center s-pagination-container']"):
            self.driver.find_element_by_xpath(
                "//div[@class='a-section a-spacing-large a-spacing-top-large a-text-center s-pagination-container']//span[@class='s-pagination-strip']//a[text()='2']").click()
        else:
            return
        # NOTE(review): body= on a GET request does not make scrapy parse the
        # driver's page_source — the URL is fetched again. Confirm intended.
        yield scrapy.Request(url=self.driver.current_url, body=self.driver.page_source, encoding="utf8",
                             callback=self.parse)

    def parse_jackery(self, response, page_index=1):
        """
            Sponsored brand placement for Jackery.

            :param response: response to parse
            :param page_index: result page number
            :return: AmazonReptileItem or None
        """
        item = AmazonReptileItem()
        item["platform"] = "亚马逊加拿大"
        item["asin"] = "Jackery"
        # NOTE(review): "Sponsoed" typo kept as-is — downstream consumers
        # may match this exact value.
        item["type"] = "Sponsoed Brands"
        item["post_code"] = self.post_code
        item["keyword"] = self.keyword
        # Top-of-page banner (two markup variants observed).
        # NOTE(review): the inner ".//span..." lookups run against the whole
        # response, not within the matched header div — confirm intended.
        if response.xpath("//div[@class='a-section a-spacing-small a-spacing-top-small _bXVsd_header_2w_md']").get():
            if response.xpath(".//span[@class='a-truncate-full']//span[contains(text(), 'Jackery')]").get():
                item["ranking"] = "P{}-1".format(page_index)
                item["ranking_val"] = 24 if 1 == page_index else 20
                LOG.info("[AmazonCaSpider] 顶部推广: {}".format(item))
                return item
        elif response.xpath("//div[@class='_bGlmZ_header_2PHEO']").get():
            if response.xpath(".//span[@class='a-truncate-full']//span[contains(text(), 'Jackery')]").get():
                item["ranking"] = "P{}-1".format(page_index)
                item["ranking_val"] = 24 if 1 == page_index else 20
                LOG.info("[AmazonCaSpider] 顶部推广: {}".format(item))
                return item
        # Bottom-of-page carousel (two markup variants observed)
        if response.xpath("//div[@class='_bXVsd_track_GwIZL']").get():
            for it in response.xpath("//div[@class='_bXVsd_track_GwIZL']"):
                for it2 in it.xpath(".//div[@class='_bXVsd_container_3aZDQ']"):
                    data = self.jackery_bottom(it2, item, page_index)
                    if data:
                        LOG.info("[AmazonCaSpider] 底部推广: {}".format(data))
                        return data
        elif response.xpath("//div[@class='threepsl-creatives']").get():
            for it in response.xpath("//div[@class='threepsl-creatives']"):
                for it2 in it.xpath(".//div[@class='threepsl-creative']"):
                    data = self.jackery_bottom(it2, item, page_index)
                    if data:
                        LOG.info("[AmazonCaSpider] 底部推广: {}".format(data))
                        return data
        return None

    def parse_jackery_video(self, response, page_index=1):
        """
            Sponsored brand video placement for Jackery.

            :param response: response to parse
            :param page_index: result page number
            :return: AmazonReptileItem or None
        """
        item = AmazonReptileItem()
        item["platform"] = "亚马逊加拿大"
        item["asin"] = "Jackery Video"
        item["type"] = "Sponsoed Brands Video"
        item["post_code"] = self.post_code
        item["keyword"] = self.keyword
        # Video creative block
        # NOTE(review): the inner ".//h2..." lookup runs against the whole
        # response, not within the matched div — confirm intended.
        if response.xpath("//div[@class='s-include-content-margin s-border-bottom']").get():
            if response.xpath(".//h2//span[contains(text(), 'Jackery')]").get():
                item["ranking"] = "P{}".format(page_index)
                item["ranking_val"] = 24 if 1 == page_index else 12
                LOG.info("[AmazonCaSpider] 视频推广: {}".format(item))
                return item
        return None

    def parse_sponsored_products(self, response, page_index=1):
        """
            Sponsored product placements for Jackery.

            :param response: response to parse
            :param page_index: result page number
            :return: list of AmazonReptileItem
        """
        records = []
        # Page 2 is not scored for sponsored products
        if 2 == page_index:
            return records
        result = response.xpath("//div[@data-component-type='s-search-result']")
        total = len(result)
        if 0 == total:
            # No search results: nothing to score (also avoids the
            # ZeroDivisionError the per-slot score below would raise).
            return records
        # Per-slot score: 24 points spread across all result slots
        score = 24.0 / total
        for row, it in enumerate(result, start=1):
            # css .get() may return None when the div has no class attribute
            css_class = it.css("div::attr(class)").get() or ""
            if "AdHolder" in css_class and it.xpath(".//span[contains(text(), 'Jackery')]").get():
                item = AmazonReptileItem()
                item["platform"] = "亚马逊加拿大"
                item["asin"] = it.css("div::attr(data-asin)").get()
                item["type"] = "Sponsoed Products"
                item["post_code"] = self.post_code
                item["keyword"] = self.keyword
                item["ranking"] = "P{}-{}".format(page_index, row)
                # Higher slots earn more of the 24-point pool
                item["ranking_val"] = round((total - (row - 1)) * score, 2)
                records.append(item)
        LOG.info("[AmazonCaSpider] 产品推广: {}".format(records))
        return records

    @staticmethod
    def jackery_bottom(it2, item, page_index=1):
        """
            Score one bottom-carousel creative if it belongs to Jackery.

            :param it2: creative node
            :param item: item to fill in
            :param page_index: result page number
            :return: AmazonReptileItem or None
        """
        if not it2.xpath(".//span[contains(text(), 'Jackery')]").get():
            return None
        # data-index -> (ranking slot, page-1 score, page-2 score)
        slots = {
            "0": (2, 18, 14),
            "1": (3, 17, 13),
            "2": (4, 16, 12),
            "3": (5, 15, 11),
        }
        idx = it2.css("div::attr(data-index)").get()
        if idx not in slots:
            return None
        slot, p1_score, p2_score = slots[idx]
        item["ranking"] = "P{}-{}".format(page_index, slot)
        item["ranking_val"] = p1_score if 1 == page_index else p2_score
        return item

    def close(self, spider):
        """
            Shut the browser down when the spider closes.

            :param spider: the spider being closed
            :return:
        """
        # quit() (rather than close()) also terminates the chromedriver
        # process; guard because __init__ may have bailed out before
        # creating self.driver.
        driver = getattr(self, "driver", None)
        if driver is not None:
            driver.quit()
