import time

import requests

from lxml import etree
import re
import pandas as pd

# Workaround for the ':'-prefixed HTTP/2 pseudo-header keys (needed by the hyper library)
# import collections.abc

# hyper needs the four following aliases to be done manually.
# collections.Iterable = collections.abc.Iterable
# collections.Mapping = collections.abc.Mapping
# collections.MutableSet = collections.abc.MutableSet
# collections.MutableMapping = collections.abc.MutableMapping
# from hyper.contrib import HTTP20Adapter

"""
https://www.amazon.com/product-reviews/B091FHCGPY/ref=cm_cr_getr_d_show_all?ie=UTF8&showViewpoints=1&pageNumber=1&reviewerType=all_reviews&filterByStar=all_stars&sortBy=recent
ASIN: B091FHCGPY
pip install selenium webdriver-manager -i https://mirrors.aliyun.com/pypi/simple
"""


def use_charom():
    """Fetch an Amazon review page with a real Chrome browser via Selenium.

    Hides the usual webdriver fingerprints so Amazon serves the normal
    page, prints the rendered HTML, mirrors it to ``111.html``, and keeps
    the window open for 30 seconds for manual inspection.

    Side effects: launches Chrome, writes ``111.html``, sleeps 30 s.
    """
    from selenium import webdriver
    from selenium.webdriver.chrome.service import Service as ChromeService
    from webdriver_manager.chrome import ChromeDriverManager

    url = ("https://www.amazon.com/Tarsus-Kabbalah-Bracelet-Protection-Bracelets"
           "/product-reviews/B088TGBRGY/ref=cm_cr_arp_d_paging_btm_2"
           "?ie=UTF8&pageNumber=2&reviewerType=all_reviews")

    chrome_options = webdriver.ChromeOptions()
    # Hide the "Chrome is being controlled by automated software" banner.
    chrome_options.add_experimental_option(
        "excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)
    chrome_options.add_argument('lang=zh-CN,zh,zh-TW,en-US,en')
    chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36')
    # This switch removes the webdriver traces Chrome normally exposes,
    # which is what defeats Amazon's bot detection here.
    chrome_options.add_argument("disable-blink-features=AutomationControlled")

    driver = webdriver.Chrome(
        service=ChromeService(ChromeDriverManager().install()),
        options=chrome_options)
    try:
        driver.get(url)
        print(driver.page_source)
        with open('111.html', mode='w', encoding='utf-8') as o_file:
            o_file.write(driver.page_source)
        time.sleep(30)
    finally:
        # Fix: the original never quit the driver, leaking a Chrome
        # process on every call (and on any exception above).
        driver.quit()


def getHtml(pageNum):
    """Request one page of Amazon reviews over HTTP/2 and return the response.

    Args:
        pageNum: 1-based review page number to fetch.

    Returns:
        The ``httpx.Response`` for the requested page.

    Side effects: prints the response body and mirrors it to ``111.html``.
    """
    # Bug fix: the page number was hard-coded to 2, so every call fetched
    # the same page no matter what ``pageNum`` was passed — breaking the
    # paging loop in ``__main__``.
    url = ("https://www.amazon.com/Tarsus-Kabbalah-Bracelet-Protection-Bracelets"
           "/product-reviews/B088TGBRGY/ref=cm_cr_arp_d_paging_btm_2"
           f"?ie=UTF8&pageNumber={pageNum}&reviewerType=all_reviews")
    # Minimal disguise: Amazon rejects requests without a browser User-Agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
    }

    import httpx

    # http2=True matters: real browsers talk HTTP/2 to Amazon, and the
    # protocol is part of the bot-detection fingerprint.
    # Fix: close the client (context manager) instead of leaking its
    # connection pool on every call.
    with httpx.Client(http2=True, headers=headers) as client:
        response = client.get(url)
    print(response.text)
    with open('111.html', mode='w', encoding='utf-8') as o_file:
        o_file.write(response.content.decode())
    return response


def saveHtml():
    """Fetch the first review page and persist it to ``B091FHCGPY_03.html``."""
    page = getHtml(1)
    decoded_body = page.content.decode()
    with open('B091FHCGPY_03.html', mode='w', encoding='utf-8') as out:
        out.write(decoded_body)


def getData(review, excel_data):
    """Extract one review's fields and append them to ``excel_data``.

    Args:
        review: a single review node (anything exposing ``.xpath()``, e.g.
            an lxml element for ``div[data-hook="review"]``).
        excel_data: dict of column-name -> list. Exactly one value is
            appended to every column, so all lists stay the same length.
    """
    def _first(values, default=''):
        # xpath() returns a (possibly empty) list. Optional fields such as
        # the format strip or the "Verified Purchase" badge are frequently
        # absent, and indexing [0] on an empty list crashed the original.
        return values[0] if values else default

    name = _first(review.xpath('.//span[@class="a-profile-name"]/text()'))
    excel_data['用户名称'].append(name)
    print(f'用户名：{name}')

    # Renamed from ``time`` so the stdlib ``time`` module (imported at the
    # top of the file) is no longer shadowed inside this function.
    review_date = _first(review.xpath('.//span[@data-hook="review-date"]/text()'))
    excel_data['评论时间'].append(review_date)
    print(f'评论时间：{review_date}')

    # Product variant / colour strip (optional).
    color = _first(review.xpath('.//a[@data-hook="format-strip"]/text()'))
    excel_data['产品型号'].append(color)
    print(f'产品型号：{color}')

    # "Verified Purchase" badge (optional).
    vip = _first(review.xpath('.//span[@data-hook="avp-badge"]/text()'))
    excel_data['是否VP'].append(vip)
    print(f'是否VP：{vip}')

    # The rating arrives as e.g. "4.0 out of 5 stars"; keep just the number.
    score_rex = re.compile(r"(?P<score>.*?) out of 5 stars", re.S)
    score = review.xpath('.//span[@class="a-icon-alt"]/text()')
    score_num = 0  # default preserved from the original implementation
    # Fix: the original tested ``score is not None`` — xpath() always
    # returns a list, so an empty result still raised IndexError. Also
    # guard against a non-matching rating string.
    if score:
        match = score_rex.search(score[0])
        if match:
            score_num = str(match.group('score'))
    excel_data['评分'].append(score_num)
    print(f'评分：{score_num}')

    content = _first(review.xpath('.//span[@data-hook="review-body"]/span/text()'))
    excel_data['内容'].append(content)
    print(f'内容：{content}')

    # All image URLs attached to the review (kept as a list; may be empty).
    img_src = review.xpath('.//img[@data-hook="review-image-tile"]/@src')
    excel_data['图片地址'].append(img_src)
    print(f'图片评论：{img_src},类型：{type(img_src)}')


if __name__ == '__main__':
    # Entry point. Currently fetches the page via Selenium; the
    # commented-out code below is the full httpx-based scrape-to-Excel
    # pipeline (page count discovery -> per-page parse -> pandas export).
    # getHtml(1)
    use_charom()
    # # Make one request first to learn the total page count.
    # response = getHtml(1)
    # html = etree.HTML(response.text)
    # # Read the overall review totals.
    # # normalize-space() strips newlines and surrounding whitespace.
    # div = html.xpath('normalize-space(//div[contains(@id,"filter-info-section")]/div/text())')
    # nums = re.compile('(?P<totalRating>.*?) total ratings, (?P<reviews>.*?) with reviews').search(div)
    # totalRating = nums.group('totalRating').replace(',', '')
    # reviewsNum = nums.group('reviews')
    # print(f'总评论数：{totalRating},有星评论数：{reviewsNum}')
    # import math
    #
    # totalPages = math.ceil(int(totalRating) / 10)
    # print(f'总页数：{totalPages}')
    # curr_page = 1
    #
    # # Column layout for the scraped review data (keys match getData).
    # excel_data = {'用户名称': [],
    #               '评论时间': [],
    #               '产品型号': [],
    #               '是否VP': [],
    #               '评分': [],
    #               '内容': [],
    #               '图片地址': [],
    #               }
    #
    # # Save as an Excel file.
    #
    # while curr_page <= totalPages:
    #     response = getHtml(curr_page)
    #
    #     html = etree.HTML(response.text)
    #     print(f'-------------获取第{curr_page}页评论详情start,共计{totalPages}页--------------')
    #     print(f'请求路径：{response.url}')
    #     reviewDivs = html.xpath("//div[@id='cm_cr-review_list']/div[@data-hook='review']")
    #     for review in reviewDivs:
    #         getData(review, excel_data)
    #     curr_page += 1
    #     print(f'-------------获取第{curr_page}页详情end--------------')
    #
    # df = pd.DataFrame(excel_data)
    # df.to_excel('example.xlsx', index=False)
