"""
@Project ：jingdong 
@File    ：jingdong-python-spider.py
@IDE     ：PyCharm 
@Author  ：Saltman
@Date    ：2023/5/4 15:30
@Description： 爬取京东的商品数据
"""
from urllib.parse import urlencode

import scrapy
import time

from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from jingdong.items import JingdongItem
from selenium import webdriver
from selenium.webdriver.chrome.options import Options  # 使用无头浏览器

# Configuration for Selenium's Chrome driver (used when the driver creation
# in JingdongPythonSpider.__init__ is re-enabled).
chrome_options = Options()
# Headless mode is deliberately left off so the browser window stays visible:
# chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")  # commonly paired with headless mode

# Logged-in JD.com session cookies, replayed with every search request so the
# result pages render as for an authenticated user. NOTE(review): these values
# are account-specific and expire — refresh them before running the spider.
cookie = {'shshshfpa': '55be8b38-5f70-62e8-0b7d-cf5846fb2065-1656211435', '__jdu': '1656211434569105843070',
          'shshshfpb': 'mLEGoRQ0WxkmCfOIEXdBjGg', 'shshshfpx': '55be8b38-5f70-62e8-0b7d-cf5846fb2065-1656211435',
          'pinId': 'XhBcQxitb1dDVkeBi-qlF7V9-x-f3wj7',
          'autoOpenApp_downCloseDate_autoOpenApp_autoPromptly': '1679899685135_1', 'areaId': '13',
          'PCSYCityID': 'CN_370000_370100_0', 'shshshfp': '8b64f583401996712899294096a7b263',
          'ipLoc-djd': '13-1000-1002-40642',
          'TrackID': '1W4MtFZZmGv0bQKiScpWNEMuDYjeXOKAloOgtRNrB5Zmr4tRwNu9arG0dmP_y7iQlthuGk68ZY6TTqPCewOMrn51-i7on3Wt9PM2dXA-_wP4DKNuCd3xJc8mlovhHHo2B',
          'pin': 'jd_5f24390e38d73', 'unick': '木斯皮尔翰', 'ceshi3.com': '103', '_tp': '', '_pst': 'jd_5f24390e38d73',
          'user-key': '1f11199e-1015-4c10-b463-d0a38e72f751', 'cn': '33',
          'unpl': 'JF8EALpnNSttUU0BBxhSHxNFSAhSW15dTR4BP24GXA0KTFZQTFEbExN7XlVdXxRKEx9ubhRUXFNKXQ4bAysSEXtdVV9fD0oeBm5vNWQIWklVBh0CSxB-SzMhOTZuHx4CbmdsDG1bS2QEKwIcFhdIXVRbXw1PEAtvZgFdWFpKUgQrAysSGE9tZG5YCEoWAmdgBFNcaEpkBxoDHhYRTFxXWW1DJRZOb2ABU15YS1EHHgYcGhBKWV1bXwlNFjNuVwY',
          '__jda': '76161171.1656211434569105843070.1656211434.1683371324.1683511690.19', '__jdc': '76161171',
          '3AB9D23F7A4B3CSS': 'jdd033YIR3ULKSSONXNMUXJZWWPWSR26246RHDA3Q4TA6NUBX6CGGEXEQFQ5SPBTHZMDRY6VXAGDR5IDOCIRNY7ODNJ4RVEAAAAMH7EO4TFIAAAAADXTGM26B64OOW4X',
          '_gia_d': '1',
          '3AB9D23F7A4B3C9B': '3YIR3ULKSSONXNMUXJZWWPWSR26246RHDA3Q4TA6NUBX6CGGEXEQFQ5SPBTHZMDRY6VXAGDR5IDOCIRNY7ODNJ4RVE',
          '__jdb': '76161171.2.1656211434569105843070|19.1683511690',
          '__jdv': '76161171|www.baidu.com|t_1003608409_|tuiguang|87d32a50d2d742d783a829ac63dfb102|1683511696967'}


class JingdongPythonSpider(scrapy.Spider):
    """Scrape product listings (image, price, title, shop) from JD.com
    search result pages for a fixed list of keywords.

    Every request carries the module-level ``cookie`` dict so the pages
    render as for a logged-in user. A Selenium-driven pagination flow was
    prototyped here but is currently disabled.
    """

    name = "jingdong-python"

    def __init__(self, **kwargs):
        # Forward spider arguments (e.g. ``scrapy crawl -a key=value``) to
        # the base class; the previous body dropped them with ``pass``.
        super().__init__(**kwargs)
        # Selenium driver creation is intentionally disabled:
        # self.driver = webdriver.Chrome(chrome_options=chrome_options)

    def closed(self, response):
        # Scrapy calls this hook once when the spider shuts down; the single
        # argument it passes is the close *reason* string (parameter name
        # kept as ``response`` for backward compatibility).
        print("爬虫正在退出，执行关闭浏览器")
        time.sleep(2)  # short grace period before the (disabled) browser teardown
        # self.driver.quit()

    def parse(self, response, **kwargs):
        """Yield one JingdongItem per product ``<li>`` in the search grid.

        JD lazy-loads product images: when the ``src`` attribute is absent
        or holds the placeholder string ``'done'``, the real image URL is
        in the ``data-lazy-img`` attribute instead.
        """
        for good_li in response.xpath('//*[@id="J_goodsList"]/ul/li'):
            item = JingdongItem()
            image_src = good_li.xpath('./div/div[@class="p-img"]/a/img/@src').get()
            image_lazy = good_li.xpath('./div/div[@class="p-img"]/a/img/@data-lazy-img').get()
            # Fall back to the lazy-load attribute when src is missing as
            # well as when it is the 'done' placeholder (the original only
            # handled 'done', leaving image = None for unloaded images).
            if image_src is None or image_src == 'done':
                item['image'] = image_lazy
            else:
                item['image'] = image_src
            item['price'] = good_li.xpath('./div/div[@class="p-price"]/strong/i/text()').get()
            item['title'] = good_li.xpath('./div/div[@class="p-name p-name-type-2"]/a/em/text()').get()
            item['shop'] = good_li.xpath('./div/div[@class="p-shop"]/span/a/text()').get()
            yield item

    def start_requests(self):
        """Yield search requests: 15 result pages per keyword.

        JD's search paginates on odd ``page`` numbers, so
        ``range(1, 30, 2)`` covers pages 1, 3, ..., 29 (15 pages).
        """
        base_url = 'https://search.jd.com/Search?'
        keywords = ['电视', '电脑', '冰箱', '空调', '洗衣机', '热水器', '饮水机', '空气净化器', '扫地机器人', '电磁炉', '电饭煲', '沙发', '床垫', '衣柜',
                    '餐桌椅', '眼镜', '化妆品', '衣服', '鞋子', '手表', '珠宝', '运动器材', '自行车', '摄像机', '相机', '麦克风', '音响', '书籍', '游戏机',
                    '玩具', '乐器', '食品', '饮料', '餐具', '家居装饰', '地毯', '窗帘', '婚庆用品', '母婴用品']
        for keyword in keywords:
            # Build the query string once per keyword; the per-page URLs
            # only differ in the trailing &page= parameter.
            url_temp = base_url + urlencode({'keyword': keyword, 'enc': 'utf-8'})
            for page_num in range(1, 30, 2):
                yield scrapy.Request(url=url_temp + '&page=' + str(page_num), cookies=cookie)
