import json
from typing import Iterable
from urllib.parse import urlencode, urlparse

import scrapy
from bs4 import BeautifulSoup
from scrapy import Request
from scrapy.http import Response


class JdProductSpider(scrapy.Spider):
    """Scrape JD (jd.com) product listings from the mobile search endpoint.

    Issues one GET request per result page (up to ``MAX_PAGE`` pages) against
    JD's search API and parses the product tiles out of the returned HTML,
    yielding one dict per product with ``sku``, ``title``, ``price`` and
    ``image`` keys.
    """

    name = "jd_product"
    allowed_domains = ["search.m.jd.com"]
    start_urls = ["https://search.m.jd.com/"]

    SEARCH_KEY_WORDS = ["工装靴"]
    # NOTE(review): the cookie embeds a session token; it should be made
    # configurable because every crawl needs a fresh token.
    COOKIE = "__jdu=17295179698981085743729; shshshfpa=3bf545c0-33a6-176b-ac23-518bf140a0d9-1729517977; shshshfpb=BApXS8FOtt_dAA-vQweH53x7qOPxKhhEDBmsQflhp9xJ1MhxiC4G2; 3AB9D23F7A4B3C9B=RU6AZR6BW2EXX7T4B5R4IMK7IKKFGKSAPACENOYFDKUZENSOZPGD3TEIKDG6WM3MSTSOU2HA6PTJUSCQDBVTWVS62I; __jda=143920055.17295179698981085743729.1729517970.1729517970.1729605883.2; __jdb=143920055.9.17295179698981085743729|2.1729605883; __jdc=143920055; jsavif=1; 3AB9D23F7A4B3CSS=jdd03RU6AZR6BW2EXX7T4B5R4IMK7IKKFGKSAPACENOYFDKUZENSOZPGD3TEIKDG6WM3MSTSOU2HA6PTJUSCQDBVTWVS62IAAAAMSWSSTOKQAAAAADQRW22QIOPSS6MX; __jd_ref_cls=MDownLoadFloat_OpenAppSchema; _gia_d=1; cid=9; mba_muid=17295179698981085743729; mba_sid=17296058892953318279118352215.2; retina=1; wqmnx1=MDEyNjM3MnQuLzE4MGFpIGggaSBBYjUoIGVlMWJFZjQzMlVCMlJJKikl; wxa_level=1; flash=3_hsebelbGY7aw72em589pCosbLmTOB6fIshvoR08KTeRmWv_f11YhRUkIYfeqj6zJMlCLPjXYScrW49p24pUrpPebJxgP6iG_oO8DN6wWRt_ZWFLU1kWu_gK29l_E-p86ZlaYjmjCtnPKOz88GYQTrawV_mPpujrwtUSFJ90eFV**; ipLoc-djd=19-1607-4773-62121; token=88c6382a1c6dc93807737eead0a8e878,3,960892; thor=DB00E2DE97F4790563AA72379A1189ECEAA6DC6B643F99AB4A5A264CA83A612543DFA6F1801778B779B25908221C6CF3A1FE527F26B2157B79F5254FC3C6E223E61F82CC25895A6ED66353D9EBBD14698239AE010B4BDF59B6BF9238589802297125F53D0BD2D13AD0C7A5623950FD98F110009FAB40862586785BAC7352E474; _pst=%E7%8E%8B%E8%92%99%E4%BA%AC%E4%B8%9C%E5%8F%B7; _tp=9MkmhsaVFIJgw2IJ%2BWiQsABWtz89d3lSjwoNesoRDtyrInQu8BRFxpy%2Bf9ueeLmO; logintype=wx; npin=%E7%8E%8B%E8%92%99%E4%BA%AC%E4%B8%9C%E5%8F%B7; pinId=eCmIPmkBs9H4BKv0SZpG_g; visitkey=7602155293286400950; appCode=ms0ca95114; cd_eid=jdd03RU6AZR6BW2EXX7T4B5R4IMK7IKKFGKSAPACENOYFDKUZENSOZPGD3TEIKDG6WM3MSTSOU2HA6PTJUSCQDBVTWVS62IAAAAMSWSFVOWYAAAAADKZDOQAT2R45CQX; jxsid=17296058892590612837; webp=1; shshshfpx=3bf545c0-33a6-176b-ac23-518bf140a0d9-1729517977; PCSYCityID=CN_440000_440300_0; areaId=19; __jdv=76161171|direct|-|none|-|1729517969899"

    # Query-string template for the search API. The ``body`` sub-dict is
    # copied and JSON-encoded per request in start_requests(); the template
    # itself is never mutated.
    PARAMS = {
        "appid": "search-pc-java",
        "functionId": "pc_search_s_new",
        "client": "pc",
        "clientVersion": "1.0.0",
        "t": "1729607637582",
        "body":
            {"keyword": "工装靴", "wq": "工装靴", "pvid": "e0bf3bcdc6d644389d7cf06f9da932c7", "isList": 0, "page": "5",
             "s": "116", "click": "0", "log_id": "1729607619223.5655", "show_items": ""},
    }

    HEADERS = {
        "Accept": "*/*",
        "Host": "search.m.jd.com",
        "Origin": "https://search.m.jd.com",
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Mobile/15E148 Safari/604.1",
        "Cookie": COOKIE,
    }

    # Number of result pages to fetch per start URL.
    MAX_PAGE = 10

    def construct_url(self, url, params):
        """Return ``url`` with ``params`` appended as an encoded query string.

        Dict-valued entries (the ``body`` field) are serialized with
        ``json.dumps`` first: ``urlencode`` would otherwise stringify them
        with Python's repr (single quotes), which is not valid JSON and the
        API cannot parse it.
        """
        encoded = {
            key: json.dumps(value, ensure_ascii=False) if isinstance(value, dict) else value
            for key, value in params.items()
        }
        return "%s?%s" % (url, urlencode(encoded))

    def start_requests(self) -> Iterable[Request]:
        """Yield one search request per result page, up to MAX_PAGE pages."""
        for url in self.start_urls:
            for page in range(1, 1 + self.MAX_PAGE):
                # Build a per-request copy instead of mutating the shared
                # class-level PARAMS dict in place: in-place mutation leaks
                # state across requests and spider instances. The page number
                # is sent as a string, matching the template's other values.
                params = dict(self.PARAMS)
                params["body"] = dict(self.PARAMS["body"], page=str(page))
                yield Request(
                    self.construct_url(url, params),
                    headers=self.HEADERS,
                    callback=self.parse,
                )

    def parse(self, response: Response):
        """Extract product items from one search-result page.

        Yields one dict per product tile with ``sku``, ``title``, ``price``
        and ``image`` keys. Tiles missing any expected node or attribute are
        logged and skipped instead of aborting the whole page.
        """
        # Name the parser explicitly: BeautifulSoup(text) guesses one,
        # emits GuessedAtParserWarning, and may pick different parsers on
        # different machines.
        soup = BeautifulSoup(response.text, "html.parser")
        for product in soup.select("li.gl-item"):
            title = product.select_one("div.p-name em")
            price = product.select_one("div.p-price i")
            image = product.select_one("div.p-img img")
            if not (product.has_attr("data-sku") and title and price and image
                    and image.has_attr("data-lazy-img")):
                self.logger.warning("Skipping malformed product tile on %s", response.url)
                continue

            yield {
                "sku": product["data-sku"],
                "title": title.text,
                "price": price.text,
                "image": "http:" + image.attrs["data-lazy-img"],
            }
