from urllib.parse import quote

import scrapy
from bs4 import BeautifulSoup
class Jdcrawler(scrapy.Spider):
    """Crawl JD.com search-result pages for a fixed list of keywords.

    Emits one GET request per (keyword, page) pair from
    ``start_requests``; each response is handled by ``parse_search``.
    """

    name = "jd_search"

    # Search keywords (keyboard, mouse, monitor) and result pages to
    # fetch per keyword. Class attributes so subclasses can override.
    search_keywords = ["键盘", "鼠标", "显示器"]
    pages_per_keyword = 5

    def start_requests(self):
        """Yield one search request per keyword/page combination.

        Fixes vs. the original:
        - removed the stray ``break`` that ended the keyword loop after
          the first keyword, so every keyword is now crawled;
        - plain ``scrapy.Request`` instead of ``FormRequest`` — this is
          a simple GET with no form payload;
        - dropped the duplicated ``psort=3`` query parameter;
        - percent-encode the CJK keyword so the URL is well-formed.
        """
        for keyword in self.search_keywords:
            encoded = quote(keyword)
            for page in range(1, self.pages_per_keyword + 1):
                # NOTE(review): the `s` result-offset is fixed at 61 for
                # every page while `page` varies — confirm against JD's
                # pagination scheme (original behaved the same way).
                url = (
                    "https://search.jd.com/Search"
                    f"?keyword={encoded}&psort=3&wq={encoded}"
                    f"&page={page}&s=61&click=0"
                )
                yield scrapy.Request(url=url, callback=self.parse_search)

    def parse_search(self, response):
        """Parse one search-results page.

        Currently only parses and logs the page; extend this to extract
        product fields from ``soup`` and yield items.
        """
        # response.text decodes with the response's declared charset
        # rather than hard-coding utf-8 as the original did.
        soup = BeautifulSoup(response.text, "lxml")
        self.logger.debug("parsed search page %s", response.url)
        # TODO: extract product data from `soup` and yield items.