import requests
from bs4 import BeautifulSoup

# Requests to JD often fail or get throttled, so cache responses locally.
import requests_cache

# Transparently caches all requests made via `requests` into jd_cache.sqlite.
requests_cache.install_cache('jd_cache')

url = 'https://search.jd.com/search?keyword=python'

header_value = {
    'Referer': 'https://search.jd.com/search?keyword=python',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.55',
    'Cookie':
        '__jdu=16448122780871481326052; shshshfpa=17eb52a3-dbc6-3161-aeb0-d6c3e8ce1c98-1644812279; __jdv=122270672|direct|-|none|-|1648432866268; __jdc=122270672; areaId=19; ipLoc-djd=19-1609-0-0; shshshfpb=cfqeTCChXnGl_WVK-xKPZJw; rkv=1.0; qrsc=3; shshshfp=9683077b4875b6270a19ebddb67dd409; __jda=122270672.16448122780871481326052.1644812278.1648453272.1648455423.5; __jdb=122270672.1.16448122780871481326052|5.1648455423; shshshsID=1c5dc855368aa25caf4623b9c36370ec_1_1648455423628; 3AB9D23F7A4B3C9B=X2QPCC3XC2WH6X2K3NDHNTZJVJQ2UJKLELT4WLF46ZA7AY2QKYFY3L4RBYDCYG5PS6EG4UBMUIUCS5XUHSPN5RHTXY'
}

# NOTE: JD's server has a mechanism that blocks repeated reuse of the same
# cookie, so the cookie above may expire and need refreshing.
# timeout prevents a dead connection from hanging the script forever;
# raise_for_status fails loudly instead of silently parsing an error page.
r = requests.get(url, headers=header_value, timeout=10)
r.raise_for_status()
print(r.text)
soup = BeautifulSoup(r.text, 'lxml')
for li in soup.select('.gl-item'):
    # select_one returns None when a field is missing (e.g. on a partially
    # rendered or anti-bot page); skip incomplete items instead of crashing
    # with an IndexError as select(...)[0] would.
    name_node = li.select_one('.p-name em')
    price_node = li.select_one('.p-price strong')
    shop_node = li.select_one('.p-shopnum a')
    if name_node is None or price_node is None or shop_node is None:
        continue
    p_name = name_node.text.strip()
    p_price = price_node.text.strip()
    p_shopnum = shop_node.text
    print(p_name, p_price, p_shopnum)
