import requests
import json
import time
from bs4 import BeautifulSoup


def jd_search(keyword):
    """Search JD.com for *keyword* and return the raw search-results HTML.

    Args:
        keyword: Search term (e.g. a product name), passed as the
            ``keyword`` query parameter.

    Returns:
        The response body (HTML) of the JD search page as a string.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    headers = {
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        # Region cookie — presumably selects a delivery area so prices/stock
        # render; verify the ids are still valid for your region.
        'Cookie': 'areaId=17; ipLoc-djd=17-1381-50718-0; rkv=1.0; qrsc=3;',
    }
    url = 'https://search.jd.com/Search'
    params = {
        'keyword': keyword,
        # Fixed: was 'utf - 8' (stray spaces), which sends an invalid
        # encoding value to the server.
        'enc': 'utf-8',
        # psort=3 appears to request sort-by-sales — TODO confirm against
        # JD's search URL parameters.
        'psort': '3',
    }
    # timeout added so a stalled connection cannot hang the script forever.
    jd = requests.get(url=url, headers=headers, params=params, timeout=10)
    return jd.text


def _selected_category(soup, dom_id):
    """Return the ``data-value`` of the selected option under #<dom_id>.

    Some products lack a given attribute group (e.g. no colour choice);
    in that case return '无' ("none"), matching the original behaviour.
    """
    selected = soup.select(f'#{dom_id} > div.dd > div.item.selected')
    if not selected:
        return '无'
    return selected[0]['data-value']


def get_goods_info(url):
    """Fetch a JD product-detail page and extract basic product info.

    Args:
        url: Full product-detail URL (e.g. ``https://item.jd.com/...``).

    Returns:
        dict with keys 'url', 'name', 'category1', 'category2'.

    Raises:
        requests.RequestException: on network failure or timeout.
        IndexError: if the expected page structure is missing.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    }
    goods_info = requests.get(url=url, headers=headers, timeout=10)
    goods_soup = BeautifulSoup(goods_info.text, 'html.parser')
    name_tag = goods_soup.select('body > div > div > div.itemInfo-wrap > div.sku-name')[0]
    if name_tag.string is None:
        # Some listings render the title as an <img> instead of text. An
        # <img> has no text content, so the original `.string.strip()` on it
        # always raised AttributeError; read the alt attribute instead
        # (assumed to carry the product name — TODO confirm on a live page).
        img_tag = goods_soup.select('body > div > div > div.itemInfo-wrap > div.sku-name > img')[0]
        name = img_tag.get('alt', '').strip()
    else:
        name = name_tag.string.strip()
    # Price extraction deferred — the selector below did not work reliably.
    # price = goods_soup.select('div.dd > span.pricing > del')[0].string
    # First attribute group (usually colour) — may be absent.
    category1 = _selected_category(goods_soup, 'choose-attr-1')
    # Second attribute group — may be absent.
    category2 = _selected_category(goods_soup, 'choose-attr-2')
    goods = {'url': url,
             'name': name,
             # 'price': price,
             'category1': category1,
             'category2': category2}
    return goods


if __name__ == '__main__':
    # Search JD for the keyword and collect detail info for the top-5
    # best-selling products, then dump them to a JSON file.
    key = '洗衣液'
    jd_html = jd_search(key)

    # Collect all product-detail links (a search page loads 30 by default).
    soup = BeautifulSoup(jd_html, 'html.parser')
    goods_list = soup.select('#J_goodsList > ul > li > div > div.p-name.p-name-type-2 > a')
    link_list = [f"https:{item['href']}" for item in goods_list if item['href']]
    print(link_list)

    # Fetch details for up to the first 5 products. Slicing (instead of the
    # original `range(5)` indexing) avoids an IndexError when the search
    # returned fewer than 5 links.
    top_5_goods = []
    for link in link_list[:5]:
        time.sleep(0.5)  # throttle requests so we don't hammer the server
        top_5_goods.append(get_goods_info(link))

    with open(f'jd_top5_{key}.json', 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable in the file.
        json.dump(top_5_goods, f, ensure_ascii=False)
