import requests  # 可爬取任意多页的“惠普笔记本电脑详情”，每页30个
from lxml import etree  # 不足的是，每页应该有60个商品，后三十个为动态加载，无法爬取
from fake_useragent import UserAgent
import os, re
import time

# Randomized User-Agent so requests look like a real browser and are less
# likely to be blocked; fake_useragent picks a different UA string each run.
headers = {
    "User-Agent": UserAgent().random
}


def FileSave(save_path, filename, results):
    """Append each entry of *results* as one line to <save_path>/<filename>.txt.

    Args:
        save_path: Output directory; created (including parents) if missing.
        filename: Base name of the output file, without the ".txt" extension.
        results: Iterable of values; each one is written on its own line.
    """
    # makedirs(exist_ok=True) replaces the exists()+mkdir() pair: it creates
    # missing parent directories too and avoids the check-then-create race.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    # 'a' is enough — the file is only written, never read back here.
    with open(path, 'a', encoding='utf-8') as fp:
        for item in results:
            fp.write("%s\n" % item)


def Page_Level(myPage):
    """Parse one JD search-result page: product title, price and item URL.

    Args:
        myPage: Raw HTML of a search-result page.

    Returns:
        list[str]: One "title----\\t-------price---\\t---url" entry per product,
        e.g. "HP OMEN 5 15.6in gaming laptop...----\t-------￥7299.00---\t---https://item.jd.com/100005603832.html"
    """
    dom = etree.HTML(myPage)
    zip_name_urls = []
    title_nodes = dom.xpath('//*[@id="J_goodsList"]/ul/li/div/div[3]/a/em')
    price_nodes = dom.xpath('//*[@id="J_goodsList"]/ul/li/div/div[2]/strong')
    hrefs = dom.xpath('//*[@id="J_goodsList"]/ul/li/div/div[3]/a/@href')
    for title_node, price_node, href in zip(title_nodes, price_nodes, hrefs):
        title = title_node.xpath('string(.)')
        price = price_node.xpath('string(.)')
        # Some hrefs are protocol-relative ('//item.jd.com/...'); the regex
        # extracts host + path so a full https:// URL can be rebuilt.
        matches = re.findall(r'\/\/([^/:]+[^#]*)', href)
        if not matches:
            # Original code did matches[0] unconditionally and crashed with
            # IndexError on any href without '//'; skip such entries instead.
            continue
        url = 'https://' + str(matches[0])
        zip_name_urls.append(title + '----\t-------' + price + '---\t---' + url)
    return zip_name_urls


def spider(url, save_path='H:\\新建文件夹\\京东\\', filename=u"笔记本电脑统计0"):
    """Fetch one search-result page, parse it and append the results to disk.

    Args:
        url: Search-result page URL to fetch.
        save_path: Directory for the output file; defaults to the original
            hard-coded location so existing callers keep working.
        filename: Output file base name (no extension).
    """
    # timeout keeps the crawler from hanging forever on a stalled connection
    response = requests.get(url, headers=headers, timeout=10)
    myPage = response.content.decode("utf-8")  # raw page HTML
    page_level_results = Page_Level(myPage)
    FileSave(save_path, filename, page_level_results)


if __name__ == "__main__":
    start = time.time()
    print('start......')
    # Search URL for HP (惠普) laptops, one placeholder for the page number.
    url_template = ("https://search.jd.com/search?keyword=%E7%AC%94%E8%AE%B0%E6%9C%AC"
                    "&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=bijib"
                    "&ev=exbrand_%E6%83%A0%E6%99%AE%EF%BC%88HP%EF%BC%89%5E"
                    "&page={}&s=1&click=0")
    for page_no in range(5):
        spider(url_template.format(page_no))
    print('end')
    end = time.time()
    print('爬虫运行时间为%.4f秒' % (end - start))
