# -*- coding: utf-8 -*-
"""
@Time ： 2021/1/8
@Auth ： 张张呀
@File ：yamaxun.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)

"""
import re

import requests
import xlwt
from bs4 import BeautifulSoup
from lxml import etree


# 主函数
def saveBook(key_List, desc_list, sheet, i, url_list, name_list, price_list, chicun_List, book, savepath):
    """Write one scraped page of product data into the worksheet and save the workbook.

    Args:
        key_List: product keyword strings, one per product.
        desc_list: product description strings, one per product.
        sheet: xlwt worksheet to write into.
        i: 1-based page number; rows for this page start at (i - 1) * 40 + 1.
        url_list: product page URLs (its length drives how many rows are written).
        name_list: product name strings.
        price_list: product price strings.
        chicun_List: product size strings.
        book: xlwt workbook that owns ``sheet``.
        savepath: path of the .xls file to save the workbook to.
    """
    col = ("图片所在文件夹名称", "商品名称", "商品链接", "商品价格", "商品尺寸", "商品关键字", "商品描述")
    # Header row (row 0); rewritten on every call, which is harmless with
    # cell_overwrite_ok=True on the sheet.
    for col_idx, title in enumerate(col):
        sheet.write(0, col_idx, title)
    # Data rows: each page holds up to 40 products, so page i occupies rows
    # (i-1)*40 + 1 .. (i-1)*40 + len(url_list).
    rows = zip(url_list, name_list, price_list, chicun_List, key_List, desc_list)
    for offset, (uu, nn, pp, cc, kk, dd) in enumerate(rows, start=1):
        row = (i - 1) * 40 + offset
        print("第%d条输入完成" % row)
        # Column 0 stores the row number itself (doubles as the image-folder name).
        for col_idx, value in enumerate((row, nn, uu, pp, cc, kk, dd)):
            sheet.write(row, col_idx, value)

    book.save(savepath)


if __name__ == '__main__':
    # Output .xls path. NOTE(review): the same path is saved once per category,
    # with a fresh workbook each time, so each category overwrites the previous
    # file — presumably only the last category's data survives; confirm intent.
    savepath = "国外网址数据.xls"
    # HTTPS proxy routed through for every request.
    pop = {'https': 'http://96.113.165.182:3128'}

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'
    }
    m = 1   # running product counter across all categories (used in progress prints)
    mm = 1  # running category counter (used for the sheet name)

    # Regex used only to print the raw <span class="price"> matches for each
    # product page; compiled once here instead of once per product.
    price_pattern = re.compile(r'<span\sclass="price">(.*?)<\/span>')

    url = 'https://www.jurllyshe.com/'
    r = requests.get(url=url, headers=headers, proxies=pop).text
    e = etree.HTML(r)
    # Collect every top-level category link from the navigation bar.
    hrefArray = e.xpath("//ul[@class='nav-wrap j-nav-wrap original']//a[@class='first-link']/@href")
    for div in hrefArray:
        book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # workbook for this category
        sheet = book.add_sheet('第' + f'{mm}' + '页', cell_overwrite_ok=True)  # one sheet per category
        # Only page 1 of each category is scraped; widen the range to scrape more.
        for i in range(1, 2):
            # Build the page URL without clobbering the loop variable.
            page_url = div + f'?___store=default&p={i}'
            r2 = requests.get(url=page_url, headers=headers, proxies=pop).text
            # Product tiles on this listing page.
            e2 = etree.HTML(r2)
            href2Array = e2.xpath("//div[@class='product-image-wrapper ban-click']")
            # Parallel per-product lists for this page, consumed by saveBook.
            key_List = []
            desc_list = []
            url_list = []
            name_list = []
            price_list = []
            chicun_List = []
            for div2 in href2Array:
                # Detail-page URL of this product.
                mkUrl = div2.xpath('./a/@href')[0]
                print('正在抓取的商品链接是---------' + mkUrl)
                r3 = requests.get(url=mkUrl, headers=headers, proxies=pop).text
                e3 = etree.HTML(r3)
                url_list.append(mkUrl)
                # Title from the detail page.
                name = e3.xpath('//h1[@class="h1box"]//text()')[0]
                print('这是标题:' + name)
                name_list.append(name)
                # Price via BeautifulSoup; the regex result is printed for debugging only.
                page_two = BeautifulSoup(r3, "html.parser")
                price = page_two.find('div', class_='totalprice').find('span').text.strip()
                ppp = price_pattern.findall(r3)
                print(ppp)
                # Keywords from the <meta name="keywords"> tag.
                yy = page_two.find('meta', attrs={"name": "keywords"})['content']
                print('这是关键字:' + yy)
                key_List.append(yy)
                # Description from the <meta name="description"> tag.
                key = e3.xpath("//meta[@name ='description']/@content")[0]
                print('这是描述:' + key)
                desc_list.append(key)
                price_list.append(price)
                # Size scraping and image download are currently disabled; an empty
                # size keeps the spreadsheet columns aligned.
                chicun_List.append('')
                print(f'第{m}个商品的所有图片下载完成')
                m += 1
            # Persist this page's products to the spreadsheet.
            saveBook(key_List, desc_list, sheet, i, url_list, name_list, price_list, chicun_List, book, savepath)
        mm += 1