
import urllib.request
import urllib.parse
from lxml import etree
import pymysql

def create_request(page, base_url='https://www.cnhnb.com/p/blm-0-0-0-0-'):
    """Build a urllib Request for one listing page.

    Args:
        page: 1-based page number appended to *base_url*.
        base_url: category listing prefix; default is the original
            hard-coded "blm" category (e.g. resulting URL:
            https://www.cnhnb.com/p/blm-0-0-0-0-1/).

    Returns:
        urllib.request.Request with browser-like User-Agent and the
        session Cookie attached (the site blocks bare requests).
    """
    url = f'{base_url}{page}/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0'
        ,'Cookie':'deviceId=666e123-049f-40cb-bf1a-4d5b55b74; Hm_lvt_91cf34f62b9bedb16460ca36cf192f4c=1703744458,1703775363,1703812370,1704702254; Hm_lvt_0e023fed85d2150e7d419b5b1f2e7c0f=1703744458,1703775363,1703812370,1704702254; Hm_lvt_a6458082fb548e5ca7ff77d177d2d88d=1703744464,1703775364,1703812381,1704702407; Hm_lpvt_91cf34f62b9bedb16460ca36cf192f4c=1704702572; Hm_lpvt_0e023fed85d2150e7d419b5b1f2e7c0f=1704702572; Hm_lpvt_a6458082fb548e5ca7ff77d177d2d88d=1704702572' }
    return urllib.request.Request(url=url, headers=headers)
def get_content(request):
    """Fetch *request* and return the response body decoded as UTF-8.

    Uses a context manager so the underlying HTTP connection is always
    closed (the original leaked the response object).
    """
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')

def down_load(page, content):
    """Parse one listing page and return (name_list, price_list).

    Args:
        page: page number (kept for interface compatibility; used only if
            the commented-out JSON dump below is re-enabled).
        content: raw HTML of the page.

    Returns:
        Tuple of two lists: product names (``//h2`` text) and prices
        (``span.price`` text). The original body read a module-global
        ``tree`` that exists only inside the ``__main__`` loop, raising
        NameError when this function was actually called; the HTML is
        now parsed locally from *content*.
    """
    tree = etree.HTML(content)
    name_list = tree.xpath('//h2/text()')
    price_list = tree.xpath('//span[@class="price"]/text()')
    # with open('面粉' + str(page) + '.json','w', encoding='utf-8') as fp:
    #     fp.write(content)
    return name_list, price_list

def save_mysql(all_data):
    """Insert scraped product rows into the MySQL table ``formations``.

    Args:
        all_data: iterable of 6-item sequences
            (product_name, product_price, supplier, addr,
             product_desc, product_img).

    Commits once after all rows; rolls back on any failure so a partial
    batch is never left behind. Connection and cursor are always closed
    (the original leaked both and misspelled the cursor variable).
    """
    conn = pymysql.connect(host='127.0.0.1', user='root', port=3306,
                           password='123456', db='mydb', charset='utf8')
    # Parameterized insert — values are bound by the driver, not string-built.
    insert_sql = 'insert into formations(product_name,product_price,supplier,addr,product_desc,product_img) values (%s,%s,%s,%s,%s,%s)'
    try:
        with conn.cursor() as cursor:
            for data in all_data:
                data = tuple(data)
                print(data)
                cursor.execute(insert_sql, data)
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()


## TODO: supplier_web = i.find("a").attrs['href']  # supplier website URL


# Program entry point: scrape the requested page range and print the rows.
if __name__ == '__main__':
    start_page = int(input('请输入起始的页码:'))
    end_page = int(input('请输入结束的页码:'))

    for page in range(start_page, end_page + 1):
        request = create_request(page)
        content = get_content(request)
        tree = etree.HTML(content)
        # # optional JSON dump of the raw page
        # down_load(page, content)

        # Product names come from the image alt text; drop empty alts.
        raw_names = tree.xpath('//div[@class="shop-image"]//img//@alt')
        product_name = [name for name in raw_names if name != '']

        # Price number and its unit live in separate elements; glue each
        # price to its unit. zip() truncates to the shorter list, so a
        # page with a missing unit no longer raises IndexError (the old
        # range(len(danwei)) index loop did).
        units = tree.xpath('//div[@class="shops-price"]/text()')
        prices = tree.xpath('//span[@class="sp1"]/text()')
        product_price = [price + unit.replace('\n', '').strip()
                         for price, unit in zip(prices, units)]

        supplier = tree.xpath('//a[@class="l-shop-btm"]/text()')          # vendor name
        shipping_nodes = tree.xpath('//div[@class="r-shop-btm"]/descendant-or-self::*')
        addr = [node.text for node in shipping_nodes]                     # shipping origin
        product_desc = tree.xpath('//div[@class="shop-image"]/img/@alt')  # description
        product_img = tree.xpath('//div[@class="shop-image"]/img/@src')   # image URL

        # Assemble one row per product; zip() again truncates to the
        # shortest field list instead of crashing on a ragged page.
        product_all = [list(row) for row in zip(product_name, product_price,
                                                supplier, addr,
                                                product_desc, product_img)]
        print(product_all)
        # save_mysql(product_all)