# encoding=utf8
import pymysql,requests,urllib3,datetime,re,csv
from openpyxl import Workbook
from lxml import etree
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Database connection settings used by mysql() below.
host = '127.0.0.1'   # MySQL server address
users = 'root'       # account name
pwd = '123123'       # account password
port = 3306          # server port
dbs = 'ans'          # schema containing the `jd` table

def mysql(id,品牌,名称,价格,店铺,商品链接):
    """Insert one scraped product row into the `jd` table.

    Parameters mirror the table columns: product id, brand, name, price,
    shop name and product URL. (`id` shadows the builtin — kept for
    backward compatibility with existing callers.)

    Commits on success; rolls back and logs the error on failure. The
    connection is now guaranteed to be closed on every path (the original
    leaked it if anything other than `execute` raised).
    """
    value = (id, 品牌, 名称, 价格, 店铺, 商品链接)
    db = pymysql.connect(host=host, user=users, passwd=pwd, port=port, db=dbs)
    try:
        # Parameterized query: the driver escapes the scraped values, so
        # hostile page content cannot inject SQL.
        sql = "INSERT INTO jd(id,品牌,名称,价格,店铺,商品链接) values(%s,%s,%s,%s,%s,%s)"
        with db.cursor() as cursor:  # cursor closed automatically
            cursor.execute(sql, value)
        db.commit()
        print('success!')
    except Exception as e:
        db.rollback()
        print("error.", e)
    finally:
        db.close()  # release the connection even if commit/rollback raised


def down(url, timeout=10):
    """Fetch *url* with browser-like headers and return the body as text.

    Args:
        url: absolute URL to GET.
        timeout: seconds before the request is aborted. New parameter with a
            default, so existing callers are unaffected; the original had no
            timeout, so one stalled connection could hang the crawl forever.

    Returns:
        The decoded response body (str).
    """
    print(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77',
        'Cookie':'__jdv=76161171|www.bing.com|-|referral|-|1656254136650; __jdu=16562541366491405850625; areaId=7; ipLoc-djd=7-412-0-0; PCSYCityID=CN_410000_410100_0; shshshfpa=98e0919b-9bab-f953-5a2d-03e62bd4663a-1656254137; shshshfpb=q9xNhgGY158EFd2eiGe575A; __jda=122270672.16562541366491405850625.1656254137.1656254137.1656254137.1; __jdb=122270672.2.16562541366491405850625|1.1656254137; __jdc=122270672; shshshfp=9ca9cdff92eb1eee1dcd0aeef5832e21; shshshsID=e6f2db6159dbbfd47cc4f1ebbfa40a56_2_1656254170809; qrsc=1; rkv=1.0; 3AB9D23F7A4B3C9B=DRPPYPU5OXATTJNZQK32BZ6ZCSHTMFAL4M5COXQL673UYR2J4D46Y6Y6LMUXWT3OL7GYCFPI42HQMPI7HOICOBY6QU',
        'referer':'https://www.jd.com/',
    }
    res = requests.get(url=url, headers=headers, timeout=timeout)
    return res.text

def func(res):
        """Parse one JD search-result page and persist every product on it.

        For each <li> in the goods list: extract id / brand / name / price /
        shop / product URL, append the row to ./jd.csv, and download the main
        product image into ./img/ (requires that folder to exist).
        """
        tree = etree.HTML(res)
        li_list = tree.xpath('//div[@id="J_goodsList"]/ul/li')
        for li in li_list:
            try:
                id = ''.join(li.xpath('./div/div[2]/strong/i/@data-price')).strip()
                品牌 = ''.join(li.xpath('./div/div[3]/a/em//text()[1]')[0]).strip()
                名称 = ''.join(li.xpath('./div/div[3]/a/em//text()[2]')[0]+li.xpath('./div/div[3]/a/em//text()[1]')[2]).strip()
                店铺 = ''.join(li.xpath('./div/div[5]/span/a/text()')).strip()
                价格 = ''.join(li.xpath('./div/div[2]/strong/i/text()')).strip()
                商品链接 = 'https:'+''.join(li.xpath('./div/div[1]/a/@href')).strip()
                sour = [id, 品牌, 名称, 价格, 店铺, 商品链接]
                print(id, 品牌, 名称, 价格, 店铺, 商品链接)

                # Append the row to the CSV.
                # BUG FIX: the original did `writer = (fp)` and a bare `(sour)`
                # — both no-ops — so no csv.writer was created and nothing was
                # ever written to jd.csv despite the 'success' message.
                with open('./jd.csv', mode='a+', newline='', encoding='utf-8') as fp:
                    writer = csv.writer(fp)
                    writer.writerow(sour)
                    print('success')

                # mysql(id, 品牌, 名称, 价格, 店铺, 商品链接)

                # Save the main product image; silently skipped if ./img is missing.
                resp = down(商品链接)
                tree1 = etree.HTML(resp)
                img_url = 'https:' + ''.join(tree1.xpath('//*[@id="spec-img"]/@data-origin')).replace('.avif', '')
                imgres = requests.get(url=img_url).content
                with open('./img/{}.jpg'.format(id), 'wb') as fp:
                    fp.write(imgres)
            except Exception as e:
                # Best-effort per item: log and continue instead of the
                # original bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and hid every parse error.
                print('skip item:', e)
                continue




if __name__ == "__main__":
    # Crawl the first 200 result pages for the user-supplied keyword.
    kw = input("搜索产品：")
    for page in range(1, 201):
        print(page)
        url = f'https://search.jd.com/Search?keyword={kw}&page={page}'
        func(down(url))
