#用XPath爬取商品评论信息
from time import sleep

from selenium import webdriver
from selenium.webdriver.common.by import By

# --- Scrape comment text with XPath ---
url = 'https://item.jd.com/100014352501.html'  # product page whose comments we scrape
browser = webdriver.Edge()  # launch Edge (driver must be on PATH)
browser.get(url)  # load the product page

# Click the product-reviews tab so the comment pane is loaded via AJAX.
# find_element_by_xpath() was removed in Selenium 4;
# find_element(By.XPATH, ...) works on both Selenium 3.x and 4.x.
button = browser.find_element(By.XPATH, "//li[@clstag='shangpin|keycount|product|shangpinpingjia_1']")
button.click()
sleep(10)  # give the AJAX-loaded comments time to render

u = []   # user names
c = []   # comment texts
s = []   # per-comment like counts
ph = []  # colour / model / purchase-time line
# pi = []  # (pictures — collected separately below)

# Intended coverage: 10 pages at 10 comments each, ~100 records total.
for page in range(10):
    # Grab the four parallel element lists for the currently shown page.
    users = browser.find_elements(By.XPATH, "//div[@class='user-info']")
    comments = browser.find_elements(By.XPATH, "//p[@class='comment-con']")
    stars = browser.find_elements(By.XPATH, "//a[@class='J-nice']")
    phones = browser.find_elements(By.XPATH, "//div[@class='order-info']")

    # zip() walks the four lists in lockstep and stops at the shortest,
    # so a missing element can no longer raise IndexError.  (The original
    # inner loop also reused the name `i`, shadowing the page counter.)
    for user, comment, star, phone in zip(users, comments, stars, phones):
        u.append(user.text)
        c.append(comment.text)
        s.append(star.text)
        ph.append(phone.text)

    # BUG in the original: the next-page click was commented out, so the
    # same first page was scraped 10 times.  Advance AFTER collecting the
    # current page's text, then wait for the AJAX pager to load.
    next_button = browser.find_element(By.CLASS_NAME, "ui-pager-next")
    next_button.click()
    sleep(10)

# Keep the user names and their comments aligned: one dict of four
# parallel columns (keys are the Chinese column labels used downstream).
D = {'用户名': u, '评论': c, '评论点赞数': s, '商品信息和评论时间': ph}

# One concatenated string per scraped record.  The original hard-coded
# range(100), which raises IndexError whenever fewer than 100 records were
# scraped, and bound the builtin name `sum`; zip() stops at the shortest
# list and needs no index at all.
Data = [user + comment + star + phone
        for user, comment, star, phone in zip(u, c, s, ph)]

from pymongo import MongoClient  # MongoDB driver

client = MongoClient()  # connect to the local MongoDB server (default host/port)
database = client['京东评论Xpath']  # database is created lazily on first write
collection = database['JD']
# Collection.insert() was deprecated in PyMongo 3 and removed in PyMongo 4.
# D is a single document (one dict of parallel lists), so insert_one is
# the correct one-shot save.
collection.insert_one(D)







## --- Fetch the comment thumbnails and download them ---
from bs4 import BeautifulSoup as bs

url = 'https://item.jd.com/100014352501.html'  # same product page as above
browser = webdriver.Edge()  # fresh browser session
browser.get(url)

# Open the reviews tab so the thumbnail anchors appear in the page source.
button = browser.find_element(By.XPATH, "//li[@clstag='shangpin|keycount|product|shangpinpingjia_1']")
button.click()
sleep(2)

html = browser.page_source  # rendered source, after the AJAX load
# 'html' is not a registered parser name — BeautifulSoup only warns and
# guesses a parser.  'html.parser' selects the stdlib parser explicitly,
# giving deterministic parsing across machines.
soup = bs(html, 'html.parser')
pics = soup.find_all('a', class_="J-thumb-img")  # thumbnail anchor tags

# Each anchor's first child is the thumbnail <img>; collect every tag's
# src attribute into one list of image links.
picture = [list(anchor)[0]["src"] for anchor in pics]

# Some links are protocol-relative ("//img10.360buyimg.com/...") and cannot
# be downloaded as-is, so prepend a scheme to those.  Checking
# startswith('http') (instead of the original "== 'https:'" slice test)
# also leaves plain http:// links untouched — the original mangled them
# into "https:http://...".
P = [link if link.startswith('http') else 'https:' + link
     for link in picture]

import requests as rq

# Pretend to be a regular desktop browser so the CDN serves the images.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
for i, url_p in enumerate(P):
    response = rq.get(url_p, headers=header)
    # Fail loudly on 4xx/5xx instead of silently saving an error page as a .jpg.
    response.raise_for_status()
    # `with` guarantees the file is closed even if the write raises
    # (the original open()/close() pair leaked the handle on error).
    with open('图片' + str(i) + '.jpg', 'wb') as fp:
        fp.write(response.content)

