# Basic requirement: crawl one JD.com channel (https://search.jd.com/Search?keyword=零食&enc=utf-8&wq=零食&pvid=ss7fo4wi.ri9kq2)
# for each product's name, price, seller and comment count, store the results in a file, and cover 30-50 pages.
# Bonus requirement (worth 20 extra points): also crawl each product's comments — at least 2 pages of comments
# per product (where the product has that many).


import urllib.request
import re
import ssl
import sys

class getData:
    """Scraper for jd.com search results (keyword: 零食 / "snacks").

    getDataDetl() walks the paginated search results, extracts each product's
    name, price, seller and comment count with regexes, and writes one
    comma-separated record per product to a text file.
    getCommitDetlInfo() fetches the JSONP comment feed for a single product.
    """

    def getDataDetl(self, save_path="/Users/ywj/Desktop/data.txt", pages=7,
                    fetch_comments=False):
        """Scrape search result pages 1..pages and write records to save_path.

        Parameters
        ----------
        save_path : str
            Output file path; one "name,price,shop,comment_count" line is
            written per fully parsed product.
        pages : int
            Number of result pages to fetch (defaults to 7, matching the
            original hard-coded range(1, 8)).
        fetch_comments : bool
            When True, also fetch the comment feed of each product via
            getCommitDetlInfo() (the original code had this call
            commented out).
        """
        # Install a browser-like User-Agent once — not once per page as
        # before — so JD does not reject the requests as bot traffic.
        headers = ("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36")
        opener = urllib.request.build_opener()
        opener.addheaders = [headers]
        urllib.request.install_opener(opener)
        # Unverified SSL context kept from the original code so the script
        # also works where the local CA bundle is broken; do not reuse this
        # approach for anything security-sensitive.
        context = ssl._create_unverified_context()

        # Compile every pattern once, outside the page/item loops.
        item_pat = re.compile(
            r'<li class="gl-item" data-sku="\d*" data-spu="\d*" data-pid="\d*">'
            r'.*?<div class="gl-i-wrap">.*?<div class="p-img">(.*?)</div>'
            r'\s*</div>\s*</li>', re.S)
        name_pat = re.compile(
            r'<div class="p-name p-name-type-2.*?>\s*<a target=.*?>(.*?)<i',
            re.S)
        # Strips residual markup out of the matched product-name fragment.
        tag_pat = re.compile(r'<.*?>(.*?)<.*?>', re.S)
        price_pat = re.compile(
            r'<div class="p-price">\s*<.*?><em>￥</em><i>(.*?)</i>', re.S)
        shop_pat = re.compile(
            r'<div class="p-shop".*?title=".*?">\s*(.*?)<', re.S)
        commit_pat = re.compile(
            r'<div class="p-commit">.*?<a id="J_comment.*?">(.*?)</a>', re.S)
        sku_pat = re.compile(
            r'<div class="p-focus"><a class="J_focus" data-sku="(.*?)"', re.S)

        # `with` guarantees the file is closed even if a request raises;
        # explicit utf-8 avoids depending on the platform default encoding.
        with open(save_path, 'w', encoding='utf-8') as out:
            for page in range(1, pages + 1):
                url = ("https://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F"
                       "&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=" + str(page)
                       + "&s=52&click=0")
                data = urllib.request.urlopen(
                    url, context=context).read().decode("utf-8")

                for item in item_pat.findall(data):
                    # The name fragment still contains nested tags; join the
                    # plain-text pieces between them.
                    name_frag = name_pat.findall(item)
                    name = ("".join(tag_pat.findall(name_frag[0]))
                            if name_frag else "")

                    price_m = price_pat.findall(item)
                    price = price_m[0] if price_m else ""

                    shop_m = shop_pat.findall(item)
                    shop = shop_m[0] if shop_m else ""

                    commit_m = commit_pat.findall(item)
                    commit = commit_m[0] if commit_m else ""

                    # Only write complete records, as the original code did.
                    if name and price and shop and commit:
                        out.write(name + ',' + price + ',' + shop + ','
                                  + commit + '\r\n')

                    if fetch_comments:
                        sku = sku_pat.findall(item)
                        if sku:
                            self.getCommitDetlInfo(sku[0], save_path)

    def getCommitDetlInfo(self, sPid, sSavePath):
        """Fetch (and print) the first two comment pages for product sPid.

        Parameters
        ----------
        sPid : str
            JD product id (the data-sku attribute from the search markup).
        sSavePath : str
            Kept for interface compatibility; the original implementation
            only printed the raw response and never wrote it to disk.
        """
        context = ssl._create_unverified_context()
        # The assignment asks for at least 2 comment pages; the original
        # loop (range(0, 1)) fetched only page 0.
        for page in range(0, 2):
            url = ("https://sclub.jd.com/comment/productPageComments.action"
                   "?callback=fetchJSON_comment98vv24629&productId=" + sPid
                   + "&score=0&sortType=5&page=" + str(page)
                   + "&pageSize=10&isShadowSku=0&rid=0&fold=1")
            data = urllib.request.urlopen(url, context=context).read()
            # NOTE(review): the response is JSONP bytes; parsing/decoding is
            # left as-is to match the original behaviour.
            print(data)

if __name__ == '__main__':
    # Script entry point: run the product-listing scraper.
    scraper = getData()
    scraper.getDataDetl()
