import re
import urllib.request
import random
class JD:
    """Scrape JD.com snack ("零食") search results and product comments.

    ``snack()`` walks search-result pages 1-30, appending one record per
    product to a local text file; for each page it also calls ``comment()``,
    which fetches the first three comment pages of every product on that
    search page into per-product files.

    NOTE(review): all HTTP traffic is routed through a local proxy at
    127.0.0.1:8888 (looks like a Fiddler-style debugging proxy) — runs will
    fail unless such a proxy is actually listening. Confirm before running.
    """

    def __init__(self):
        self.data = ""  # raw HTML of the most recently fetched search page
        self.i = ""     # current search-result page number (set by snack())
        # Plain User-Agent header VALUES. The original list mixed in literal
        # "User-Agent:" prefixes and a leading tab, which produced malformed
        # headers; those entries are fixed here.
        self.uapools = [
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
            "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
        ]
        # One UA is picked per crawler instance (not per request).
        self.ua = random.choice(self.uapools)
        self.ip = "127.0.0.1:8888"  # local debugging proxy — TODO confirm it is running
        self.thisip = urllib.request.ProxyHandler({"http": self.ip})
        self.opener = urllib.request.build_opener(self.thisip, urllib.request.HTTPHandler)
        self.headers = ("User-Agent", self.ua)
        self.opener.addheaders = [self.headers]
        # Installed globally so plain urllib.request.urlopen() uses the
        # proxy + UA everywhere below.
        urllib.request.install_opener(self.opener)

    def snack(self):
        """Crawl snack search-result pages 1-30 and record every product.

        Appends one dict-repr line per product (name, price, seller, comment
        count) to 京东零食.txt, and delegates comment scraping for each page
        to ``comment()``. Per-product failures are logged and skipped.
        """
        for page in range(1, 31):  # JD snack search, pages 1-30
            self.i = page  # comment() reads self.i to name its output files
            print("第" + str(self.i) + "页")
            # JD counts half-pages in the URL: visible page N -> param 2N-1.
            self.url = "https://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&page=" + str(self.i * 2 - 1)
            self.data = urllib.request.urlopen(self.url).read().decode("utf-8", "ignore")
            print(len(self.data))
            self.comment()
            # Regexes for product link / price / seller / comment count.
            self.link = '<div class="p-name p-name-type-2.*? href="(.*?)"'
            self.price = '<div class="p-price".*?<i>(.*?)</i>'
            self.shop = '<div class="p-shop".*?title="(.*?)">'
            self.cnums = '<div class="p-commit".*?<a .*?">(.*?)</a>'
            thislink = re.compile(self.link, re.S).findall(self.data)
            thisprice = re.compile(self.price, re.S).findall(self.data)
            thisshop = re.compile(self.shop, re.S).findall(self.data)
            thiscnums = re.compile(self.cnums, re.S).findall(self.data)
            for k in range(len(thislink)):
                try:
                    thisurl = thislink[k]
                    # Search results use protocol-relative links ("//item.jd.com/...").
                    if not thisurl.startswith("https:"):
                        thisurl = "https:" + thisurl
                    # Product pages are GBK-encoded.
                    thisdata = urllib.request.urlopen(thisurl).read().decode("gbk", "ignore")
                    name = '<title>(.*?)</title>'
                    self.thisname = re.compile(name).findall(thisdata)
                    # A seller title longer than 20 chars is treated as a JD
                    # self-operated listing — presumably a heuristic; verify.
                    if len(thisshop[k]) > 20:
                        thisshop[k] = "京东自营"
                    record = {
                        "商品名称": str(self.thisname[0]),
                        "商品价格": str(thisprice[k]),
                        "商品出售方": str(thisshop[k]),
                        "商品评论数": str(thiscnums[k]),
                    }
                    # with-block guarantees the handle closes even on error;
                    # explicit UTF-8 avoids locale-dependent UnicodeEncodeError.
                    with open("E:\\python\\python练习\\京东零食.txt", "a", encoding="utf-8") as fh:
                        fh.write(str(record) + '\n')
                except Exception as error:
                    # Best-effort scraping: log and skip products that fail.
                    print("存在爬取不成功的零食")
                    print(error)

    def comment(self):
        """Scrape comments for every product on the current search page.

        Uses ``self.data`` (page HTML) and ``self.i`` (page number, for the
        output filename); fetches comment pages 1-3 per product from JD's
        comment endpoint and appends the comment bodies to one file per
        product. Per-product failures are logged and skipped.
        """
        pat = '<div class="p-commit">.*?href="//item.jd.com/(.*?).html'
        rst = re.compile(pat, re.S).findall(self.data)  # product ids on this page
        for j in range(len(rst)):
            try:
                pid = rst[j]
                with open("E:\\python\\python练习\\评论\\第" + str(self.i) + "页第" + str(j + 1) + "个零食评论" + ".txt", "a", encoding="utf-8") as wr:
                    for cpage in range(1, 4):  # first 3 comment pages
                        curl = "https://sclub.jd.com/comment/productPageComments.action?&productId=" + str(pid) + "&score=0&sortType=5&page=" + str(cpage) + "&pageSize=10"
                        # Comment endpoint responds in GBK.
                        cdata = urllib.request.urlopen(curl).read().decode("gbk", "ignore")
                        comment = '{"id":.*?,"topped":.*?,"content":"(.*?)"'
                        for text in re.compile(comment).findall(cdata):
                            wr.write(text + '\n\n')
            except Exception as err:
                print("存在爬取不成功的评论")
                print(err)
# Script entry point: guard so importing this module does not kick off a crawl.
if __name__ == "__main__":
    crawler = JD()
    crawler.snack()
    # NOTE(review): the original also called crawler.comment() here, but
    # snack() already invokes comment() for every page — the extra call only
    # re-scraped the last page's comments, appending duplicates. Removed.
