from urllib import request,error,parse
import re
import time
import os
import random

# JD search URL for "零食" (snacks), keyword URL-encoded as UTF-8
jingdongurl = "https://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&wq;=%E9%9B%B6%E9%A3%9F&pvid=ss7fo4wi.ri9kq2"

# User-Agent strings to rotate through when faking a browser
uapools = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
            "Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;Maxthon2.0)",
            "Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1)",
            # FIX: a missing comma here made this string and the next one
            # concatenate into a single bogus UA entry (list had 4 items, not 5)
            "Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;360SE)",
            "Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1"
]

# API endpoint that returns 10 proxy IPs per request
ipagent = "http://www.89ip.cn/apijk/?&tqsl=10&sxa=&sxb=&tta=&ports=&ktip=&cf=1"

# Shared request headers; get_urlist() fills in the Referer at runtime
headers ={}

#Fetch proxy IPs
def ip():
    """Fetch a batch of proxy addresses from the 89ip API.

    Retries up to 10 times on network errors. Returns a list of
    "host:port" strings on success, or False when every attempt failed
    (callers test the result for truthiness).
    """
    # FIX: raw string for the regex (the old '\d'/'\:' escapes trigger
    # invalid-escape warnings; '\:' was a redundant escape), pattern
    # compiled once instead of per retry, and a dead `ippools = []`
    # assignment that was immediately overwritten has been dropped.
    ippat = re.compile(r'\d+\.\d+\.\d+\.\d+:\d+')
    for _ in range(10):
        try:
            ipall = request.urlopen(ipagent, timeout=3).read().decode("UTF-8", "ignore")
            return ippat.findall(ipall)
        except Exception as err:
            # urllib errors may carry an HTTP status code and/or a reason
            if hasattr(err, "code"):
                print(err.code)
            if hasattr(err, "reason"):
                print(err.reason)
    return False

#Install a global opener using a proxy IP and a faked browser User-Agent
def ua_ip(ippools,time,uapools):
    """Pick a random User-Agent and the *time*-th proxy from *ippools*,
    then install a global urllib opener that routes HTTP requests
    through that proxy and sends the chosen User-Agent header.
    """
    ua = random.choice(uapools)
    print(ua)
    proxy_addr = ippools[time]
    print(proxy_addr)
    handler = request.ProxyHandler({"http": proxy_addr})
    opener = request.build_opener(handler, request.HTTPHandler)
    opener.addheaders = [("User-Agent", ua)]
    # from here on, plain request.urlopen() goes through this opener
    request.install_opener(opener)

#Fetch the product-id list for one logical search-result page
def get_urlist(time):
    """Return the product ids (sku ids) for search-result page *time*.

    JD renders each logical result page as two physical halves: the
    first half is a normal document request, the second half is loaded
    by the browser from the s_new.php AJAX endpoint when the user
    scrolls. This fetches both halves and concatenates the ids found.

    time -- zero-based logical page index.
    Side effect: sets 'Referer' in the module-level ``headers`` dict
    (the AJAX half rejects requests without a plausible Referer).
    """
    # FIX: removed large spans of dead commented-out code (an abandoned
    # URL-scraping approach); logic is otherwise unchanged.
    page = time*2+1   # JD's `page` parameter counts half-pages (odd = first half)
    s = time*26+1     # result offset of the first half-page
    if (time == 0):
        data = request.urlopen(jingdongurl, timeout=30).read().decode("UTF-8", "ignore")
    else:
        url = 'https://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page='+str(page)+'&s='+str(s)
        data = request.urlopen(url, timeout=30).read().decode("UTF-8", "ignore")
    # product id appears as <strong class="J_<pid>"> in the listing markup
    pat_pid = '<strong class="J_(.*?)"'
    pidlist1 = re.compile(pat_pid).findall(data)
    headers['Referer'] = 'https://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page='+str(page)+'&s='+str(s)
    # second (scrolled) half: even page number, offset advanced by 26
    request_url = 'https://search.jd.com/s_new.php?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page='+str(page+1)+'&s='+str(s+26)+'&scrolling=y'
    req = request.Request(request_url,None,headers)
    data = request.urlopen(req).read().decode("UTF-8","ignore")
    pidlist2 = re.compile(pat_pid).findall(data)
    return pidlist1 + pidlist2

#Fetch a product's name, price, seller and comment count
def get_detail(url,pid):
    """Scrape one product's details from its item page and JD's
    price/comment services.

    url -- product page URL (https://item.jd.com/<pid>.html)
    pid -- product id (sku id)

    Returns [name, price, seller, comment_count] on success, or None
    when any request/parse step fails (the error is printed).
    """
    # FIX: the exception path used to fall off the end and return None
    # implicitly; made explicit and documented so callers know to check.
    # Dead commented-out debug prints removed; logic unchanged.
    detail = []
    try:
        # product name, taken from the breadcrumb of the item page
        data = request.urlopen(url, timeout=30).read().decode("gbk", "ignore")
        pat_name = '<div class="item ellipsis" title=".*?">(.*?)</div>'
        name = re.compile(pat_name,re.S).findall(data)
        detail.append(name[0])
        # price comes from a separate JSON price service
        price_url = 'https://p.3.cn/prices/mgets?skuIds=J_'+str(pid)
        price_data = request.urlopen(price_url, timeout=30).read().decode("gbk", "ignore")
        pat_price = '"p":"(.*?)"'
        price = re.compile(pat_price).findall(price_data)
        detail.append(price[0])
        # seller name; JD-operated items have no seller block on the page
        pat_seller = '<div class="name">.*?<a href=".*?" target="_blank" title=".*?".*?>(.*?)</a>'
        seller = re.compile(pat_seller,re.S).findall(data)
        if seller:
            detail.append(seller[0])
        else:
            detail.append('JD自营')
        # comment count from the comment JSONP endpoint
        comment_url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv54&productId='+str(pid)+'&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'
        comment_data = request.urlopen(comment_url,timeout=30).read().decode("gbk", "ignore")
        pat_comment_num = 'commentCountStr":"(.*?)"'
        comment_num = re.compile(pat_comment_num).findall(comment_data)
        detail.append(comment_num[0])
        return detail
    except Exception as err:
        print(err)
        if hasattr(err, "code"):
            print(err.code)
        if hasattr(err, "reason"):
            print(err.reason)
        return None

#Fetch up to 3 pages of user comments for a product
def get_comments(pid):
    """Return a list of comment strings for product *pid*.

    Fetches the first three pages (10 comments each) of JD's comment
    endpoint and concatenates every "content" field found.
    """
    # FIX 1: `re._compile(pat)` called a private function that requires a
    # `flags` argument and raised TypeError at runtime — must be re.compile.
    # FIX 2: the `return` sat inside the loop, so only page 0 was ever
    # fetched and the 3-page loop was dead; now accumulate across pages.
    pat_comment = re.compile('"content":"(.*?)"')
    comments = []
    for i in range(0, 3):
        comment_url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv54&productId='+str(pid)+'&score=0&sortType=5&page='+str(i)+'&pageSize=10&isShadowSku=0&fold=1'
        comment_data = request.urlopen(comment_url, timeout=30).read().decode("gbk", "ignore")
        comments += pat_comment.findall(comment_data)
    return comments

# Entry point: crawl the JD snack search pages, write product details to
# jingdong_data.txt and per-product comments into the comment/ directory.
data_file = "jingdong_data.txt"
dir_comments = "comment"
if not os.path.exists(dir_comments):
    os.mkdir(dir_comments)
fh = open(data_file, 'w', encoding='gbk')
# FIX: close the output file even when an exception escapes — previously
# fh.close() sat inside the try body and was skipped on error.
try:
    ippools = ip()
    if ippools:
        for i in range(0, 10):
            # install the i-th proxy with a random user agent
            ua_ip(ippools, i, uapools)
            try:
                # FIX: loop variable renamed from `time`, which shadowed
                # the imported time module
                for page_idx in range(0, 35):
                    print("第"+str(page_idx+1)+"页")
                    jdlist = get_urlist(page_idx)
                    print(jdlist)
                    for pid in jdlist:
                        url = "https://item.jd.com/"+str(pid)+".html"
                        detail = get_detail(url, pid)
                        if detail is None:
                            # FIX: get_detail returns None on failure;
                            # indexing it crashed with a TypeError before
                            continue
                        write_data = '{"商品名":'+str(detail[0])+',"商品价格":'+str(detail[1])+'元,"商品出售方":'+str(detail[2])+',"商品评论数":'+str(detail[3])+'}\n'
                        print(str(write_data))
                        fh.write(write_data)
                        # FIX: comment files now go into the prepared
                        # `comment` directory — the old code created that
                        # directory but wrote to a hardcoded D:\ path
                        # (with an invalid '\p' escape) instead; `with`
                        # guarantees the file is closed on error.
                        pinglun_file = os.path.join(dir_comments, str(pid)+"pinglun.txt")
                        with open(pinglun_file, 'w', encoding='gbk') as pinglun_fh:
                            for comment in get_comments(pid):
                                pinglun_fh.write(comment+'\n')
            except Exception as err:
                if hasattr(err, "code"):
                    print(err.code)
                if hasattr(err, "reason"):
                    print(err.reason)
            # original behaviour preserved: only the first proxy is used
            break
    else:
        print("IP Agent is broken!!")
finally:
    fh.close()
