import fake_ip_ua as fu
import urllib.request as ur
import re
import os
import time
import requests as rs
from bs4 import BeautifulSoup as bp
# Collect all item URLs for a search keyword
def getAllUrls(key, pages=30):
    """Collect product detail-page URLs for a JD search keyword.

    Args:
        key: search keyword (URL-quoted before use).
        pages: number of search result pages to scan; JD uses odd
            ``page`` values 1, 3, 5, ... for full pages. Default 30
            preserves the original behavior.

    Returns:
        list of 'https://item.jd.com/<sku>.html' URLs, in page order.
    """
    quoted = ur.quote(key)
    # JD search paginates with odd page numbers: page=1, 3, 5, ...
    search_urls = [
        'https://search.jd.com/Search?keyword=' + quoted
        + '&enc=utf-8&page=' + str(2 * i + 1)
        for i in range(pages)
    ]
    # each result <li> carries its SKU id in the data-sku attribute;
    # compile once instead of per page
    sku_pat = re.compile(r'li class="gl-item" data-sku="(.*?)"')
    item_urls = []
    for url in search_urls:
        html = getHtml(url)
        for sku in sku_pat.findall(html):
            item_urls.append('https://item.jd.com/' + sku + '.html')
    return item_urls


# requests feels more stable for crawling than urllib here
def getHtml(urls, timeout=30):
    """Fetch a page and return its decoded HTML text.

    The previous implementation re-encoded the detected text and then
    decoded it as GBK with errors ignored; JD pages are served as
    UTF-8, so that roundtrip silently corrupts multi-byte characters.
    Decoding once with the sniffed (apparent) encoding keeps them intact.

    Args:
        urls: the page URL to fetch (single URL despite the name,
            kept for backward compatibility).
        timeout: seconds before the request is aborted (new, default 30)
            so a stalled server cannot hang the whole crawl.

    Returns:
        The response body as a str.
    """
    r0 = rs.get(urls, timeout=timeout)
    # apparent_encoding sniffs the real charset from the body, which is
    # more reliable than the (often missing) Content-Type header
    r0.encoding = r0.apparent_encoding
    return r0.text

# Scrape the product name from an item page
def getName(phoneurl):
    """Return the product title text of a JD item page, stripped."""
    page = bp(getHtml(phoneurl), 'html.parser')
    title_node = page.find('div', attrs={'class': 'sku-name'})
    return title_node.get_text().strip()

# Look up the item's price via JD's price service
def getPrice(earphoneurl):
    """Query JD's price service for the item referenced by *earphoneurl*.

    Args:
        earphoneurl: item page URL like 'https://item.jd.com/<sku>.html'.

    Returns:
        list of price strings found in the service response (may be empty).
    """
    # anchor the SKU id to '/<digits>.html'; the original '.(\d+).'
    # used unescaped dots and could latch onto any digit run in the URL
    id_phone = re.search(r'/(\d+)\.html', earphoneurl).group(1)
    # price lookup endpoint
    serch_url = 'http://p.3.cn/prices/mgets?skuIds=J_' + id_phone
    html = getHtml(serch_url)
    return re.findall(r'"p":"(\d+\.\d+)"', html)

# Scrape the seller (shop) name from an item page
def getSaler(earphoneurl):
    """Return the first seller name found on a JD item page.

    Raises IndexError if the pattern matches nothing, matching the
    original behavior.
    """
    html = getHtml(earphoneurl)
    seller_pat = re.compile('a href=".*?" target="_blank" title="(.*?)" clstag')
    sellers = seller_pat.findall(html)
    return sellers[0]

# Fetch the poor/general/good/total comment counts for an item
def getCommentsNum(earphoneurl):
    """Return comment counts for the item referenced by *earphoneurl*.

    Args:
        earphoneurl: item page URL like 'https://item.jd.com/<sku>.html'.

    Returns:
        dict mapping '差评'/'中评'/'好评'/'总评价数' to the count
        strings reported by JD's comment service.

    Raises:
        IndexError: if a field is missing from the service response
            (same as the original's unguarded ``[0]`` indexing).
    """
    # anchor the SKU id to '/<digits>.html'; the original '.(\d+).'
    # used unescaped dots and could latch onto any digit run
    id_phone = re.search(r'/(\d+)\.html', earphoneurl).group(1)
    com_url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv1694&productId=' + id_phone + '&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'
    data = getHtml(com_url)
    commentNum = {}
    # the response is JSONP, so pull each field out with a regex;
    # (response field name, output key) pairs replace four copy-pasted
    # pattern/findall blocks
    fields = [('poorCountStr', u'差评'),
              ('generalCountStr', u'中评'),
              ('goodCountStr', u'好评'),
              ('commentCountStr', u'总评价数')]
    for field, label in fields:
        commentNum[label] = re.findall('"' + field + '":"(.*?)"', data)[0]
    return commentNum

# Crawl the first *num* comment pages (10 comments per page)
def getComments(earphoneurl, num):
    """Scrape *num* pages of reviews (10 per page) for an item.

    Args:
        earphoneurl: item page URL like 'https://item.jd.com/<sku>.html'.
        num: number of comment pages to fetch.

    Returns:
        list of comment strings with HTML fragments removed and
        duplicates dropped, original order preserved.
    """
    # anchor the SKU id to '/<digits>.html'; the original '.(\d+).'
    # used unescaped dots and could latch onto any digit run
    id_phone = re.search(r'/(\d+)\.html', earphoneurl).group(1)
    content_pat = re.compile(r'"content":"(.*?)"')
    comments = []
    for page in range(num):
        url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv1694&productId=' + id_phone + '&score=0&sortType=6&page=' + str(
            page) + '&pageSize=10&isShadowSku=0&fold=1'
        data = ur.urlopen(url).read().decode('GBK', 'ignore')
        comments.extend(content_pat.findall(data))
    # drop matches that are really HTML fragments, not review text
    comments = [c for c in comments if 'div' not in c]
    # de-duplicate while preserving order; the original second pass
    # only removed *adjacent* duplicates despite its stated intent of
    # removing duplicate comments — dict.fromkeys removes them all
    return list(dict.fromkeys(comments))

# Aggregate all scraped data for one item into a single dict
def getInfo(earphoneurl):
    """Gather seller, comment counts, price and name for one item URL.

    The scraping calls run in the same order as before (seller,
    comment counts, price, name); dict-literal values are evaluated
    left to right, so network access order is unchanged.
    """
    return {
        '卖家': getSaler(earphoneurl),
        '评价数': getCommentsNum(earphoneurl),
        '价格': getPrice(earphoneurl),
        '名称': getName(earphoneurl),
    }


if __name__ == '__main__':
    # install a browser-like User-Agent for the urllib path
    # (fake_ip_ua supplies the header tuple; requests calls are unaffected)
    header = fu.headers
    opener = ur.build_opener()
    opener.addheaders = [header]
    ur.install_opener(opener)
    urls = getAllUrls('耳机')
    # create the output folder and the comment subfolder;
    # makedirs(exist_ok=True) replaces the exists()/mkdir() dance and
    # creates intermediate directories as needed
    os.makedirs('D:/week3/', exist_ok=True)
    os.makedirs('D:/week3/comment/', exist_ok=True)
    earphoneInfoAll = []
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing
    startTime = time.perf_counter()
    # enumerate replaces urls.index(url), which was O(n) per iteration
    # and reported the wrong index for duplicate URLs
    for index, url in enumerate(urls, start=1):
        print('...正在爬取第' + str(index) + '部耳机...')
        info = getInfo(url)
        earphoneInfoAll.append(info)
        # strip characters that are illegal in Windows filenames
        earName = re.sub(r'[\/:*?"<>|]', '-', info['名称'])
        phonePath = 'D:/week3/comment/' + earName + '.txt'
        comments = getComments(url, 3)
        # 'w' truncates and writes in one pass (the old code opened,
        # closed, then reopened in append mode); explicit utf-8 avoids
        # UnicodeEncodeError under Windows' default GBK codec, and
        # 'with' guarantees the file is closed
        with open(phonePath, 'w', encoding='utf-8') as f:
            for item in comments:
                f.write(item)
                f.write('\n')
        print('done!')
    with open('D:/week3/earphone_data.txt', 'w', encoding='utf-8') as f:
        for item in earphoneInfoAll:
            f.write(str(item))
            f.write('\n')
    endTime = time.perf_counter()
    print('爬取完毕，耗时为：', endTime - startTime)









