import requests
from bs4 import BeautifulSoup
import re
import json
import csv
import os
import sys
import io
import time
from collections import OrderedDict

# Fetch a web page's text by URL.
def getHtmlText(url):
    """Fetch *url* and return the response body as decoded text.

    Sends a desktop User-Agent and routes through an HTTP proxy to
    lower the risk of the crawler's real IP being banned.
    """
    # BUG FIX: the original value was '182.217.122.1\t80' (a tab instead
    # of the required 'host:port' form), which requests cannot parse as
    # a proxy URL.
    proxies = {'http': 'http://182.217.122.1:80'}
    r = requests.get(
        url,
        headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)'},
        proxies=proxies,
        timeout=10,  # fail fast instead of hanging forever on a dead proxy
    )
    # JD pages are often gbk/gb2312; let requests sniff the real encoding.
    r.encoding = r.apparent_encoding
    return r.text

# Map the page's native recommend flag (True/False) to '是'/'否'.
def changeRecommnedType(inputBool):
    """Return '是' when *inputBool* is truthy, otherwise '否'.

    Used to render the JSON boolean `recommend` field in Chinese
    for the CSV output.
    """
    # Idiom fix: rely on truthiness instead of comparing with `== True`.
    return '是' if inputBool else '否'

# Build the URL list for snack search-result pages 1-50
# (called once at start-up, used globally).
def getAllPages():
    """Return the search URLs for result pages 1 through 50.

    JD's search pagination uses odd `page` query values (1, 3, 5, ...),
    so UI page N maps to query parameter 2*N - 1.
    """
    allPagesUrlList = []
    # BUG FIX: the original referenced an undefined name `pagetrans`
    # (NameError); the loop variable `page` is what was intended.
    # Also, range(1, 50) yielded only 49 pages; the stated intent is 50.
    for page in range(1, 51):
        singlePageUrl = ('https://search.jd.com/Search?keyword=零食&enc=utf-8'
                         '&qrst=1&rt=1&stop=1&vt=2&page=' + str(page * 2 - 1)
                         + '&s=53&click=0')
        allPagesUrlList.append(singlePageUrl)
    return allPagesUrlList


# Extract the snack detail-page links from one search-result page.
def getsnacksUrl(pageUrl):
    """Return the 'https:'-prefixed product links found on *pageUrl*."""
    soup = BeautifulSoup(getHtmlText(pageUrl), 'html.parser')
    nameDivs = soup.find_all('div', attrs={'class': 'p-name'})
    return ['https:' + div.a.get('href') for div in nameDivs]

# Fetch a snack's price.
def getsnackPrice(snackurl):
    """Return the price string for the item at *snackurl*.

    The detail page does not embed the price directly; the price
    service at p.3.cn (found via packet capture) is queried instead,
    keyed by the sku id sliced out of the item URL.
    """
    skuId = snackurl[20:-5]
    priceText = getHtmlText('https://p.3.cn/prices/mgets?skuIds=J_' + skuId)
    matches = re.findall(re.compile('"p":"(.*?)"'), priceText)
    return matches[0]

# Fetch a snack's display name.
def getsnackName(snackurl):
    """Return the product title scraped from the detail page at *snackurl*."""
    pageSoup = BeautifulSoup(getHtmlText(snackurl), 'html.parser')
    nameDiv = pageSoup.find('div', attrs={'class': 'sku-name'})
    return nameDiv.get_text().strip()

# Fetch a snack's image links (TODO: add a download method).
def getsnackImages(snackurl):
    """Return the 'https:'-prefixed preview-image URLs for *snackurl*.

    Returns an empty list when the page has no spec-items gallery
    (previously this crashed with AttributeError on such pages).
    """
    infoText = getHtmlText(snackurl)
    soup = BeautifulSoup(infoText, 'html.parser')
    imgDiv = soup.find('div', attrs={'class': 'spec-items'})
    # BUG FIX: find() returns None when the gallery div is missing.
    if imgDiv is None:
        return []
    return ['https:' + img.get('src') for img in imgDiv.findAll('img')]

# Fetch a snack's attribute table.
def getsnackProperties(snackurl):
    """Scrape the product-attribute table from the detail page at *snackurl*.

    Returns a dict mapping attribute name (<dt> text) to attribute value
    (<dd> text). <dd class="Ptable-tips"> helper entries are excluded.
    """
    list_value = []
    list_name = []

    infoText = getHtmlText(snackurl)
    soup = BeautifulSoup(infoText, 'html.parser')
    proSection = soup.findAll('div', attrs={'class': 'Ptable-item'})

    for pro in proSection:
        # There is no selector that directly excludes the tip entries,
        # so take the difference between all <dd> tags and the tip ones.
        list_all = pro.find_all('dd')
        list_extracted = pro.find_all('dd', {'class': 'Ptable-tips'})
        for dd in (tag for tag in list_all if tag not in list_extracted):
            list_value.append(dd.string)

        for dt in pro.find_all('dt'):
            list_name.append(dt.string)

    # BUG FIX: the original indexed list_value[i] for every name, which
    # raised IndexError whenever names outnumbered values. zip() pairs
    # only what matches, so a stray unpaired entry no longer crashes.
    return dict(zip(list_name, list_value))

# Fetch the review data for one snack.
def getsnackComments(snackurl):
    """Scrape review data for the item at *snackurl*.

    Returns a pair (commentSummaryDict, userCommentList):
      commentSummaryDict - aggregate review stats (good rate, counts, ...)
      userCommentList    - one dict per individual review

    The response is JSONP; the fixed 27-char wrapper
    'fetchJSON_comment98vv10636(' and the trailing ');' are stripped
    before json.loads. Crawling is capped at 20 pages of 10 reviews.
    """
    snackId = snackurl[20:-5]
    commentStartUrl = 'https://club.jd.com/comment/productPageComments.action?callback=' \
                      'fetchJSON_comment98vv10636&productId=' + snackId + '&score=0&sortType=5&page=0&pageSize=10'
    htmlText = getHtmlText(commentStartUrl)
    jsonText = json.loads(htmlText[27:-2])
    # Total page count, used to drive the per-page crawl below.
    maxPage = jsonText['maxPage']

    # Aggregate review statistics.
    commentSummaryDict = {}
    commentSummary = jsonText['productCommentSummary']
    commentSummaryDict.update({'好评率': str(commentSummary['goodRateShow']) + '%'})
    commentSummaryDict.update({'评论数': commentSummary['commentCountStr']})
    commentSummaryDict.update({'晒图': jsonText['imageListCount']})
    commentSummaryDict.update({'追评数': commentSummary['afterCountStr']})
    commentSummaryDict.update({'好评数': commentSummary['goodCountStr']})
    commentSummaryDict.update({'中评数': commentSummary['generalCountStr']})
    commentSummaryDict.update({'差评数': commentSummary['poorCountStr']})

    # Collect the individual review entries, capped at 20 pages.
    userCommentList = []
    if maxPage > 20:
        maxPage = 20
    for commentPage in range(0, maxPage):
        commentPageUrl = 'https://club.jd.com/comment/productPageComments.action?callback=' \
                         'fetchJSON_comment98vv10636&productId=' + snackId + '&score=0&sortType=5&' \
                         'page=' + str(commentPage) + '&pageSize=10'
        commentHtmlText = getHtmlText(commentPageUrl)
        # Review pages vary in shape; skip any page that fails to parse.
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the errors this body can raise:
        # malformed JSON (ValueError), missing keys (KeyError), and
        # None + str concatenation (TypeError).
        try:
            commentJsonText = json.loads(commentHtmlText[27:-2])
            comments = commentJsonText['comments']

            for comment in comments:
                commentsInfo = {}
                commentsInfo.update({'昵称': comment['nickname']})
                commentsInfo.update({'用户等级': comment['userLevelName']})
                commentsInfo.update({'评论星级': str(comment['score']) + '星'})
                commentsInfo.update({'内容': comment['content']})
                commentsInfo.update({'商品名称': comment['productColor'] + ',' + comment['productSize']})
                commentsInfo.update({'发表时间': comment['creationTime']})
                commentsInfo.update({'点赞数': comment['usefulVoteCount']})
                commentsInfo.update({'评论回复次数': comment['replyCount']})
                commentsInfo.update({'是否推荐': changeRecommnedType(comment['recommend'])})
                commentsInfo.update({'客户端': comment['userClientShow']})

                userCommentList.append(commentsInfo)
        except (KeyError, TypeError, ValueError):
            continue
        print('******正在爬取第'+str(commentPage)+'页评论')

    return commentSummaryDict, userCommentList

# Aggregate all of one snack's features into a single dict.
def getsnackInfo(snackurl):
    """Combine price, name, images and review data for *snackurl*.

    Value types: 价格/名称 are strings, 图片链接 is a list of strings,
    零食整体评价 is a dict, 零食全部评价内容 is a list of dicts.
    """
    snackInfo = {
        '价格': getsnackPrice(snackurl),
        '名称': getsnackName(snackurl),
        '图片链接': getsnackImages(snackurl),
    }
    summaryDict, commentList = getsnackComments(snackurl)
    snackInfo['零食整体评价'] = summaryDict
    snackInfo['零食全部评价内容'] = commentList
    return snackInfo

if __name__ == '__main__':
    # Crawl page 1 of the snack search results, write one CSV of reviews
    # per snack plus a single summary CSV over all snacks.
    url = 'https://search.jd.com/Search?keyword=零食&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=1&s=53&click=0'
    rootPath = 'jdls/'
    snackInfoAll = []

    snackUrls = getsnacksUrl(url)
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter()
    # is the documented replacement for elapsed-time measurement.
    startTime = time.perf_counter()
    if not os.path.exists(rootPath):
        os.makedirs(rootPath)
    # enumerate() replaces snackUrls.index(snackurl), which was an O(n)
    # scan per iteration and reported the wrong ordinal for duplicates.
    for ordinal, snackurl in enumerate(snackUrls, start=1):
        # Skip ads / shop links that are not item detail pages.
        if '//item.jd.com/' not in snackurl:
            continue
        print('正在爬取第', str(ordinal), '个零食......')

        info = getsnackInfo(snackurl)
        snackInfoAll.append(info)

        snackName = info['名称']

        # Create a per-snack folder; skip snacks already crawled
        # (search results contain duplicate listings).
        # NOTE(review): snackName may contain characters invalid in
        # file names — confirm against real data.
        dirPathToMake = rootPath + snackName
        if os.path.exists(dirPathToMake):
            continue
        else:
            os.makedirs(dirPathToMake)

        # Write the individual reviews to a per-snack CSV
        # (gb18030 so the Chinese text opens cleanly in Excel).
        commentsHeader = ['昵称', '用户等级', '评论星级', '内容',  '发表时间', '点赞数', '评论回复次数',
                          '是否推荐', '客户端']
        userCommentList = info['零食全部评价内容']
        with open(rootPath + snackName + '/' + 'comments.csv', 'w', newline='', encoding='gb18030') as wcom:
            writer = csv.writer(wcom)
            writer.writerow(commentsHeader)
            for comment in userCommentList:
                writer.writerow([comment[field] for field in commentsHeader])

    # After crawling everything, write one summary row per snack.
    headers = ['名称', '价格', '好评率', '评论数', '晒图', '追评数', '好评数', '中评数', '差评数']
    with open(rootPath+'snacksInfo.csv', 'a', newline='', encoding='gb18030') as file:
        fwriter = csv.writer(file)
        fwriter.writerow(headers)
        for snack in snackInfoAll:
            summary = snack['零食整体评价']
            fwriter.writerow([snack['名称'], snack['价格'], summary['好评率'], summary['评论数'],
                              summary['晒图'], summary['追评数'], summary['好评数'],
                              summary['中评数'], summary['差评数']])
    # Report total elapsed wall-clock time.
    endTime = time.perf_counter()
    print('所有零食爬取完毕,程序耗费的时间为：', endTime - startTime)






