"""

功能：GUI设计试验场
最终实现：零食销售排行及分析软件

"""

"""
第一步 
开始时间:2020/2/23
任务：完成爬虫工作
下一步：学习gui和数据库
结束时间：2020/5/20

"""
import requests
import csv
from lxml import etree


#头部
# HTTP request headers shared by the search and comment endpoints.
headers = {
    # Header VALUE only — the original mistakenly embedded the
    # "User-Agent: " header name inside the value, sending a malformed UA.
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
    # JD's search host expects this pseudo-authority header.
    'authority': 'search.jd.com',
}

#使用新操作，使用商品id直接进入club页面，整理数据
# New approach: pull the product id (data-sku) from the search listing and
# query the club comment-summary endpoint for each product.
def get_all():
    """Scrape up to 11 JD search pages for '零食' (snacks) and append one
    CSV row per product to JD_lingshi.csv.

    Row layout: name part 1, name part 2, price, then nine comment
    statistics: total comments, average score, good count, default-good
    count, good rate, after-sale count, video count, poor count,
    general count.

    Fixes over the previous revision:
    - open the CSV once (the old code re-opened it per item and then
      called ``f.close()`` on a name the ``with`` block had already
      closed — a NameError if the loops never ran);
    - skip listing slots whose xpath queries match nothing instead of
      crashing with IndexError (the page lazy-loads on scroll, so only
      ~30 of 60 items exist in the static HTML);
    - write the second name fragment as text (or '') instead of the raw
      xpath result list.
    """
    base = ('https://search.jd.com/search?keyword=零食&enc=utf-8&qrst=1&rt=1'
            '&stop=1&vt=2&ds=1&wq=零食&psort=3')
    item_xpath = '//*[@id="J_goodsList"]/ul/li['  # common prefix, per-slot

    with open('JD_lingshi.csv', 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for page in range(0, 11):
            s = page * 60 + 1  # JD's 's' offset: 60 items per logical page
            url = (base + '&page=' + str(2 * page + 1)
                   + '&s=' + str(s) + '&click=0')
            r = requests.get(url, headers=headers, timeout=60)
            r.encoding = 'utf-8'
            html_new = etree.HTML(r.text, etree.HTMLParser())

            for li in range(1, 31):
                # Dynamic scrolling means not all 60 slots are present.
                slot = item_xpath + str(li) + ']'
                commodity_id = html_new.xpath(slot + '/attribute::data-sku')
                if not commodity_id:
                    continue  # missing slot — skip instead of IndexError
                print(commodity_id)

                # Fetch aggregated comment statistics from the club page.
                true_url = ('https://club.jd.com/comment/'
                            'productCommentSummaries.action?referenceIds='
                            + str(commodity_id[0]))
                comment_r = requests.get(true_url, headers=headers,
                                         timeout=60)

                p_name1 = html_new.xpath(slot + '/div/div[3]/a/em/text()[1]')
                p_name2 = html_new.xpath(slot + '/div/div[3]/a/em/text()[2]')
                p_price = html_new.xpath(slot + '/div/div[2]/strong/i/text()')
                # Some items (e.g. price switchable variants) lack fields.
                if not p_name1 or not p_price:
                    continue
                name2 = p_name2[0] if p_name2 else ''

                for comment in comment_r.json()["CommentsCount"]:
                    # total, avg score, good, default-good, good rate,
                    # after-sale, video, poor, general
                    writer.writerow([
                        p_name1[0], name2, p_price[0],
                        comment["CommentCount"], comment["AverageScore"],
                        comment["GoodCount"], comment["DefaultGoodCount"],
                        comment["GoodRate"], comment["AfterCount"],
                        comment["VideoCount"], comment["PoorCount"],
                        comment["GeneralCount"]])

            print("第{}页加载入文件完成...".format(page))




#爬取商品id
# def get_id():
#     # s = page * 60 +1
#     for page in range(1, 11):
#         s = page * 60 + 1
#         url = 'https://search.jd.com/search?keyword=零食&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&ds=1&wq=零食&psort=3' + '&page=' + str(
#             2 * page + 1) + '&s=' + str(s) + '&click=0'
#         r = requests.get(url, headers=headers, timeout=20)
#         r.encoding='utf-8'
#         html_1 = etree.HTML(r.text, etree.HTMLParser())
#         for li in range(1, 31):
#             # 因为动态下滑，无法爬取60个
#             shuzi = html_1.xpath('//*[@id="J_goodsList"]/ul/li['+str(li)+']/div/div[4]/strong/a/text()')
#
#             if shuzi == None:
#                 continue
#             print(shuzi)
#             #
#             # with open('name_test.csv', 'a', encoding='utf-8') as f:
#             #     writer = csv.writer(f)
#             #     writer.writerow(name_list)
#             # name_list.clear()
#         print("第{}页加载入文件完成...".format(page))
#
#
#
# #爬取销量排行
# def get_Sales():
#     # s = page * 60 +1
#     for page in range(1, 11):
#         s = page * 60 + 1
#         url = 'https://search.jd.com/search?keyword=零食&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&ds=1&wq=零食&psort=3' + '&page=' + str(
#             2 * page + 1) + '&s=' + str(s) + '&click=0'
#         r = requests.get(url, headers=headers, timeout=20)
#         r.encoding='utf-8'
#         html_1 = etree.HTML(r.text, etree.HTMLParser())
#         name_list = []
#         for li in range(1, 31):
#             # 因为动态下滑，无法爬取60个
#             name = html_1.xpath('//*[@id="J_goodsList"]/ul/li['+str(li)+']/div/div[3]/a/em/text()[1]')
#
#             name2 = html_1.xpath('//*[@id="J_goodsList"]/ul/li['+str(li)+']/div/div[3]/a/em/text()[2]')
#
#             if name == None:
#                 continue
#
#             name_list.append((name, name2))
#
#             with open('name_test.csv', 'a', encoding='utf-8') as f:
#                 writer = csv.writer(f)
#                 writer.writerow(name_list)
#             name_list.clear()
#         print("第{}页加载入文件完成...".format(page))

# Scrape detailed comment text per product (not implemented yet).
def get_Comments():
    """Placeholder: fetch full comment details for each product (TODO)."""
    pass

# Scrape product images (not implemented yet).
def get_picture():
    """Placeholder: download product images (TODO)."""
    pass


def main():
    """Entry point: run the full listing + comment-stats scrape,
    appending results to JD_lingshi.csv."""
    # get_Sales()  # legacy sales-only pass, superseded by get_all()
    get_all()

#gui实验
# import tkinter
# top = tkinter.Tk()

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()