import requests
import json
import pymysql
import time
from lxml import etree
#商品信息
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException

from com.py.test.reptleDolinGoodMsg.MyThread import MyThread


# class JDReptle:
#     def runThread(self,list):
#         for url in list:
#             self.getGoodsDetail(url)
#     def start(self):
#         baseUrl = "https://search.jd.com/Search?keyword=%E4%B8%9C%E8%8F%B1%E9%9D%A2%E5%8C%85%E6%9C%BA&enc=utf-8&suggest=1.def.0.V13--12s0,20s0,38s0&wq=%E4%B8%9C%E8%8F%B1&pvid=6f9fea6a1b944b2c9c08df41b79996b4"
#         headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"}
#         goodsUrls = []
#         #商品列表
#         res = requests.get(baseUrl,headers=headers)
#         html = BeautifulSoup(res.text,"lxml")
#         goods_list = html.find('div',id="J_goodsList")
#         goods_list= goods_list.find_all('div',class_="p-img")
#         #获取商品详情页地址
#         for pimg in goods_list:
#             url = pimg.a['href'].replace('https://','//').replace('//','https://')
#             goodsUrls.append(url);
#         size = len(goodsUrls)
#         step = 5 #5#一组
#         groups = [goodsUrls[i:i+step] for i in range(0,size,step)]
#         thread_num = len(groups)  #线程数
#         for i in range(thread_num):
#             t = MyThread(self.runThread,args=(groups[i]))
#             t.start()
#
#     def getGoodsDetail(self,url):
#     # res = requests.get("https://item.jd.com/39019858092.html")
#         res = requests.get(url)
#         bs = BeautifulSoup(res.text,"lxml")
#         driver = etree.HTML(res.content)
#         #标题
#         title = bs.find('ul',class_='parameter2').li['title']
#         # print(bs)
#         #品牌名称
#         brandname = driver.xpath(".//*[@clstag='shangpin|keycount|product|pinpai_1']")[0].text
#         # print(brandname)
#         #店铺名称
#         paramList = bs.find('ul',class_='parameter2').find_all('li');
#         # goodsName = paramList[0].attrs['title']
#         goodsNo = paramList[1].attrs['title']
#         shopname = paramList[2].attrs['title']
#         if "kg" in shopname:
#             shopname = driver.xpath(".//*[@clstag='shangpin|keycount|product|dianpuname1']")[0].text
#         #型号
#         model = "无"
#         ptableItem= bs.find_all('div',class_='Ptable-item')
#         for item in ptableItem:
#             dls = item.find_all('dl',class_='clearfix')
#             for dl in dls:
#                 if dl.dt.text == '认证型号':
#                     model = dl.dd.text
#         if "-" in model:
#             model = "无"
#         print(title,brandname,goodsNo,shopname,model)
#
# JDReptle().start()
# 商品评价 network 搜索 productPageComments
#

from requests.exceptions import RequestException
# url1 = "https://sclub.jd.com/comment/productPageComments.action?callback&productId=39019858092&score=0&sortType=5&page="
# Comment-API endpoint for product 16790621987; the zero-based page number is
# spliced between url1 and url2 by main(). Empty "callback=" asks for plain
# JSON instead of a JSONP wrapper.
url1 = "https://sclub.jd.com/comment/productPageComments.action?callback=&productId=16790621987&score=0&sortType=5&page="
url2 = "&pageSize=10&isShadowSku=0&fold=1"
def get_one_page(url):
    """Fetch one page of JD comment JSON.

    Parameters
    ----------
    url : str
        Full comment-API URL (url1 + page number + url2).

    Returns
    -------
    str | None
        Response body on HTTP 200; None on a non-200 status or a
        request failure.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
        # NOTE(review): Referer presumably required by JD's comment API
        # to avoid being rejected as a bot — confirm.
        "Referer": "https://item.jd.com/16790621987.html",
    }
    try:
        # timeout keeps a stalled connection from hanging the crawler forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            print("获取页面成功！")
            return response.text
        print("获取页面失败，状态码：%d" % response.status_code)
        return None
    except RequestException:
        print("请求失败")
        return None
#
# def write_to_DB(user_name, comment):
#     db = pymysql.connect("localhost", "root", "123456", "jindong_comments")
#     cursor = db.cursor()
#
#     sql="""INSERT INTO comment_P30 VALUES ("%s", "%s")
#     """ %(user_name, comment)
#
#     try:
#         cursor.execute(sql)
#         db.commit()
#     except:
#         db.rollback()
#
#     db.close()

def parse_one_page(html):
    """Parse one page of JD comment JSON and print each comment.

    Parameters
    ----------
    html : str | None
        Raw body returned by get_one_page, or None when the fetch failed.

    Returns
    -------
    None
    """
    # get_one_page returns None on failure — json.loads(None) would raise
    # TypeError and kill the whole crawl loop, so skip this page instead.
    if not html:
        print("页面内容为空，跳过解析")
        return
    text = html.strip()
    # JD sometimes wraps the payload in JSONP, e.g. "fetchJSON_comment98vv18({...});"
    # (see the old stripping code above); unwrap it before parsing.
    if not text.startswith(("{", "[")):
        start = text.find("(")
        end = text.rfind(")")
        if start != -1 and end > start:
            text = text[start + 1:end]
    comments_dict = json.loads(text)  # str -> dict
    comment_list = comments_dict["comments"]  # list of per-user comment records
    for item in comment_list:
        comment = item["content"]
        user_name = item["nickname"]
        userImage = item["userImage"]
        score = item["score"]
        creationTime = item["creationTime"]
        print(user_name, comment, userImage, score, creationTime)

def main():
    """Crawl the first four comment pages, printing each page's comments."""
    for page_no in range(4):
        page_url = f"{url1}{page_no}{url2}"
        page_html = get_one_page(page_url)
        print("正在爬取第%d页的评论..." % (page_no + 1))
        print(page_html)
        parse_one_page(page_html)
        # brief pause between requests to avoid hammering the server
        time.sleep(2)

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()