import random

import requests
import json
import pymysql
import time
from lxml import etree
#商品信息
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
import decimal

from com.py.test.reptleDolinGoodMsg.DbOperate import DbOperate
from com.py.test.reptleDolinGoodMsg.MyThread import MyThread
#
from requests.exceptions import RequestException
# Module-level DB helper shared by all crawler threads; used for the
# g_goods / a_assess INSERTs performed in JDReptle below.
dbo = DbOperate()
class JDReptle:
    """Crawler for JD.com search pages.

    ``start(baseUrl)`` downloads one search-result page, extracts the goods
    items and fans them out to worker threads (5 items per thread).  Each
    worker scrapes the item's detail page, inserts a goods row through the
    module-level ``dbo`` helper, then pages through the item's comment API
    and persists every comment batch.
    """

    # Browser-like UA so JD serves the normal pages instead of a bot block.
    _HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"}

    def runThread(self, goodsList):
        """Thread worker: scrape the detail page of every item in the group."""
        for item in goodsList:
            self.getGoodsDetail(item)

    def start(self, baseUrl):
        """Scrape one search-result page and spawn worker threads.

        :param baseUrl: full JD search URL (including the page parameter).
        :raises SystemExit: when the page has no goods container, i.e. we
            paged past the last result page (same behavior as the original
            ``exit(0)``).
        """
        print(baseUrl)
        res = requests.get(baseUrl, headers=self._HEADERS)
        res.encoding = 'utf-8'
        html = BeautifulSoup(res.text, "lxml")
        goods_list = html.find('div', id="J_goodsList")
        try:
            goods_list = goods_list.find_all('div', class_="gl-i-wrap")
        except AttributeError:
            # find() returned None: no goods container on this page.
            print("没有商品列表了！")
            raise SystemExit(0)
        # Collect url / price / name for every item on the page.
        goodsList = []
        for item in goods_list:
            goods_item = {}
            # Normalize protocol-relative links ("//item.jd.com/...") to https.
            goods_item['goodsUrl'] = item.find('div', class_="p-img").a['href'].replace('https://', '//').replace('//', 'https://')
            goods_item['goodsPrice'] = item.find('div', class_="p-price").find('strong').get('data-price')
            if not goods_item['goodsPrice']:
                # Some result layouts carry the price in an <i> tag instead.
                goods_item['goodsPrice'] = item.find('div', class_="p-price").find('i').text
            goods_item['goodsName'] = item.find('div', class_="p-name").find('em').text
            goodsList.append(goods_item)
        step = 5  # items handled per worker thread
        groups = [goodsList[i:i + step] for i in range(0, len(goodsList), step)]
        for group in groups:
            # BUGFIX: args must be a one-element tuple so the thread invokes
            # runThread(group); the original `args=(group)` was just the list
            # itself, which gets unpacked item-by-item on the call.
            # NOTE(review): assumes MyThread follows threading.Thread's
            # args-unpacking convention — confirm against MyThread's source.
            t = MyThread(self.runThread, args=(group,))
            t.start()

    def getGoodsDetail(self, goods_item):
        """Scrape one detail page, insert the goods row, then crawl comments.

        :param goods_item: dict with 'goodsUrl', 'goodsPrice' and 'goodsName'
            as built by :meth:`start`.
        """
        # Send the same UA as start(); the original request carried no
        # headers and risked being served a bot-blocked variant.
        res = requests.get(goods_item['goodsUrl'], headers=self._HEADERS)
        bs = BeautifulSoup(res.text, "lxml")
        driver = etree.HTML(res.content)
        # Title: 'title' attribute of the first <li> in the parameter list.
        title = bs.find('ul', class_='parameter2').li['title']
        # Brand name; "无" ("none") when the brand node is missing.
        try:
            brandname = driver.xpath(".//*[@clstag='shangpin|keycount|product|pinpai_1']")[0].text
        except IndexError:
            print(driver.xpath(".//*[@clstag='shangpin|keycount|product|pinpai_1']"))
            brandname = "无"
        # Product id and shop name from the parameter <li> titles.
        paramList = bs.find('ul', class_='parameter2').find_all('li')
        productId = paramList[1].attrs['title']
        shopname = paramList[2].attrs['title']
        # NOTE(review): original heuristic — a "g" in the scraped value is
        # taken to mean it is a weight attribute rather than the shop name,
        # so fall back to the dedicated shop-name node.  Confirm intent.
        if "g" in shopname:
            shopname = driver.xpath(".//*[@clstag='shangpin|keycount|product|dianpuname1']")[0].text
        # Certified model number ("认证型号") from the spec table, if present.
        model = "无"
        for ptable_item in bs.find_all('div', class_='Ptable-item'):
            for dl in ptable_item.find_all('dl', class_='clearfix'):
                if dl.dt.text == '认证型号':
                    model = dl.dd.text
        goods_sql = "INSERT INTO `g_goods` (title,name,brandname,shopname,price,model,productId) " \
                    "VALUE (%s, %s, %s, %s, %s, %s, %s)"
        if not goods_item['goodsPrice']:
            goods_item['goodsPrice'] = "0.00"
        value = (title, goods_item['goodsName'], brandname, shopname,
                 decimal.Decimal(goods_item['goodsPrice']), model, productId)
        goods_table_id = dbo.execBySqlVal(goods_sql, value)
        # Crawl and persist the comments for this product.
        self.getComment(goods_table_id, productId)

    def get_one_page(self, url, productId):
        """GET one comment-API page.

        :returns: the response body on HTTP 200, else None.
        """
        try:
            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
                # The comment API expects a Referer from the item page.
                "Referer": "https://item.jd.com/" + productId + ".html",
            }
            # BUGFIX: without a timeout a stalled connection hangs the worker
            # thread forever; requests.Timeout subclasses RequestException,
            # so it is handled below like any other request failure.
            response = requests.get(url, headers=headers, timeout=30)
            if response.status_code == 200:
                return response.text
            print("获取页面失败，状态码：%d" % response.status_code)
            return None
        except RequestException:
            print("请求失败")
            return None

    def parse_one_page(self, goods_table_id, html, productId):
        """Parse one comment-API JSON response and bulk-insert the comments.

        :returns: 2 when html is empty/None (fetch failed — caller retries),
            1 when the page holds no comments (past the last page),
            0 when a batch of comments was stored.
        """
        if not html:
            print(html)
            return 2
        comments_dict = json.loads(html)
        comment_list = comments_dict["comments"]
        if not comment_list:
            print(html)
            return 1
        commment_sql = "INSERT INTO `a_assess` (goodsTableId,headurl,username,`level`,content,assesstime,productId) " \
                       "VALUES (%s, %s, %s, %s, %s, %s, %s)"
        # One row per comment, inserted in a single executemany-style call.
        data_list = [
            (goods_table_id, item["userImage"], item["nickname"], str(item["score"]),
             str(item["content"]), item["creationTime"], productId)
            for item in comment_list
        ]
        dbo.execBySqlVals(commment_sql, data_list)
        return 0

    def getComment(self, goods_table_id, productId):
        """Page through a product's comment API (at most 100 pages).

        Retries a page after 60s whenever the fetch failed (JD throttling),
        and stops as soon as a page comes back with no comments.
        """
        url1 = "https://sclub.jd.com/comment/productPageComments.action?callback=&productId="
        url2 = "&score=0&sortType=5&page="
        url3 = "&pageSize=10&isShadowSku=0&fold=1"
        for page in range(0, 100):
            url = url1 + productId + url2 + str(page) + url3
            while True:
                starttime = time.time()
                html = self.get_one_page(url, productId)
                flag = self.parse_one_page(goods_table_id, html, productId)
                if flag == 2:
                    # Fetch failed — JD is throttling us; wait and retry.
                    print("wwwwwwwwwwwwww")
                    time.sleep(60)
                    continue
                dtime = time.time() - starttime
                print("————————————————京东禁止时间:", dtime)
                # flag is 0 or 1 here, so this page is done either way.
                break
            if flag == 1:
                # Empty comment page: no more comments for this product.
                break
# baseUrl = "https://search.jd.com/Search?keyword=%E4%B8%9C%E8%8F%B1&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&bs=1&wq=%E4%B8%9C%E8%8F%B1&ev=exbrand_%E4%B8%9C%E8%8F%B1%EF%BC%88Donlim%EF%BC%89%5E&stock=1&page="
# tempNum = 0
# for num in range(25):
#     tempNum = tempNum+ 2
#     JDReptle().start(baseUrl+str(tempNum))
##测试
# JDReptle().start()
# 商品评价 network 搜索 productPageComments
#
#
# if __name__ == '__main__':
#     JDReptle().getComment("1","16790621987")



# def write_to_DB(user_name, comment):
#     db = pymysql.connect("localhost", "root", "123456", "jindong_comments")
#     cursor = db.cursor()
#
#     sql="""INSERT INTO comment_P30 VALUES ("%s", "%s")
#     """ %(user_name, comment)
#
#     try:
#         cursor.execute(sql)
#         db.commit()
#     except:
#         db.rollback()
#
#     db.close()