import time
import re
import requests
from lxml import etree
import pymysql
import datetime
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

'''
    The comments are not present in the static page source; they are loaded
    by JavaScript. Product detail-page links for each phone are fetched from
    the database.
'''


class XLY(object):
    """Scrape JD.com phone detail pages and their JS-loaded comments.

    Detail-page URLs are read from the MySQL table ``gly``; the parsed
    product info plus one row per comment is written to ``jd_info``.
    """

    def __init__(self):
        self.url = 'https://www.jd.com/'
        # Headless Chrome: no UI, lower resource usage.
        opt = webdriver.ChromeOptions()
        opt.add_argument('--headless')
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        opt.add_argument('--disable-dev-shm-usage')
        # FIX: 'chrome_options' was deprecated in Selenium 3.8 and removed in
        # Selenium 4; 'options' is the supported keyword.
        self.driver = webdriver.Chrome(options=opt)
        # MySQL connection settings shared by get_urls() / save_data().
        self.host = '127.0.0.1'
        self.db = 'app_mark'
        self.user = 'root'
        self.passwd = '123456'
        self.charset = 'utf8mb4'

    def get_html(self, url):
        """Load *url* in the driver and return the rendered page source."""
        self.driver.get(url)
        return self.driver.page_source

    def get_urls(self):
        """Fetch the detail-page links marked tag="1" from table ``gly``.

        Returns:
            Tuple of 1-tuples as produced by ``cursor.fetchall()``, or
            ``None`` when the query fails.
        """
        con = pymysql.connect(host=self.host, db=self.db, user=self.user,
                              passwd=self.passwd, charset=self.charset)
        cur = con.cursor()
        sql = 'select link from gly where tag = "1"'
        try:
            cur.execute(sql)
            results = cur.fetchall()
        except Exception as e:
            con.rollback()
            print('error~', e)
            results = None
        else:
            con.commit()
        finally:
            # Always release the cursor/connection, even on error.
            cur.close()
            con.close()
        return results

    def appointed(self, urls):
        """Scrape each ``(link,)`` row in *urls*: product info, then comments."""
        for row in urls:
            url = row[0]
            data = {}
            html = self.get_html(url)
            # parse_html -> model, title, lasttime, platform, phone_size, post_time
            (data['model'], data['title'], data['lasttime'], data['platform'],
             data['phone_size'], data['post_time']) = self.parse_html(html)
            data['url'] = url
            self.driver.get(url)
            # The comment pane is rendered by JS; wait until its tab exists.
            comment_button = WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.XPATH, '//li[@data-anchor="#comment"]'))
            )
            # Scroll down in steps so lazy-loaded content gets triggered.
            for _ in range(2):
                self.driver.execute_script('window.scrollBy(0,600)')
                time.sleep(1.5)
            # Open the comment tab and give the comments time to load.
            comment_button.click()
            time.sleep(2)
            # The page source now contains the comments.
            html = self.driver.page_source
            self.parse_comment(html, data)
        self.driver.quit()

    def parse_html(self, html):
        """Extract product-level fields from a detail-page source.

        Returns:
            (model, title, lasttime, platform, phone_size, post_time) — all
            strings. Optional fields missing from the page fall back to '无'
            (the original left them as empty *lists*, which then broke the
            SQL insert in save_data()).
        """
        # Source platform is fixed: these pages all come from JD.
        platform = '京东'

        # Title: first sku-name div, inner tags stripped, whitespace trimmed.
        title = re.findall('<div class="sku-name">(.*?)</div>', html, re.S)[0]
        title = re.sub('<.*?>', '', title).strip()

        # Model number.
        model = re.findall('>商品名称：(.*?)</li>', html, re.S)[0]

        # Main screen size (the spec label says inches).
        phone_size = re.findall('<dt>主屏幕尺寸（英寸）</dt><dd>(.*?)</dd>', html)
        if phone_size:
            # NOTE(review): the label is in inches but 'mm' is appended; kept
            # for backward compatibility with existing rows — confirm intent.
            phone_size = phone_size[0] + 'mm'
        else:
            # FIX: original printed 'phone_thick not found' and returned [].
            phone_size = '无'
            print('error phone_size not found')

        # Release year.
        post_time = re.findall('<dt>上市年份</dt><dd>(.*?)</dd>', html)
        if post_time:
            post_time = post_time[0]
        else:
            post_time = '无'
            print('error post_time not found')

        # Scrape timestamp.
        lasttime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        return model, title, lasttime, platform, phone_size, post_time

    def parse_comment(self, html, data):
        """Parse every comment on the page and persist one row per comment.

        *data* already holds the product-level fields; the per-comment fields
        are filled in before each save_data() call.
        """
        html_element = etree.HTML(html)
        # One div per comment.
        divs = html_element.xpath('//div[@class="comment-item"]')
        for div in divs:
            try:
                member_level = div.xpath('.//div[@class="user-level"]//text()[1]')[1]
            except IndexError:
                member_level = '无'
            # Serialize this comment's subtree so regexes stay scoped to it.
            div_string = etree.tostring(div, encoding='utf-8').decode('utf-8')

            # Star rating is encoded in the CSS class, e.g. "comment-star star5".
            stars = re.findall('<div class="comment-star (.*?)"', div_string)[0]

            # First order-info span is the body colour of the purchased phone.
            comment_info = div.xpath('.//div[@class="order-info"]/span/text()')
            comment_color = comment_info[0]

            # Comment text.
            content = div.xpath('.//p[@class="comment-con"]//text()')[0]

            # FIX: the original searched the whole page source here, so every
            # comment was assigned the first tag list on the page; scope the
            # search to this comment's own markup. Also guard against
            # comments that carry no tags (original raised IndexError).
            label = '无'
            tag_lists = re.findall(
                r'<div class="tag-list tag-available".*?>(.*?)</div>',
                div_string, re.S)
            if tag_lists:
                raw = re.sub(r'</span>[\s\S]*?>', '', tag_lists[0]).strip()
                inner = re.findall(r'>([\s\S]*?)<', raw)
                if inner:
                    # Drop per-tag counts such as "拍照效果好(1万+)".
                    label = re.sub(r'\([\s\S]*?\)', ' ', inner[0])

            data['member_level'] = member_level
            data['stars'] = stars
            data['comment_color'] = comment_color
            data['content'] = content
            data['label'] = label
            print(data)
            self.save_data(data)

    def save_data(self, data):
        """Insert one comment row (product + comment fields) into ``jd_info``."""
        data_list = [data['platform'], data['model'], data['title'], data['content'], data['member_level'],
                     data['stars'], data['comment_color'], data['phone_size'], data['post_time'], data['lasttime'], data['label'], data['url']]
        con = pymysql.connect(host=self.host, db=self.db, user=self.user,
                              passwd=self.passwd, charset=self.charset)
        cur = con.cursor()
        # Parameterized insert — values are escaped by the driver.
        sql = 'insert into jd_info(platform, model, title, content, memberlevel, userimpression, color, productsize, creationtime, lasttime, label, url) ' \
              'values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
        try:
            cur.execute(sql, data_list)
            print('insert success')
        except Exception as e:
            con.rollback()
            print('error~', e)
        else:
            con.commit()
        finally:
            cur.close()
            con.close()


if __name__ == '__main__':
    # Entry point: pull the detail-page links from MySQL and scrape each one.
    scraper = XLY()
    links = scraper.get_urls()
    if links:
        scraper.appointed(links)
