import time
import re
import requests
from lxml import etree
import pymysql
import datetime
from selenium.webdriver import ActionChains
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

'''
    评论源码里没有，是js加载出来的
    相当于细览页，后期大量采集的话，从数据库拿到各个手机的详情页链接，开多线程抓取评论（也不能开太多，webdriver耗cpu）
    这里先指定手机详情页链接
'''


class GLY(object):
    """Scrape JD.com phone detail pages and their JS-loaded comments.

    The comment section is rendered by JavaScript, so a real browser
    (Selenium webdriver) is driven instead of plain HTTP requests.
    Detail-page URLs are currently hard-coded in ``self.urls``; for bulk
    crawling they would come from the database instead.
    """

    def __init__(self):
        # JD home page (kept for a possible future keyword-search entry point).
        self.url = 'https://www.jd.com/'
        # Chrome options. Headless mode is available but left disabled:
        # the comment pagination click needs a visible, maximized window.
        opt = webdriver.ChromeOptions()
        # opt.add_argument('--headless')
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        opt.add_argument('--disable-dev-shm-usage')
        # ``options=`` is the supported keyword; ``chrome_options=`` is
        # deprecated and removed in Selenium 4.
        self.driver = webdriver.Chrome(options=opt)
        # Product detail pages to crawl.
        self.urls = [
            'https://item.jd.com/100005150846.html',
            'https://item.jd.com/100000177760.html',  # iPhone XR
            # 'https://item.jd.com/100004404934.html',  # Huawei P30
            # 'https://item.jd.com/100003433872.html',  # IQOO
        ]

    def get_html(self, url):
        """Load *url* in the driver and return the rendered page source."""
        self.driver.get(url)
        return self.driver.page_source

    def appointed(self, url):
        """Crawl one detail page: product fields first, then walk through
        the JS-loaded comment pages until the "next page" link disappears.
        """
        html = self.get_html(url)
        # parse_html -> model, title, lasttime, platform, phone_length,
        # phone_width, phone_thick, post_time
        model, title, lasttime, platform, phone_length, phone_width, phone_thick, post_time = self.parse_html(html)
        self.driver.get(url)
        # Wait for the comment tab and open the comment panel.
        comment_button = WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.XPATH, '//li[@data-anchor="#comment"]'))
        )
        comment_button.click()
        time.sleep(2)
        while True:
            # Scroll down so the lazily-loaded comments are rendered.
            for _ in range(2):
                self.driver.execute_script('window.scrollBy(0,600)')
                time.sleep(1.5)

            # The page source now contains the rendered comments.
            html = self.driver.page_source
            # A missing "next page" link means the last comment page.
            try:
                next_page_button = WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.XPATH, '//a[@class="ui-pager-next" and @href="#comment"]'))
                )
            except Exception:
                next_page_button = None
            if not next_page_button:
                print('{}  已经没有评论'.format(url))
                return
            # parse_comment -> member_level, stars, comment_color, content, label
            member_level, stars, comment_color, content, label = self.parse_comment(html)
            # TODO(review): the parsed fields are never persisted here —
            # wire them into save_data() with the jd_info column order.

            # Empirically the window must be maximized and scrolled for the
            # click on the pager to register on JD's page.
            self.driver.maximize_window()
            for _ in range(5):
                self.driver.execute_script('window.scrollBy(0,600)')
                time.sleep(1)

            next_page_button.click()
            time.sleep(5)

    def parse_html(self, html):
        """Extract product fields from a detail-page source.

        Returns the 8-tuple (model, title, lasttime, platform,
        phone_length, phone_width, phone_thick, post_time). Spec fields
        missing from the page fall back to the string '无' (previously
        they were left as empty lists, giving an inconsistent type).
        """
        # All data here comes from JD.
        platform = '京东'

        # Title: take the sku-name div, strip inner tags and whitespace.
        title = re.findall('<div class="sku-name">(.*?)</div>', html, re.S)[0]
        title = re.sub('<.*?>', '', title).strip()

        # Model number from the parameter list.
        model = re.findall('>商品名称：(.*?)</li>', html, re.S)[0]

        def _spec(pattern, name, suffix=''):
            # One <dt>/<dd> spec field; '无' (plus an error print) when absent.
            found = re.findall(pattern, html)
            if not found:
                print('error {} not found'.format(name))
                return '无'
            return found[0] + suffix

        phone_length = _spec('<dt>机身长度（mm）</dt><dd>(.*?)</dd>', 'phone_length', 'mm')
        phone_width = _spec('<dt>机身宽度（mm）</dt><dd>(.*?)</dd>', 'phone_width', 'mm')
        phone_thick = _spec('<dt>机身厚度（mm）</dt><dd>(.*?)</dd>', 'phone_thick', 'mm')
        post_time = _spec('<dt>上市年份</dt><dd>(.*?)</dd>', 'post_time')

        # Crawl timestamp.
        lasttime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        return model, title, lasttime, platform, phone_length, phone_width, phone_thick, post_time

    def parse_comment(self, html):
        """Parse the first comment on the current comment page.

        Returns (member_level, stars, comment_color, content, label).
        NOTE: this keeps the original one-comment-per-call contract —
        it returns inside the loop after the first ``comment-item`` div,
        so the caller's 5-tuple unpacking still works.
        """
        html_element = etree.HTML(html)
        # All comment blocks on the page.
        divs = html_element.xpath('//div[@class="comment-item"]')
        for div in divs:
            try:
                member_level = div.xpath('.//div[@class="user-level"]//text()[1]')[1]
            except IndexError:
                member_level = '无'
            # Serialize the element so class attributes can be regexed out.
            div_string = etree.tostring(div, encoding='utf-8').decode('utf-8')
            # Star rating (e.g. "star5") taken from the CSS class.
            stars = re.findall('<div class="comment-star (.*?)"', div_string)[0]

            # Purchase info spans: color / memory / model / time; only the
            # color is currently needed.
            comment_info = div.xpath('.//div[@class="order-info"]/span/text()')
            comment_color = comment_info[0]

            # Comment body text.
            content = div.xpath('.//p[@class="comment-con"]//text()')[0]

            # Impression labels. BUG FIX: search this comment's own markup
            # (div_string) rather than the whole page (html) — the old code
            # attributed the page's first tag list to every comment.
            label = re.findall('<div class="tag-list tag-available".*?>(.*?)</div>', div_string, re.S)
            if label:
                label = label[0]
                label = re.sub(r'</span>[\s\S]*?>', '', label).strip()
                label = re.findall(r'>([\s\S]*?)<', label)[0]
                # Drop the per-label counters, e.g. "(12)"; optional.
                label = re.sub(r'\([\s\S]*?\)', ' ', label)
            else:
                label = '无'

            return member_level, stars, comment_color, content, label

    def save_data(self, data):
        """Insert one row into ``jd_info``.

        *data* is an 11-item sequence matching the column list below.
        BUG FIX: the original VALUES clause was the literal, invalid text
        ``values (data[], )``; it now uses parameterized ``%s``
        placeholders, which also guards against SQL injection.
        """
        con = pymysql.connect(host='127.0.0.1', db='app_mark', user='root',
                              passwd='123456', charset='utf8mb4')
        cur = con.cursor()
        sql = ('insert into jd_info(platform, model, title, content, memberlevel, '
               'userimpression, color, productsize, creationtime, lasttime, label) '
               'values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
        try:
            try:
                cur.execute(sql, data)
            except Exception as e:
                con.rollback()
                print('error~', e)
            else:
                con.commit()
        finally:
            # Always release the cursor and connection, even on failure.
            cur.close()
            con.close()


if __name__ == '__main__':
    gly = GLY()
    # FIX: quit the driver in a finally block — previously an exception in
    # appointed() leaked the Chrome process.
    try:
        for url in gly.urls:
            gly.appointed(url)
    finally:
        gly.driver.quit()
