# +--------------------------
# | User: zq                -
# | Version: python3.7      -
# | Time: 2020-03-16 10:34  -
# +--------------------------
import time
from selenium import webdriver
from scrapy import Selector
from hupu_selenium_spider.models import *
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from datetime import datetime, date

# Chrome options: disable image downloads so list pages load faster.
chrome_options = Options()
chrome_options.add_argument("blink-settings=imagesEnabled=false")

# Path to the local chromedriver binary (machine-specific).
_CHROMEDRIVER_PATH = "/Users/zhangqiang/Desktop/05-python/03-spider-learn/spider/csdn_spider/chromedriver"

# Module-level browser shared by the crawler below ("brower" name kept
# because other code in this module references it).
brower = webdriver.Chrome(
    options=chrome_options,
    executable_path=_CHROMEDRIVER_PATH)


# 处理列表
def parse_list():
    """Crawl every page of the hupu "gear" forum list and persist each thread.

    Starts at https://bbs.hupu.com/gear, walks the pagination via the
    "nextPage" link, extracts per-thread metadata (id, title, author,
    dates, reply/click counts) and upserts one `List` row per thread.
    Closes the shared `brower` WebDriver when the last page is done.

    Side effects: network requests through the module-level `brower`
    driver and database writes through the `List` model.
    """
    has_next_page = True
    brower.get("https://bbs.hupu.com/gear")
    while has_next_page:
        sel = Selector(text=brower.page_source)
        all_lis = sel.xpath("//ul[@class='for-list']/li")
        for li in all_lis:
            # The thread id is embedded in the href, e.g. "/12345678.html"
            # -> strip the leading "/" and the ".html" suffix.
            list_id_href = li.xpath(".//a[@class='truetit']/@href").extract_first()
            list_id = int(list_id_href.split('.')[0][1:])
            # Highlighted threads wrap the title text in a <b> tag.
            title = li.xpath(".//a[@class='truetit']/text()").extract_first() or li.xpath(".//a[@class='truetit']/b/text()").extract_first()
            author = li.xpath(".//a[@class='aulink']/text()").extract_first()
            create_time_str = li.xpath(".//div[@class='author box']/a[2]/text()").extract_first()
            # NOTE(review): raises TypeError if the date cell is missing — confirm markup.
            create_time = datetime.strptime(create_time_str, "%Y-%m-%d")
            # Cell text looks like "replies / clicks".
            str_nums = li.xpath(".//span[@class='ansour box']/text()").extract_first()
            huifu_num = int(str_nums.split('/')[0].strip())
            click_num = int(str_nums.split('/')[1].strip())
            # The list page only shows HH:MM for the last reply, so prepend
            # today's date.  NOTE(review): wrong for replies from earlier days.
            last_submit_str = str(date.today()) + " " + li.xpath(
                ".//div[@class='endreply box']/a[1]/text()").extract_first()
            last_submit = datetime.strptime(last_submit_str, '%Y-%m-%d %H:%M')
            last_submit_author = li.xpath(
                ".//div[@class='endreply box']/span/text()").extract_first()

            # Named "post" (not "list") to avoid shadowing the builtin.
            post = List()
            post.list_id = list_id
            post.title = title
            post.author = author
            post.create_time = create_time
            post.huifu_num = huifu_num
            post.click_num = click_num
            post.last_submit = last_submit
            post.last_submit_author = last_submit_author

            # Upsert: UPDATE when the row already exists, INSERT otherwise.
            existed_list = List.select().where(List.list_id == post.list_id)
            if existed_list:
                post.save()
            else:
                post.save(force_insert=True)

        # Follow the "nextPage" link if present; stop when it disappears.
        try:
            next_page_ele = brower.find_element_by_xpath("//a[@class='nextPage']")
            # send_keys("\n") follows the link even when a plain .click()
            # would be intercepted by overlays.
            next_page_ele.send_keys("\n")
            time.sleep(5)  # crude throttle / wait for the next page to render
        except NoSuchElementException:
            has_next_page = False

    brower.close()


if __name__ == "__main__":
    # Crawl the full forum list when this module is run as a script.
    parse_list()
