# -*- coding: utf-8 -*-
import scrapy
import json
import os
from selenium import webdriver
from zhilian.settings import ROOT_DIR
from test.settings import ZHIHU_COOKIE_FILE
import time
import random
import re
from scrapy_splash import SplashRequest
from utils.splash_scripts import get_scripts


class ZhihuTotalSpider(scrapy.Spider):
    """Crawl the Zhihu home feed.

    First drives the logged-in home page with Selenium (cookies restored
    from ZHIHU_COOKIE_FILE), scrolls a few times to trigger lazy loading,
    then yields one SplashRequest per discovered question page.
    """

    name = 'zhihu_total'
    allowed_domains = ['www.zhihu.com']
    #start_urls = ['http://www.zhihu.com/']

    # Answer-page URL; group(1) is the question id, group(2) the answer id.
    # Compiled once at class level (was rebuilt per anchor per scroll), and
    # the dots are escaped so the host part is matched literally.
    _ANSWER_RE = re.compile(r"https://www\.zhihu\.com/question/(\d+)/answer/(\d+)$")

    def start_requests(self):
        """Open the feed with Selenium, collect question URLs, and hand
        each one to Splash for rendering.

        :return: generator of SplashRequest objects, one per question page,
                 parsed by :meth:`parse`.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
        }

        splash_script = get_scripts("detail_zhihu")

        with open(ZHIHU_COOKIE_FILE, 'r') as f:
            cookies = json.load(f)

        driver_path = os.path.join(ROOT_DIR, 'chromedriver')
        browser = webdriver.Chrome(executable_path=driver_path)
        try:
            # Cookies can only be attached while on a page of the target
            # domain; the 404 page is a cheap one to load first.
            browser.get('https://www.zhihu.com/404')
            for cookie in cookies:
                browser.add_cookie(cookie)
            browser.get('http://www.zhihu.com')

            scroll_js = """document.getElementById("root").scrollIntoView(false)"""
            seen = set()
            for _ in range(3):
                # Scroll to the bottom so the feed lazy-loads more items;
                # random pause to look less like a bot.
                browser.execute_script(scroll_js)
                time.sleep(random.randint(1, 10))

                for a in browser.find_elements_by_css_selector(
                        'a[data-za-detail-view-element_name]'):
                    match = self._ANSWER_RE.match(a.get_attribute('href') or '')
                    if not match:
                        continue
                    # Reduce the answer link to its question page; no second
                    # regex needed, the question id is already in group(1).
                    crawl_href = 'https://www.zhihu.com/question/%s/' % match.group(1)
                    if crawl_href in seen:
                        continue
                    seen.add(crawl_href)
                    print('--------->', crawl_href)
                    yield SplashRequest(
                        headers=headers,
                        cookies=cookies,
                        url=crawl_href,
                        callback=self.parse,
                        meta={'cookiejar': 1},
                        args={'lua_source': splash_script, 'timeout': 300},
                        endpoint='execute')
                    #yield scrapy.Request(headers=headers,url=crawl_href,callback=self.parse)
        finally:
            # quit() (not close()) also terminates the chromedriver process,
            # so it cannot leak when an exception is raised above.
            browser.quit()

    def parse(self, response):
        """Parse a Splash-rendered question page.

        Extraction is not implemented yet; the selectors below document
        where each field lives in the rendered HTML.
        """
        # html = response._body.decode()
        # base_dir = os.path.dirname(os.path.abspath(__file__))
        # with open(os.path.join(base_dir, 'save_file', 'test.html'), 'w') as f:
        #     f.write(html)
        print('---------------------------------->parse')

        # author link: response.css('.List-item')[0].css('a.UserLink-link[data-za-detail-view-element_name="User"]').attrib['href']
        # author name (avatar info included): response.css('.List-item')[0].css('a.UserLink-link[data-za-detail-view-element_name="User"]').css('img.Avatar')[0].attrib['alt']
        # answer text: response.css('.List-item')[0].css('span.RichText').extract_first()
        # upvote count: response.css('.List-item')[0].css('button.VoteButton--up').attrib['aria-label']
