import scrapy
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from urllib.parse import urlparse
from scrapy.http import Request
import re
import urllib.request
from ..items import CnblogItem
import json
from selenium import webdriver
import time


class CnblogSpider(scrapy.Spider):
    """Crawl news.cnblogs.com: walk the paginated news listing, scrape each
    entry's detail page into a ``CnblogItem``, then enrich the item with
    comment/view counters fetched from the site's ajax endpoint."""

    name = 'cnblog'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['https://news.cnblogs.com/']

    def get_cookies(self):
        """Log in through a real Chrome browser and return its cookie list.

        WARNING(review): credentials are hard-coded below — move them to
        settings or environment variables. NOTE(review): the
        ``find_element_by_*`` API was removed in Selenium 4; pin
        ``selenium<4`` or migrate to ``find_element(By.ID, ...)``.
        """
        browser = webdriver.Chrome()
        url = 'https://account.cnblogs.com/signin?returnUrl=https:%2F%2Fwww.cnblogs.com%2F'
        browser.get(url)
        account = browser.find_element_by_id('mat-input-0')
        account.send_keys('791652232@qq.com')
        password = browser.find_element_by_id('mat-input-1')
        password.send_keys('chen1801')
        button = browser.find_element_by_xpath('/html/body/app-root/div/mat-sidenav-container/mat-sidenav-content/div/div/app-sign-in/app-content-container/mat-card/div/form/div/button')
        button.click()
        time.sleep(3)  # give the geetest captcha widget time to render
        print(browser.page_source)
        # Locate the blue slider of the geetest captcha. Dragging it along a
        # track from get_track() is still TODO; the handle is only located here.
        button = browser.find_element_by_class_name('geetest_slider_button')
        return browser.get_cookies()

    @staticmethod
    def get_track(distance):
        """Build a human-like drag track covering ``distance`` pixels.

        Accelerates (a=2) for the first three quarters of the distance, then
        decelerates (a=-3), sampling every ``t=0.2`` s; returns the list of
        per-step pixel moves (rounded), whose sum is >= ``distance``.

        Bug fix: the original ``def get_track(distance)`` was a plain method
        missing ``self``, so ``self.get_track(d)`` would have bound the
        instance to ``distance``. ``@staticmethod`` keeps both
        ``CnblogSpider.get_track(d)`` and instance calls working.
        """
        track = []
        current = 0
        mid = distance * 3 / 4  # switch-over point from accel to decel
        t = 0.2
        v = 0
        while current < distance:
            a = 2 if current < mid else -3
            v0 = v
            v = v0 + a * t
            move = v0 * t + 1 / 2 * a * t * t
            current += move
            track.append(round(move))
        return track

    def parse(self, response):
        """Parse one listing page: yield a detail request per news entry plus
        a request for the next listing page (until the last page)."""
        soup = BeautifulSoup(response.text, 'html.parser')
        # The topic image is page-level and identical for every entry, so it
        # is resolved once, outside the loop. Bug fix: the original used the
        # no-op annotation ``img_url: ''`` and crashed with AttributeError
        # when the <img class="topic_img"> tag was missing.
        img_tag = soup.find('img', class_="topic_img")
        img_url = img_tag.get('src') if img_tag else ''
        if not img_url:
            img_url = ''
        for entry in soup.find_all('h2', class_="news_entry"):
            href = entry.find('a').get('href')
            real_url = urljoin(response.url, href)
            yield Request(url=real_url, callback=self.parse_detail,
                          meta={'img_url': img_url})
        pager = soup.find('div', class_="pager")
        # The "last" shortcut disappears on the final page; while it exists,
        # the element right after the "current" marker holds the next page
        # number (assumes no whitespace sibling — TODO confirm markup).
        if pager and pager.find('a', {"class": ["last"]}):
            next_page = pager.find('a', {"class": ["current"]}).next_sibling.get_text()
            yield Request(url="https://news.cnblogs.com/n/page/" + next_page + "/",
                          callback=self.parse)

    def parse_detail(self, response):
        """Scrape a news detail page into a CnblogItem, then request the
        ajax counters; the finished item is yielded from parse_num."""
        s = BeautifulSoup(response.text, 'html.parser')
        # Raw string (bug fix: '\d'/'\/' in a non-raw literal are invalid
        # escapes on modern Python). Extracts the numeric article id from
        # URLs shaped like .../n/<id>/.
        match_id = re.match(r'.*?(\d+)/$', response.url)
        if not match_id:
            return
        aid = match_id.group(1)
        meta_img = response.meta['img_url']
        item = CnblogItem()
        item['title'] = s.find('div', id='news_title').findChild('a').get_text()
        item['content'] = s.find('div', id='news_content').get_text()
        item['author'] = s.find('span', class_="news_poster").find('a').get_text()
        # Protocol-relative URLs (//images...) need a scheme prefix; skip the
        # prefix entirely for an empty placeholder.
        if meta_img and not meta_img.startswith('http'):
            meta_img = 'http:' + meta_img
        item['img_url'] = [meta_img] if meta_img else []
        send_date_str = s.find('span', class_="time").get_text()
        date_match = re.match('发布于 (.*)', send_date_str)
        # Fall back to the raw text if the page format ever changes, instead
        # of crashing on ``None.group``.
        item['send_date'] = date_match.group(1) if date_match else send_date_str
        ajax_num_url = urljoin(self.start_urls[0], 'NewsAjax/GetAjaxNewsInfo?contentId=' + aid)
        # Plain GET Request (the original FormRequest carried no form data).
        # Bug fix: the original also yielded the incomplete item here, so
        # every article reached the pipeline twice — once without counters.
        yield Request(ajax_num_url, callback=self.parse_num, meta={'item': item})

    def parse_num(self, response):
        """Merge the ajax comment/view counters into the item and emit it."""
        json_data = json.loads(response.body)
        item = response.meta['item']
        item['comment_num'] = json_data['CommentCount']
        item['view_num'] = json_data['TotalView']
        yield item
