# -*- coding: utf-8 -*-
import scrapy
import re
from selenium import webdriver
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.http import Request
from urllib import parse
import datetime
import time
from scrapy.selector import Selector

from ArticleSpider.tools.common import get_md5
from ArticleSpider.items import SinaNewsItem
class SinanewsSpider(scrapy.Spider):
    """Crawl Sina domestic news (news.sina.com.cn/china/).

    A shared Selenium Chrome instance drives the AJAX-paginated list page
    (scrolling to trigger lazy loading and clicking the "next page" link);
    article detail pages are fetched and parsed by Scrapy itself.
    """
    name = 'sinanews'
    allowed_domains = ['news.sina.com.cn']
    start_urls = ['http://news.sina.com.cn/china/']

    def __init__(self):
        # BUG FIX: the original skipped the base-class initializer.
        super().__init__()
        # Selenium handles the AJAX scroll/pagination of the list page.
        # Disable image loading to speed up rendering.
        prefs = {"profile.managed_default_content_settings.images": 2}
        chrome_opt = webdriver.ChromeOptions()
        chrome_opt.add_experimental_option("prefs", prefs)
        # Raw string: the original "G:\python\..." only worked because \p and
        # \c happen not to be escape sequences (and warns on newer Pythons).
        self.browser = webdriver.Chrome(
            executable_path=r"G:\python\chromedriver.exe",
            chrome_options=chrome_opt)
        self.pages_num = 5  # how many list pages to walk
        # Quit the browser when the spider finishes.
        dispatcher.connect(self.spider_close, signals.spider_closed)

    def spider_close(self, spider):
        """Quit the shared browser on spider shutdown so no chromedriver
        process is leaked."""
        print("spider closed")
        self.browser.quit()

    def parse(self, response):
        """Walk ``self.pages_num`` list pages in the Selenium browser and
        yield a Request per article link found on each page."""
        self.browser.get("http://news.sina.com.cn/china/")
        for _page in range(self.pages_num):
            # Scroll to the bottom a few times so lazily-loaded items render.
            for _ in range(3):
                self.browser.execute_script(
                    "window.scrollTo(0,document.body.scrollHeight); var lenOfPage=document.body.scrollHeight; return lenOfPage;")
                time.sleep(3)

            # Parse the rendered page source with Scrapy's Selector.
            t_selector = Selector(text=self.browser.page_source)
            urls = t_selector.css("#subShowContent1_news4 div h2 a::attr(href)").extract()
            for url in urls:
                yield Request(url=parse.urljoin(response.url, url), callback=self.parse_detail)

            # BUG FIX: find_element_by_css_selector raises
            # NoSuchElementException when the element is absent, so the
            # original truthiness check could never guard anything.
            # find_elements returns [] instead, so the guard works.
            next_elements = self.browser.find_elements_by_css_selector("a[title='下一页']")
            if next_elements:
                next_elements[0].click()

    def parse_detail(self, response):
        """Extract the fields of one article page and yield a SinaNewsItem."""
        title = response.css(".main-title::text").extract_first("")  # article title
        creat_date_time = response.css(".date-source span::text").extract_first("")
        # Expected shape: "<date> <time>"; guard against a missing time part
        # (the original indexed [1] unconditionally and raised IndexError).
        parts = creat_date_time.split(' ', 1) if creat_date_time else []
        creat_date = parts[0] if parts else ""           # creation date string
        creat_time = parts[1] if len(parts) > 1 else ""  # creation time string

        # BUG FIX: the original parsed into `create_date` (note the spelling)
        # and then stored the raw string `creat_date` on the item, silently
        # discarding the parsed value. Parse into the variable actually used.
        # NOTE(review): "%Y/%m/%d" assumed to match the site's date format —
        # TODO confirm against live pages.
        try:
            creat_date = datetime.datetime.strptime(creat_date, "%Y/%m/%d").date()
        except Exception:
            creat_date = datetime.datetime.now().date()

        # BUG FIX: the original stored time.mktime() of a year-1900 struct on
        # success (a float that overflows on many platforms) but an "%H:%M"
        # string on failure. Keep a consistent "%H:%M" string: validate the
        # scraped value, fall back to the current local time.
        try:
            time.strptime(creat_time, "%H:%M")
        except Exception:
            creat_time = time.strftime("%H:%M", time.localtime())

        data_source = response.css(".date-source a::text").extract_first("")  # publisher / source
        keyword_arr = response.css(".keywords a::text").extract()  # article keywords
        keyword = ','.join(keyword_arr)
        content_arr = response.xpath('//div[@id="article"]/p/text()').extract()  # body paragraphs
        content = ','.join(content_arr)  # article body as one string

        sinanews_item = SinaNewsItem()
        sinanews_item['title'] = title
        sinanews_item['data_source'] = data_source
        sinanews_item['url'] = response.url
        #sinanews_item['url_object_id'] = get_md5(response.url)
        sinanews_item['content'] = content
        sinanews_item['keyword'] = keyword
        sinanews_item['creat_date'] = creat_date
        sinanews_item['creat_time'] = creat_time

        yield sinanews_item

