#encoding=utf-8
import scrapy
from news_spider.items import NewsSpiderItem
from pyquery import PyQuery
import json
import time
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy.spiders import CrawlSpider
import datetime
import time
import iso8601

class TmtpostSpider(CrawlSpider):
    """Scrapy crawl spider for leiphone.com category pages.

    Follows the category listing pages in ``start_urls``, extracts links
    matching the article-URL pattern, and yields one ``NewsSpiderItem``
    per article with its title, body text, URL and publish time
    (as a Unix timestamp, seconds since epoch).

    NOTE(review): the class is named ``TmtpostSpider`` but the spider
    name is 'leiphone' and it crawls leiphone.com — likely copied from a
    tmtpost spider. The class name is kept unchanged so external
    references keep working.
    """

    # Category listing pages used as crawl entry points.
    start_urls = [
        'https://www.leiphone.com/category/transportation',
        'https://www.leiphone.com/category/ai',
        'https://www.leiphone.com/category/aijuejinzhi',
        'https://www.leiphone.com/category/aihealth',
        'https://www.leiphone.com/category/fintech',
        'https://www.leiphone.com/category/sponsor',
        'https://www.leiphone.com/category/letshome',
        'https://www.leiphone.com/category/arvr',
        'https://www.leiphone.com/category/robot',
        'https://www.leiphone.com/category/yanxishe',
        'https://www.leiphone.com/category/weiwu',
        'https://www.leiphone.com/category/iot',
    ]
    name = 'leiphone'
    allowed_domains = ['leiphone.com']
    rules = (
        Rule(
            # Fixed regex: dots are escaped (previously '.' matched any
            # character) and the trailing '/*' (zero-or-more slashes) is
            # replaced by '/.*' to match the rest of the article path.
            LinkExtractor(allow=r"https://www\.leiphone\.com/news/\d+/.*"),
            callback="parseNews",
            follow=True,
        ),
    )

    def parseNews(self, response):
        """Parse a single article page into a ``NewsSpiderItem``.

        Yields one item with keys ``time`` (Unix timestamp, float),
        ``title``, ``url`` and ``content``. Pages that lack a publish
        time or an article body are skipped with a warning instead of
        raising IndexError as before.
        """
        self.logger.info('A response from %s just arrived!', response.url)

        # extract_first() returns None instead of raising when the node
        # is absent — previously bare [0] indexing crashed on such pages.
        published = response.xpath(
            "//meta[@property='article:published_time']/@content"
        ).extract_first()
        title = response.xpath("//title/text()").extract_first()
        body = response.xpath("//div[@class='lph-article-comView']").extract_first()

        if not published or not body:
            self.logger.warning(
                'Skipping %s: missing publish time or article body', response.url)
            return

        # Fixed: time.mktime(dt.timetuple()) discards the tzinfo parsed by
        # iso8601 and re-interprets the wall-clock time in the *local*
        # timezone, yielding a wrong epoch unless the host happens to run
        # in the article's timezone. datetime.timestamp() honours tzinfo.
        timestamp = iso8601.parse_date(published).timestamp()

        item = NewsSpiderItem()
        item['time'] = timestamp
        item['title'] = PyQuery(title).text() if title else ''
        item['url'] = response.url
        item['content'] = PyQuery(body).text()
        yield item

    def tryFindChild(self, element):
        """Return ``element.text``, appending the first child's text when
        the element has children.

        NOTE(review): appears unused by this spider. *element* is
        presumably an lxml-style element (``len(element)`` counts
        children, ``element[0]`` is the first child) — confirm against
        callers.
        """
        # Guard against None .text (lxml uses None for empty text nodes),
        # which previously raised TypeError on string concatenation.
        if len(element):
            return (element.text or "") + " " + (element[0].text or "")
        return element.text
