# -*- coding: utf-8 -*-
import scrapy
from six.moves import urllib
import os
import sys
from crawl.items import Articleitem
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

class ComraSpider(scrapy.Spider):
    """Spider for the news sections of www.comra.org.

    Crawls the section listing pages in ``start_urls``, follows article
    links to ``content_parse``, handles direct video links inline, and
    yields ``Articleitem`` objects only for pages whose title or body
    contains at least one keyword from ``keywords.txt``.
    """

    name = 'comra'
    allowed_domains = ['www.comra.org']
    # Section listing start pages (domestic news, international news,
    # picture news, tech achievements, tech development, ocean encyclopedia).
    start_urls = ['http://www.comra.org/node_539533.htm',
                  'http://www.comra.org/node_539535.htm',
                  'http://www.comra.org/node_539534.htm',
                  'http://www.comra.org/node_539541.htm',
                  'http://www.comra.org/node_539543.htm',
                  'http://www.comra.org/node_539530.htm',
                  'http://www.comra.org/node_539537.htm']

    # Keyword filter list; populated from keywords.txt in __init__.
    keywords = []

    # Directory where matched videos would be saved (download is disabled).
    save_dir = "./videos/"

    def __init__(self, *args, **kwargs):
        # Fix: chain to Spider.__init__ so Scrapy can finish spider setup
        # and so command-line spider arguments (-a key=value) still work.
        super(ComraSpider, self).__init__(*args, **kwargs)
        # One keyword per line.  Blank lines are skipped: an empty-string
        # keyword would make ``"" in title`` true and match every page.
        with open('keywords.txt', 'r', encoding='UTF-8') as f:
            self.keywords = [line.strip() for line in f if line.strip()]

    def parse(self, response):
        """Parse a section listing page.

        Direct media links (.mp4/.mkv) become video items immediately;
        other links are followed to ``content_parse``.  Pagination is
        followed via the "next page" anchor (absent on the last page).
        """
        main_body = response.xpath("//div[@class='box7']")

        for link in main_body.xpath("./ul/li/a/@href").extract():
            if link.endswith((".mp4", ".mkv")):
                # Video link: the metadata lives on the listing page itself.
                item = self.download_video(link, response)
                if item is not None:  # None = no keyword matched
                    yield item
            else:
                # Fix: resolve the href against the response URL so relative
                # links produce valid requests (images already did this).
                yield scrapy.Request(url=response.urljoin(link),
                                     callback=self.content_parse)

        # "Next page" anchor; empty when there is a single/last page.
        next_page = main_body.xpath(
            "./div[@id='autopage']//span/following-sibling::a[1]/@href"
        ).extract_first()
        if next_page:
            yield scrapy.Request(url=response.urljoin(next_page),
                                 callback=self.parse)

    def content_parse(self, response):
        """Parse an article page into an ``Articleitem``.

        Returns the populated item when the title or body matches a
        keyword, otherwise returns None and the page is dropped.
        """
        item = Articleitem()

        # Section/category name.
        item['type'] = response.xpath("//div[@class='box6b']/text()").extract()[0]
        # Article title.
        title = response.xpath("//div[@class='box7']/h1/text()").extract()[0]
        item['title'] = title
        # Publish date: the h2 looks like "<label>:<date> <time> ...".
        # Keep "<date> <time>".  NOTE(review): raises IndexError if the
        # header format ever changes — confirm against live pages.
        date = response.xpath("//div[@class='box7']/h2/text()").extract()[0]
        date_parts = date.split(" ")
        item['publish_time'] = date_parts[0].split(":")[1] + " " + date_parts[1]
        # Image URLs, made absolute.
        item['images'] = [
            response.urljoin(src)
            for src in response.xpath("//div[@class='box8']//img/@src").extract()
        ]

        item['url'] = response.url
        # Body text: join all paragraph fragments (O(n), no quadratic +=).
        content = "".join(
            response.xpath("//div[@class='box8']/p/text()").extract())
        item['content'] = content

        # Keyword filtering.
        # TODO(liujunfeng): move this into a pipeline so every spider can
        # simply yield items and let the pipeline decide what to keep.
        relative_keywords, is_relative = self.is_relative(title, content)
        if is_relative:
            item['keywords'] = relative_keywords
            # Replace straight double quotes so downstream storage that is
            # quote-sensitive (e.g. CSV/JSON dumps) stays well-formed.
            item['title'] = item['title'].replace('"', '”')
            item['content'] = item['content'].replace('"', '”')
            return item

    def download_video(self, link, response):
        """Build an ``Articleitem`` for a direct video link on a listing page.

        Returns None when the video title matches no keyword.
        """
        item = Articleitem()
        # Fix: pass the href as an XPath variable instead of string
        # concatenation, which broke on hrefs containing quotes.
        node = response.xpath("//a[@href=$href]", href=link)
        # Title is the anchor text.
        title = node.xpath("./text()").extract()[0]
        # Publish date sits in the <span> immediately before the anchor.
        publish_time = node.xpath("./preceding-sibling::span[1]/text()").extract()[0]

        relative_keywords, is_relative = self.is_relative(title, "")
        if not is_relative:
            return None

        # Download is currently disabled; the target path computation is
        # kept so the commented-out retrieval can be re-enabled easily.
        filename = link.split('/')[-1]
        save_path = os.path.join(self.save_dir, filename)
        # urllib.request.urlretrieve(link, save_path)
        # sys.stdout.flush()

        item['type'] = "视频新闻"
        item['title'] = title
        item['publish_time'] = publish_time
        item['keywords'] = relative_keywords
        # NOTE(review): link is stored as-is; if listing pages ever use
        # relative video hrefs this should become response.urljoin(link).
        item['video_url'] = link
        return item

    def is_relative(self, title, content):
        """Return (matched_keywords, matched) for the given title and body.

        A keyword matches when it occurs as a substring of either string.
        """
        matched = [kw for kw in self.keywords if kw in title or kw in content]
        return matched, bool(matched)



