#-*- coding:utf-8 -*-
import os
import re
import urllib
import datetime

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import Selector

from query_news_info.items import QueryNewsInfoItem
from query_news_info.spiders.settings import spider_settings


class NewsSpider(BaseSpider):
    """Scrapy spider that queries 360 news search (news.so.com) once per
    configured site (``site:<site> <query>``), pages through the results,
    and fetches the article page of every result URL not seen in previous
    runs (deduplicated via a per-query URL file on disk).

    Note: this is Python 2 code (``urllib.urlencode``, ``unicode``).
    """

    name = "news"
    # Base endpoint of the 360 news search; query params are appended.
    url_prefix = "http://news.so.com/ns"

    def __init__(self, query, rdir, existed_urls_dir, start_time, content_id):
        """
        :param query: search keywords; surrounding whitespace is stripped.
        :param rdir: result directory (stored for downstream consumers).
        :param existed_urls_dir: directory containing one dedup file per
            query, each file holding one already-crawled URL per line.
        :param start_time: crawl start time (stored for downstream consumers).
        :param content_id: content identifier (stored for downstream consumers).
        """
        # Let the base spider set up its own state (name, logging, kwargs).
        super(NewsSpider, self).__init__()
        self.query = query.strip()
        self.rdir = rdir
        self.content_id = content_id
        self.start_time = start_time
        self.fetch_time = datetime.date.today()

        # One dedup file per query. os.path.join instead of manual "/" concat.
        self.existed_urls_file_path = os.path.join(existed_urls_dir, self.query)

        if os.path.isfile(self.existed_urls_file_path):
            with open(self.existed_urls_file_path) as f:
                # URLs crawled in earlier runs; stream the file instead of
                # materializing readlines().
                self.existed_urls_sets = set(line.strip() for line in f)
        else:
            # Pre-create an empty dedup file so later runs find it.
            with open(self.existed_urls_file_path, "w"):
                pass
            self.existed_urls_sets = set()
        # NOTE(review): URLs added to existed_urls_sets during the crawl are
        # never written back to the dedup file here — presumably a pipeline
        # persists them; confirm against the project's pipelines.

    def start_requests(self):
        """Yield one search request per configured site, scoped with the
        ``site:`` operator so each site is crawled independently."""
        for site in spider_settings:
            scoped_query = "site:%s %s" % (site, self.query)
            params = {
                "q": scoped_query,
                "pq": scoped_query,
                "rank": "rank",
                "src": "srp",
                "tn": "news"
            }
            url = self.url_prefix + "?" + urllib.urlencode(params)
            # meta carries the site and the 0-based page number for ranking.
            yield Request(url, meta={"site": site, "pn": 0}, callback=self.parse_search)

    def parse_search(self, response):
        """Parse one search-result page: yield a Request per unseen result
        URL (article fetched by the default ``parse`` callback) and a
        Request for the next result page, if any."""
        sel = Selector(response)
        site = response.meta["site"]
        pn = response.meta["pn"]
        # Each <li class="res-list"> is one news hit.
        for index, sub_sel in enumerate(sel.xpath("//ul[@class='result']/li[@class='res-list']")):
            title = u"".join(sub_sel.xpath("h3/a//text()").extract()).strip()
            # The <span> holds "source date time ..." separated by whitespace.
            lst = u"".join(sub_sel.xpath("h3/span//text()").extract()).split()
            # Guard: an empty span previously raised IndexError on lst[0].
            source = lst[0].strip() if lst else u""
            release_time = u" ".join(lst[1:]).strip()
            href_lst = sub_sel.xpath("h3/a/@href").extract()
            if not href_lst:
                # Result without a link — nothing to fetch; skip it.
                continue
            text_url = href_lst[0].strip()
            # 20 results per page; rank is 1-based across pages.
            rank = pn * 20 + index + 1
            abstract = u"".join(sub_sel.xpath("p/text() | p/em/text()").extract()).strip()
            if text_url in self.existed_urls_sets:
                continue
            self.existed_urls_sets.add(text_url)
            # No callback given: the article page goes to self.parse.
            yield Request(text_url, meta={"title": title,
                                          "source": source,
                                          "release_time": release_time,
                                          "text": "",
                                          "site": site,
                                          "url": text_url,
                                          "rank": rank,
                                          "abstract": abstract,
                                          })

        # Pagination: follow the last pager link only when it reads "next page".
        next_page_info_lst = sel.xpath("//div[@id='page']/a[last()]/text()").extract()
        if next_page_info_lst and next_page_info_lst[0] == u"下一页>":
            next_page_url = self.url_prefix + sel.xpath("//div[@id='page']/a[last()]/@href").extract()[0]
            yield Request(next_page_url, meta={"site": site, "pn": pn + 1}, callback=self.parse_search)

    def parse(self, response):
        """Parse an article page using the per-site XPaths from
        spider_settings, then yield the assembled item."""
        sel = Selector(response)
        meta = response.meta
        # spider_settings maps site -> (text_xpath, source_xpath, site_type).
        text_xpath, source_xpath, site_type = spider_settings[meta["site"]]
        meta["site_type"] = site_type
        meta["text"] = u" ".join(sel.xpath(text_xpath).extract()).strip()
        if source_xpath is not None:
            # Take the part after the (ASCII or full-width) colon, e.g.
            # "来源：xxx" -> "xxx"; fall back to the search-page source.
            meta["source"] = re.split(u":|：", u"".join(sel.xpath(source_xpath).extract()).strip())[-1].strip() \
                or meta["source"]
        yield self.construct_item(meta)

    def construct_item(self, meta):
        """Build a QueryNewsInfoItem from the accumulated meta dict.

        :raises Exception: when any required key is missing from meta.
        """
        i = QueryNewsInfoItem()
        i_keys = set(["site", "title", "text", "release_time", "source", "url", "site_type", "rank", "abstract"])
        diff_keys = i_keys - set(meta.keys())
        if diff_keys:
            exception_info = "miss data of %s" % diff_keys
            raise Exception(exception_info)
        for k in i_keys:
            # isinstance instead of type() == unicode; UTF-8-encode text fields.
            if isinstance(meta[k], unicode):
                i[k] = meta[k].encode("utf-8", "ignore")
            else:
                i[k] = meta[k]
        return i


