import time, datetime
from re import findall
from base_spider import BaseSpider


class NyTimesSpider(BaseSpider):
    """Spider for the NYTimes Chinese edition (cn.nytimes.com / cn.nytstyle.com).

    The list-page layout differs per category, so the article-list xpath
    (and, for some categories, the date regex) is selected in ``__init__``.
    """

    title_path = "h3/a/@title"
    summary_path = "p/text()"
    createdate_path = "div/span[@class='date']/text()"
    url_path = "h3/a/@href"
    # Raw string: the original non-raw literal relied on "\s"/"\S" not being
    # recognized escapes (a SyntaxWarning on modern Python). Value unchanged.
    # NOTE(review): greedy [\s\S]* — if the author block occurs twice this
    # captures too much; confirm against real pages.
    content_regex = r'class="content chinese">([\s\S]*)<div class="authorIdentification">'
    # Articles whose title contains any of these keys are skipped.
    invalid_title_key_list = ["早报"]

    def __init__(self, img_dir, debug):
        """Pick the list xpath (and createdate regex) for the current category."""
        super().__init__(img_dir, debug)
        if self.category in ["culture", "education", "technology"]:
            self.datas_path = "//div[@class='mid-info center']|//li[@class='story ']|//li[@class='story  first ']"
        else:
            self.datas_path = "//p[@class='summary']/.."
            self.createdate_regex = r"\d+年\d+月\d+日"

    def parse_creator(self, data):
        """Return the author, trying the category-specific xpath first."""
        if self.category in ["culture", "education", "technology"]:
            outer_creator = self.parse_param(data, "div/span[@class='byline']/span/text()")
            if outer_creator:
                return outer_creator
            return self.parse_param(data, "div/span[@class='byline']/text()")
        outer_creator = self.parse_param(data, "h6[@class='byline']/text()")
        if outer_creator:
            return outer_creator
        return self.parse_param(data, "h6[@class='byline']/span/text()")

    def parse_url(self, data):
        """Rewrite the article link onto the cn.nytstyle.com mirror."""
        url = self.parse_param(data, self.url_path)
        if len(url) == 0:
            return url
        return "https://cn.nytstyle.com" + url.split("nytimes.com")[-1]

    def get_createdate(self, data):
        """Join the digits of the list-page date as 'Y-M-D'; None if absent."""
        if self.category in ["culture", "education", "technology"]:
            createdate = self.parse_param(data, self.createdate_path)
            createdate = "-".join(findall(r"\d+", createdate))
            if createdate.strip():
                return createdate
        return None


class BxSpider(BaseSpider):
    """Spider for Boxun (peacehall.com).

    The publish date is not on the list page; it is recovered from the
    article URL, whose final path component begins with YYYYMMDD.
    """

    domain = "http://www.peacehall.com"
    # NOTE(review): '@target/..' takes the parent of an attribute node,
    # which many xpath engines reject — confirm this path actually matches.
    datas_path = "//font/../@target/.."
    url_path = "@href"
    title_path = "text()"
    # Raw string fixes the invalid "\s"/"\S" escapes; regex value unchanged.
    content_regex = r"<!--bodystart-->([\s\S]*?)<!--bodyend-->"

    def get_createdate(self, data):
        """Derive 'YYYY-MM-DD' from the first 8 digits of the URL file name."""
        url = self.parse_url(data)
        stamp = url.split("/")[-1].split(".")[0][:8]
        return stamp[:4] + "-" + stamp[4:6] + "-" + stamp[6:]


class BcSpider(BaseSpider):
    """Spider for Backchina (backchina.com); pure-configuration subclass."""

    datas_path = "//div[@class='frame move-span cl frame-1-1-1']/div//a[text()]"
    domain = "http://www.backchina.com/"
    content_path = "//div[@class='main_content']"
    # Raw string fixes the invalid "\d" escapes; regex value unchanged.
    createdate_regex = r"\d+-\d+-\d+ \d+:\d+"
    # Strip this navigation link block from the extracted content.
    invalid_content_xpath_list = ["//div[@id='pt']//a[@href='http://www.backchina.com/forum/3150/index-1.html']"]


class ReutersSpider(BaseSpider):
    """Spider for Reuters Chinese (cn.reuters.com)."""

    datas_path = "//div[@id='splitColumn']//div[@class='moduleBody']//a"
    domain = "http://cn.reuters.com"
    # Raw string fixes the invalid "\d" escapes; regex value unchanged.
    createdate_regex = r"\d+年\d+月\d+日"
    content_path = "//div[@class='inner-container']"

    def parse_createdate_from_content(self, content):
        """Extract a '%Y-%m-%d %H:%M' timestamp from the article body.

        The page shows something like '2019年1月2日 / 下午3点30分' — the
        date before the '/', the time after it.  '下午' (PM) times are
        shifted forward by 12 hours.

        Bug fix: the 12-hour shift is now skipped when the hour is already
        12, since 下午12点 is noon (12:xx) — previously it was pushed to
        next-day 00:xx.  NOTE(review): 上午12点 (midnight) is still not
        normalized to 00:xx — confirm whether the site ever emits it.
        """
        createdate = self.parser.parse_by_regex(content, r"\d+年\d+月\d+日[\s\S]+?点\d+分")
        the_date = "-".join(findall(r"\d+", createdate.split("/")[0]))
        the_time = ":".join(findall(r"\d+", createdate.split("/")[1]))
        if '下午' in createdate and int(the_time.split(":")[0]) < 12:
            timearray = time.strptime(the_date + " " + the_time, "%Y-%m-%d %H:%M")
            timestamp = int(time.mktime(timearray))
            return time.strftime("%Y-%m-%d %H:%M", time.localtime(timestamp + 3600 * 12))
        return the_date + " " + the_time


class BBCSpider(BaseSpider):
    """Spider for BBC Chinese (bbc.com)."""

    datas_path = "//a[@class='title-link']|//h3/a"
    url_path = "@href"
    domain = "http://www.bbc.com"
    # Raw string fixes the invalid "\d" escapes; regex value unchanged.
    createdate_regex = r"\d+年 \d+月 \d+日"
    content_path = "//div[@class='story-body__inner']"
    # Drop embedded flash players from the extracted content.
    invalid_content_xpath_list = ["//object[@type='application/x-shockwave-flash']"]

    def parse_title(self, data):
        """Prefer the h3/span title; fall back to the link's own text.

        NOTE(review): the fallback indexes [0] unconditionally and raises
        IndexError for a link with no direct text node — confirm that
        datas_path guarantees one.
        """
        titles = data.xpath("h3/span/text()")
        if titles:
            return self.wash(titles[0].strip())
        return self.wash(data.xpath("text()")[0].strip())


class MlhSpider(BaseSpider):
    """Spider for a Blogger-hosted blog; pure-configuration subclass."""

    # Only follow list links whose href contains 'blog'.
    datas_path = "//div[@class='date-posts']/div//h3/a[contains(@href, 'blog')]"
    # Raw strings fix the invalid "\d"/"\s" escapes; regex values unchanged.
    createdate_regex = r"\d+-\d+-\d+"
    content_regex = r"<a name='more'></a>([\s\S]*)<div class='addthis_bottom_shell'"


class OnSpider(BaseSpider):
    """Spider for a site whose articles carry only month/day timestamps."""

    datas_path = "//div[@id='tabContent']//h1[@class='title']/a"
    # Raw string fixes the invalid "\s"/"\S" escapes; regex value unchanged.
    # NOTE(review): greedy [\s\S]* spans to the LAST </p> on the page —
    # presumably intentional (whole body); confirm.
    content_regex = r'<p class="summary">[\s\S]*</p>'

    def parse_createdate_from_content(self, content):
        """Prefix the current year onto the month/day/time found in the body.

        Falls back to 'now' when no date string is present.
        NOTE(review): the success path yields 'YYYY-M-D-H-M' (time joined
        with '-', not ':') — confirm downstream accepts this format.
        """
        createdates = self.parser.parse_by_regex(content, r"\d+月\d+日.*\d+:\d+")
        if createdates is not None:
            return time.strftime("%Y") + "-" + "-".join(findall(r"\d+", createdates))
        return time.strftime("%Y-%m-%d %H:%M:%S")


class TorcnSpider(BaseSpider):
    """Spider for torcn.com; pure-configuration subclass."""

    datas_path = "//table[@id='datalist']//a"
    content_path = '//*[@id="Label1"]/div[1]'
    # Raw string fixes the invalid "\d"/"\s" escapes; regex value unchanged.
    # [\s\S] matches the single separator character between date and time.
    createdate_regex = r"\d+/\d+/\d+[\s\S]\d+:\d+:\d+"
    domain = "http://www.torcn.com"


class VoaSpider(BaseSpider):
    """Spider for VOA Chinese (voachinese.com)."""

    # Only long article links that carry a @title (filters share/nav links).
    datas_path = '//div[@class="media-block-wrap"]//a[string-length(@href)>40 and @title]'
    title_path = "@title"
    domain = "https://www.voachinese.com"
    # Share-widget and player captions that must not be treated as titles.
    invalid_title_key_list = ["分享到推特", "分享到脸书", "fullscreen"]

    def parse_createdate_from_content(self, content):
        """Read 'YYYY年MM月DD日 HH:MM' from the date-time span; default to now."""
        createdates = self.parser.parse_by_xpath(content, '//span[@class="date-time"]')
        if createdates is not None:
            # Raw strings fix the invalid "\d" escapes; patterns unchanged.
            day_parts = findall(r"\d+年\d+月\d+日", createdates)
            if day_parts:
                day = "-".join(findall(r"\d+", day_parts[0]))
                clock = findall(r"\d+:\d+", createdates)
                if clock:
                    return day + " " + clock[0]
        return time.strftime("%Y-%m-%d %H:%M:%S")

    def get_content(self, page):
        """Fuse the text paragraphs of the article body into one string."""
        return self.parser.parse_by_xpath(page, '//div[@class="wsw"]/p[text()]', mode="fuse")


class UsembassySpider(BaseSpider):
    """Spider for the US Embassy site; relies entirely on BaseSpider defaults."""
    pass


class ScmpSpider(BaseSpider):
    """Spider for SCMP; relies entirely on BaseSpider defaults."""
    pass


class WsjSpider(BaseSpider):
    """Spider for WSJ Chinese; pure-configuration subclass."""

    datas_path = "//div[@class='sponsored-content-wrapper']//a[string-length(text())>1]"
    content_path = '//*[@id="A"]'
    creator_path = '//span[@itemprop="name"]/text()'
    # Raw string fixes the invalid "\d"/"\s" escapes; regex value unchanged.
    # [\s\S] matches the single separator character between date fields.
    createdate_regex = r"\d+年[\s\S]\d+月[\s\S]\d+日[\s\S]\d+:\d+"
    # NOTE(review): this drops every title containing 中国 or 科技 — very
    # broad for a China-focused feed; confirm intentional.
    invalid_title_key_list = ["中国", "科技"]


if __name__ == "__main__":
    # Manual debug harness: pick a spider from the local config by the
    # URL's third path segment and crawl one list page.
    from urllib.parse import unquote
    from voice.config import spider_dict
    from xx_spider.logger import Logger

    # URL-encoded local list page used for debugging.
    url = 'http://localhost:8000/%E5%80%8D%E5%8F%AF%E4%BA%B2/%E5%80%8D%E5%8F%AF%E4%BA%B2-%E6%B5%B7%E5%A4%96%E5%8D%8E%E4%BA%BA%E4%B8%AD%E6%96%87%E9%97%A8%E6%88%B7.html'

    # Raw string keeps the Windows backslashes literal (the non-raw form
    # only worked because "\文" etc. are not recognized escapes).
    img_path = r"D:\文件\项目\境外\images"
    # spider = ReutersSpider(img_path, False)
    # The decoded URL's third path segment selects the spider class.
    spider = spider_dict[unquote(url).split("/")[3]]['spider'](img_path, False)
    spider.set_origin("test_source", "test_category")
    # Clear half-crawled rows before re-running.
    spider.db.execute_sql("delete from article where content is null;")
    spider.logger = Logger("debug", "debug")
    spider.logger.debug("spider初始化成功...")
    # debug list
    spider.crawl_list(url)

    # debug content
    # content, content_page = spider.parse_content(url, handle_img=False)
    # spider.logger.info(content_page)
    # spider.logger.info(content)
