# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, time
from jobbole.items import JobboleArticleItem

'''
Article (blog post) info service class.
'''
class ArticleInfoService:
    """Helper service for parsing jobbole.com article pages.

    Offers date-string parsing (absolute and relative Chinese formats),
    HTML escaping for MySQL storage, Host-header extraction from a URL,
    and composition of a JobboleArticleItem from a scraped response.
    """

    @classmethod
    def get_date_with_frdays(cls, fr_days):
        """Return the datetime *fr_days* days before now.

        Args:
            fr_days: number of days (int, or a numeric string).
        """
        return datetime.now() - timedelta(days=int(fr_days))

    @classmethod
    def get_frhours_from_now(cls, hours):
        """Return the datetime *hours* hours before now.

        Bug fix: the original subtracted an int from ``datetime.time`` and
        then called the nonexistent ``datetime.time.localtime`` (the ``time``
        name imported here is the class, not the module), so this always
        raised — and the caller's bare ``except`` silently turned every
        'N小时前' date into None.
        """
        return datetime.now() - timedelta(hours=int(hours))

    @classmethod
    def get_frmins_from_now(cls, mins):
        """Return the datetime *mins* minutes before now (same fix as hours)."""
        return datetime.now() - timedelta(minutes=int(mins))

    @classmethod
    def tran_string_to_date(cls, date_string):
        """Parse a scraped date string into a datetime.

        The site renders dates as 'yyyy/mm/dd', 'N天前' (N days ago),
        'N小时前' (N hours ago) or 'N分钟前' (N minutes ago).

        Returns:
            datetime, or None when no known pattern matches.
        """
        # Absolute format first.
        try:
            return datetime.strptime(date_string, '%Y/%m/%d')
        except (ValueError, TypeError):
            pass

        # Relative formats: suffix -> handler producing a datetime.
        relative_formats = (
            ("天前", cls.get_date_with_frdays),
            ("小时前", cls.get_frhours_from_now),
            ("分钟前", cls.get_frmins_from_now),
        )
        for suffix, handler in relative_formats:
            if date_string.endswith(suffix):
                try:
                    return handler(int(date_string[:-len(suffix)]))
                except ValueError:
                    pass
        return None

    @classmethod
    def tran_tag_in_content(cls, content):
        """Escape HTML special characters in *content* before MySQL storage.

        NOTE(review): '&' is deliberately left unescaped to preserve the
        original behavior; ``html.escape`` would also escape '&'.
        """
        return content.replace('"', "&quot;").replace('<', "&lt;").replace('>', "&gt;")

    @classmethod
    def get_host_from_url(cls, url):
        """Derive a Host header value from *url*.

        Keeps the first subdomain label and pins the domain to jobbole.com.
        Returns None when the URL has fewer than three dot-separated parts.
        """
        parts = url.split('.')
        if len(parts) >= 3:
            return parts[0].replace("http://", "").replace("https://", "") + ".jobbole.com"
        return None

    @staticmethod
    def _clean_text(text):
        """Strip CR/LF, spaces and the '·' decoration from scraped text."""
        return text.replace("\r", "").replace("\n", "").replace(" ", "").replace("·", "")

    @staticmethod
    def _first_extract(response, xpaths):
        """Return the extraction of the first xpath yielding results, else []."""
        for xp in xpaths:
            extracted = response.xpath(xp).extract()
            if extracted:
                return extracted
        return []

    @classmethod
    def compose_article_item(cls, response, url):
        """Compose a complete JobboleArticleItem from *response* and *url*.

        Tries the several historical page layouts jobbole used for the
        publish date, title and category breadcrumbs, in order.
        """
        # Publish date: text of <p class="entry-meta-hide-on-mobile">, or the
        # first span inside <p class="p-meta">.
        pd_list = cls._first_extract(response, (
            ".//p[@class='entry-meta-hide-on-mobile']/text()",
            ".//p[@class='p-meta']/span/text()",
        ))

        # Title: <div class="entry-header"><h1>..</h1> or
        # <h1 class="p-tit-single">[<a>]..</h1>.
        title_list = cls._first_extract(response, (
            ".//div[@class='entry-header']/h1/text()",
            ".//h1[@class='p-tit-single']/a/text()",
            ".//h1[@class='p-tit-single']/text()",
        ))

        # Category breadcrumbs, several historical layouts.
        group_list = cls._first_extract(response, (
            ".//div[@class='breadcrumb-wrapper']/a/text()",
            ".//header[@class='w-border digg-top']/div/span/a/text()",
            ".//header[@class='w-border  digg-top-for-single']/div/span/text()",
            ".//header[@class='w-border  digg-top-for-single ']/div/span/a/text()",
        ))

        item = JobboleArticleItem()

        if url:
            item["url"] = url
            # The article ID is the last path segment; drop query/fragment first.
            path = url.split("?")[0].split("#")[0]
            segments = path.split("/")
            item["id"] = segments[-2] if path.endswith("/") else segments[-1]
        else:
            item["url"] = ""

        if pd_list:
            publish_date = cls._clean_text(pd_list[0])
            # Some layouts render only 'mm/dd'; the original hard-codes year
            # 2017 — TODO(review): confirm this shouldn't be the current year.
            if len(publish_date.split("/")) == 2:
                publish_date = "2017/" + publish_date
            item["publish_date"] = publish_date
        else:
            item["publish_date"] = "1970/01/01"

        item["title"] = cls._clean_text(title_list[0]) if title_list else "无标题"

        # Up to five breadcrumb levels; only the first has a non-empty default.
        group_defaults = ("未分类", "", "", "", "")
        for index, default in enumerate(group_defaults):
            key = "group_%d" % (index + 1)
            if len(group_list) > index:
                item[key] = cls._clean_text(group_list[index])
            else:
                item[key] = default

        return item
