# -*- coding: utf-8 -*-

from pymysqlpool import ConnectionPool
from scrapy.crawler import logger

from hexunblog.dao.mysql_config import db_config
from hexunblog.items import HexunblogItem
from hexunblog.service.article_service import ArticleInfoService

'''
博文信息持久化服务类
'''


class ArticleInfoDaoDB:
    """DAO for persisting blog-article info in ``zhihu.huxunblog_article_info``.

    All access goes through a lazily-created, class-level connection pool,
    so the class is used via its classmethods without instantiation.
    """

    # Shared pymysqlpool.ConnectionPool, created on first use (see connection_pool).
    pool = None

    @classmethod
    def connection_pool(cls):
        """Return the shared DB connection pool, creating and connecting it on first use."""
        if not cls.pool:
            cls.pool = ConnectionPool(**db_config)
            cls.pool.connect()
        return cls.pool

    @classmethod
    def get_article_info(cls, article_id):
        """Fetch one article row by ``article_id``.

        :param article_id: article primary key; coerced to str for the query.
        :return: a populated ``HexunblogItem`` for the first matching row,
                 or ``None`` when no row exists.
        """
        sql_query = "SELECT article_id, blog_id, user_id, original, title, click_count, comment_count, author, " \
                    "tag_name, class_name, url, publish_date FROM zhihu.huxunblog_article_info WHERE article_id = %s"
        with cls.connection_pool().cursor() as cursor:
            # BUG FIX: the parameter must be a 1-tuple — "(str(article_id))" was
            # just a parenthesized string, not a tuple.
            cursor.execute(sql_query, (str(article_id),))
            # Cursor yields dict-like rows; article_id is expected unique, so
            # return on the first row.
            for res in cursor:
                article = HexunblogItem()
                for field in ("article_id", "blog_id", "user_id", "original",
                              "title", "click_count", "comment_count", "author",
                              "tag_name", "class_name", "url", "publish_date"):
                    article[field] = res[field]
                return article
        return None

    @classmethod
    def save(cls, article_id, blog_id, user_id, original, title, click_count, comment_count, author, tag_name,
             class_name, url, publish_date):
        """Insert an article row unless one with the same ``article_id`` already exists.

        ``publish_date`` is converted from string to date via
        ``ArticleInfoService.tran_string_to_date`` before insertion.
        Any exception is caught and logged (best-effort persistence).
        """
        try:
            if cls.get_article_info(article_id):
                # Duplicate: skip the insert, just log it.
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>数据已经存在，ID=" + str(article_id) + ", TITLE:" + title)
                return
            publish_date = ArticleInfoService.tran_string_to_date(publish_date)
            sql_insert = "INSERT INTO zhihu.huxunblog_article_info ( article_id, blog_id, user_id, original, " \
                         "title, click_count, comment_count, author, tag_name, class_name, url, publish_date) " \
                         "VALUES  ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            with cls.connection_pool().cursor() as cursor:
                cursor.execute(sql_insert, (article_id, blog_id, user_id, original, title, click_count,
                                            comment_count, author, tag_name, class_name, url, publish_date))
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>博文信息保存成功！title:" + title)
        except Exception as err:
            logger.error(err)
            logger.error(">>>>>>>>>>>>>>>>>>>>>>>>>保存博文信息时发生异常！")

    @classmethod
    def update_click_comment_count(cls, article_id, click_count, comment_count):
        """Update click/comment counters for an existing article; no-op (logged) if absent.

        Any exception is caught and logged (best-effort update).
        """
        try:
            if cls.get_article_info(article_id):
                # BUG FIX: the implicit string concatenation previously produced
                # "...comment_count = %sWHERE article_id = %s" (missing space),
                # which made every update fail with a SQL syntax error.
                sql_update = "UPDATE zhihu.huxunblog_article_info SET click_count = %s, comment_count = %s " \
                             "WHERE article_id = %s"
                with cls.connection_pool().cursor() as cursor:
                    cursor.execute(sql_update, (click_count, comment_count, article_id))
                    # BUG FIX: str() — concatenating an int article_id raised
                    # TypeError, which was swallowed by the except below.
                    logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>博文信息更新成功！article_id:" + str(article_id))
            else:
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>数据不存在，ID=" + str(article_id))
        except Exception as err:
            logger.error(err)
            logger.error(">>>>>>>>>>>>>>>>>>>>>>>>>更新博文信息时发生异常！")
