# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


# -*- coding: utf-8 -*-
from __future__ import absolute_import

import sys, json, requests, datetime, re, traceback

sys.path.append('../../')

from crawling.pipelines import CustomizePipeline
from scutils.log_factory import LogFactory
from lxml import etree


class CsdnBlogApiPipeline(CustomizePipeline):
    """Item pipeline that POSTs crawled CSDN blog articles to a remote
    blog-server HTTP API, rewriting in-article image URLs to go through
    the CSDN image proxy first.

    NOTE(review): this module is Python 2 code (``unicode`` type check,
    ``.encode('utf-8')`` on byte strings); the fixes here stay Python 2
    compatible.
    """

    def __init__(self, logger, blog_server_api_url, httpheaders, img_url_replace_dict):
        # Number of items submitted so far; used only as a log prefix.
        self.count = 0
        # One persistent session so connections to the API are reused.
        self.httpsession = requests.session()
        self.logger = logger
        # Full URL of the blog-server POST endpoint.
        self.blog_server_api_url = blog_server_api_url
        # Extra HTTP headers sent with every POST ({} when unconfigured).
        self.httpheaders = httpheaders
        # Maps a literal image-URL prefix to the proxy prefix prepended to
        # it, e.g. 'http://mmbiz.qpic.cn' -> 'http://ss.csdn.net/p?'.
        self.img_url_replace_dict = img_url_replace_dict

    @classmethod
    def from_settings(cls, settings):
        """Alternate constructor used by Scrapy: builds the logger and API
        configuration from the crawler settings object."""
        my_level = settings.get('SC_LOG_LEVEL', 'INFO')
        my_name = settings.get('SC_LOGGER_NAME', 'sc-logger')
        my_output = settings.get('SC_LOG_STDOUT', True)
        my_json = settings.get('SC_LOG_JSON', False)
        my_dir = settings.get('SC_LOG_DIR', 'logs')
        my_bytes = settings.get('SC_LOG_MAX_BYTES', '10MB')
        my_file = settings.get('SC_API_PIPELINE_LOG_FILE', 'main.log')
        my_backups = settings.get('SC_LOG_BACKUPS', 5)

        logger = LogFactory.get_instance(json=my_json,
                                         name=my_name,
                                         stdout=my_output,
                                         level=my_level,
                                         dir=my_dir,
                                         file=my_file,
                                         bytes=my_bytes,
                                         backups=my_backups)
        blog_server_api_url = settings.get('CSDN_BLOG_SERVER_POST_URL')
        logger.debug('init full api url: {}'.format(blog_server_api_url))
        httpheaders = settings.get('CSDN_BLOG_SERVER_HEADER', None)
        if httpheaders is None:
            httpheaders = {}
        # Image-URL rewriting map (key: literal URL prefix to match,
        # value: proxy prefix that will be prepended to it).
        img_url_replace_dict = settings.get(
            'CSDN_BLOG_IMG_URL_REPLACE_DICT',
            {'http://mmbiz.qpic.cn': 'http://ss.csdn.net/p?',
             'https://mmbiz.qpic.cn': 'http://ss.csdn.net/p?'})
        return cls(logger, blog_server_api_url, httpheaders, img_url_replace_dict)

    def process_item(self, item, spider):
        """Submit one crawled article to the blog server.

        Builds the POST payload from ``item['data']``, rewrites image URLs
        when a replacement map is configured, and POSTs the payload.

        Always returns ``item`` so downstream pipelines keep receiving it
        (Scrapy's item-pipeline contract is to return the item or raise
        DropItem — the original returned None, feeding None to later
        pipelines). Invalid items are logged and skipped, preserving the
        original best-effort behaviour.
        """
        # Initialised before the try so the except handler cannot hit a
        # NameError when item['data'] itself raises.
        data = dict()
        try:
            itemdata = item['data']
            if itemdata['result'] == 0:
                self.logger.error('data error: ' + str(itemdata))
                return item
            data['title'] = itemdata.get('title', '')
            raw_content = itemdata.get('article', '')
            if type(raw_content) is not unicode:  # Python 2 text check
                self.logger.warn('content is not str type: ' + str(type(raw_content)) + ', title: ' +  data['title'] + ", content: " + raw_content)
            if self.img_url_replace_dict:
                # Route article images through the configured proxy.
                data['content'] = self.replace_img_url(raw_content)
            else:
                data['content'] = raw_content
            data['userName'] = itemdata.get('username', '')
            data['aid'] = itemdata.get('aid', '')
            if data['userName'] == '' or data['aid'] == '':
                self.logger.debug('item: {} is not valid'.format(str(item)))
                return item
            data['from'] = itemdata.get('from', '')
            start_time = datetime.datetime.now()
            response = self.httpsession.post(url=self.blog_server_api_url,
                                             headers=self.httpheaders,
                                             data=data)
            self.logger.debug("blog api response spent:{}".format(datetime.datetime.now() - start_time))
            self.count += 1
            json_body = json.loads(response.text)
            if response.status_code == 200 and json_body.get('status', False) is True:
                data_str = data['userName'] + '/' + data['title'] + '/' + data['aid']
                self.logger.debug(
                    '{}-submit data: {} success, result: {}'.format(str(self.count), data_str.encode('utf-8'), str(response.content)))
                # Fixed: the original passed a third argument that the
                # two-placeholder format string silently ignored.
                self.logger.debug(
                    '{}-detail data: {}'.format(str(self.count), str(data)))
            else:
                self.logger.error(
                    '{}-submit data: {} error, result: {}'.format(str(self.count), str(data).encode('utf-8'), str(response.content)))
        except Exception as e:
            # str() around the URL: it may be None when the setting is
            # missing, and '+' concatenation would otherwise raise a second
            # TypeError inside this handler.
            self.logger.error('Send data:' + str(data).encode('utf-8') + 'to BlogServer url:' + str(self.blog_server_api_url) + ', exception:\n' + str(e))
            # Replaces the Python 2-only `print e` statement and keeps the
            # full stack trace in the log instead of on stdout.
            self.logger.error(traceback.format_exc())
        return item

    def replace_img_url(self, html_content):
        """Rewrite image URLs inside *html_content*.

        1. Prepends the configured proxy prefix to every quoted URL that
           starts with a key of ``img_url_replace_dict``.
        2. Promotes the lazy-load ``data-src`` attribute to ``src`` on
           ``<img>`` tags whose ``src`` is a 'data:image' placeholder.

        Returns the rewritten document as a UTF-8 encoded string (lxml
        re-serialises it as a full HTML document, as before).

        Raises RuntimeError on failure. RuntimeError subclasses
        StandardError on Python 2, so existing ``except`` clauses still
        catch it, and it also exists on Python 3 (StandardError does not).
        """
        try:
            new_content = html_content
            for url_str, url_prefix in self.img_url_replace_dict.items():
                # re.escape: the configured prefixes are literal URLs; the
                # '.' and '?' in them must not act as regex metacharacters.
                new_content = re.sub('"' + re.escape(url_str),
                                     '"' + url_prefix + url_str,
                                     new_content)
            dom = etree.HTML(new_content)
            for img_node in dom.xpath('//img'):
                attrs = img_node.attrib
                # Lazy-loaded images carry the real URL in data-src and a
                # 'data:image/...' placeholder in src; promote data-src.
                if 'data:image' in attrs.get('src', ''):
                    if attrs.get('data-src', None) is not None:
                        attrs['src'] = attrs.get('data-src')
            return etree.tostring(dom, pretty_print=True, encoding='utf-8')

        except Exception as exp:
            traceback.print_exc(file=sys.stdout)
            self.logger.error('replace_img_url error ,exception: {} '.format(str(exp)))
            raise RuntimeError('replace_img_url error')