# -*- coding: utf-8 -*-

import json
import re

from bs4 import BeautifulSoup
from pyquery import PyQuery
from readability.readability import Document

import scpy.xawesome_time as xawsome_time
import scpy2.util as util
from scpy2.crawlers.crawler import CrawlProcessor


class TouTiaoNews(CrawlProcessor):
    """Crawler processor that searches toutiao.com news for a keyword.

    ``crawl`` pages through the search API, filters hits by a caller-supplied
    time limit, downloads each article and formats it into a flat dict.
    """

    def crawl(self, params):
        """Fetch up to two pages (2 x 20 items) of search results.

        :param params: dict with keys ``keyword`` (search term),
            ``time_limit`` (minimum accepted ``datetime`` value) and
            optionally ``company_name`` (when non-empty, items are skipped
            -- preserved from the original control flow).
        :return: list of news dicts shaped by ``__purify_and_format``.
        """
        url = 'http://toutiao.com/search_content'

        retval = []

        with self.crawler.reaper.new() as rs:
            # range() behaves identically here on Python 2 and 3
            # (xrange is Python-2-only).
            for page in range(2):
                res = rs.get(url, params={
                    'offset': 20 * page,
                    'format': 'json',
                    'keyword': params['keyword'],
                    'autoload': 'true',
                    'count': 20
                })
                if res.status_code != 200:
                    continue

                # The endpoint returns JSON; parse it directly.  The previous
                # pipeline (BeautifulSoup -> <p> text -> regex true/false
                # swap -> eval) executed an untrusted HTTP response as Python
                # code, which is a code-injection risk, and the blanket
                # 'false'/'true' regex could corrupt payload text too.
                try:
                    content = json.loads(res.content)
                except ValueError as e:
                    self.crawler.logger.error(
                        'parse search response failed: error[ %s ]' % e)
                    continue

                # Guard against a malformed response with no 'data' key.
                for item in content.get('data') or []:
                    # Skip articles older than the caller-supplied limit.
                    if params['time_limit'] > item['datetime']:
                        continue

                    if not params.get('company_name', ''):
                        news = self.__purify_and_format(
                            item, params['keyword'],
                            self.__fetch_and_extract_content(item['url']),
                            'toutiao'
                        )
                        self.crawler.logger.debug(
                            'formatted news: %s' % util.ustr(news))

                        retval.append(news)

        self.crawler.logger.debug(retval)

        return retval

    def __structuring_content(self, content, method='lxml'):
        """Parse *content* into a BeautifulSoup tree using the *method* parser."""
        return BeautifulSoup(content, method)

    def __fetch_and_extract_content(self, url):
        """Download *url* and return ``(plain_text, paragraph_html)``.

        On a non-200 response nothing is downloaded and
        ``__extract_content`` falls back to empty output.
        """
        content = None
        with self.crawler.reaper.new() as rs:
            res = rs.get(url)
            if res.status_code == 200:
                content = res.content

        return self.__extract_content(content)

    def __extract_content(self, data):
        """Extract the readable article text from raw HTML.

        :param data: raw HTML bytes/string, or falsy when the download failed.
        :return: tuple ``(plain_text, paragraph_html)``; empty placeholders
            on failure.
        """
        if not data:
            # Nothing was downloaded -- don't rely on Document(None) raising.
            return ('', '<p></p>')

        try:
            readable_article = Document(data).summary()

            content = PyQuery(readable_article).text()
            if content:
                # Keep only <p> tags: shelter them behind placeholders,
                # strip every remaining tag, then restore them.
                text_p = readable_article.replace('<p>', '@@@p@@@').replace('</p>', '+++p+++')
                text_p = re.sub('<.+?>', '', text_p)
                text_p = text_p.replace('@@@p@@@', '<p>').replace('+++p+++', '</p>')
            else:
                content = ''
                text_p = '<p></p>'

            return (content, text_p)
        except Exception as e:
            # Best-effort extraction: log and fall back rather than abort
            # the whole crawl for one broken article.
            self.crawler.logger.error('extract content failed: error[ %s ]' % e)

            return ('', '<p></p>')

    def __replace_content(self, patterns, content):
        """Apply ``(pattern, replacement)`` regex pairs to *content* in order."""
        for pattern, replacement in patterns:
            content = re.sub(pattern, replacement, content)

        return content

    def __purify_and_format(self, original, keyword, content, crawler_source, classify='news'):
        """Map a raw search-result dict onto the normalized news schema.

        :param original: one item from the search API's ``data`` list.
        :param keyword: the search term that produced this hit.
        :param content: ``(plain_text, paragraph_html)`` tuple from
            ``__extract_content``.
        :param crawler_source: identifier of this crawler ('toutiao').
        :param classify: source classification, defaults to 'news'.
        """
        # Leftover debug print() replaced with the crawler logger used
        # everywhere else in this class.
        self.crawler.logger.debug(util.ustr(original))
        return {
            'createdAt': util.current_date(),
            'newsTime': xawsome_time.parse_time(original.get('datetime', '')),
            'crawlerSource': crawler_source,
            'sourceClassify': classify,
            # Fall back to the crawler name when the item carries no source.
            'source': util.ustr(original.get('source', '')) if original.get('source', '') else crawler_source,
            'title': util.ustr(original.get('title', '')),
            'url': original.get('url', ''),
            'abstract': util.ustr(original.get('abstract', '')),
            'searchKey': keyword,
            'content': content
        }
