# -*- coding: utf-8 -*-

import scpy.xawesome_time as xawsome_time

import util
from crawler import Crawler


class BaiduNews(Crawler):
    """Crawl paged keyword search results and extract blog-style posts.

    NOTE(review): despite the class name, the request parameters
    ('c': 'blog', gb2312-encoded query) and the parsed markup classes
    ('r-info-blog-*'), plus the 'sina_blog' crawlerSource label below,
    suggest this was adapted from a Sina blog crawler -- confirm that
    the endpoint and selectors actually match news.baidu.com.
    """

    def _crawl(self, params):
        """Fetch result pages for ``params['keyword']`` until a post older
        than ``params['time_limit']`` appears, and return the parsed posts.

        :param params: dict with at least 'keyword' (search term) and
            'time_limit' (cut-off; compared as a string against the
            page's displayed time text).
        :return: list of dicts, one per parsed post.
        """
        # Fixed search URL; page turning is driven by req_params['page']
        # below, while 'pn' stays hard-coded at 750.
        search_url = 'http://news.baidu.com/ns?word={}&pn={}'.format(
            params['keyword'], 750)
        req_params = {
            'c': 'blog',
            # The remote endpoint expects the query encoded as gb2312.
            'q': params['keyword'].encode('gb2312'),
            'range': 'article',
            'by': 'all',
            'sort': 'time',
            'page': '1',
            'dpc': '1'
        }

        retval = []

        continue_to_page = True
        while continue_to_page:
            data = self._structuring_content(
                self._fetch_content(search_url, req_params, timeout=30),
                'html5lib')
            data = data.find('div', class_='result-boxes')
            if not data:
                break
            data = data.findAll('div', class_='box-result clearfix')

            for x in data:
                try:
                    title_url = x.find('h2', class_='r-info-blog-tit')
                    title = title_url.get_text().strip()
                    # BUG FIX: this value was previously assigned to ``url``,
                    # clobbering the search URL so that every subsequent page
                    # request was fetched against the last post's link
                    # instead of the search endpoint.
                    post_url = title_url.find('a').get('href')
                    clearfix = x.find('div', class_='clearfix')
                    blog_info = clearfix.find(
                        'div', 'r-info r-info-blog r-info-blog-full')
                    abstract = blog_info.find(
                        'p', class_='content').get_text().strip()
                    # Displayed time text; compared lexicographically with
                    # time_limit -- assumes both share a sortable format
                    # (e.g. 'YYYY-MM-DD HH:MM') -- TODO confirm.
                    post_time = blog_info.find(
                        'span', class_='fgray_time').get_text()
                    if post_time < params['time_limit']:
                        # Results are sorted by time ('sort': 'time'), so
                        # everything after this is older: stop paging.
                        continue_to_page = False
                        break
                    blog = {
                        'createdAt': util.current_date(),
                        'title': title,
                        'url': post_url,
                        'newsTime': xawsome_time.parse_time(post_time),
                        'crawlerSource': 'sina_blog',
                        'sourceClassify': 'blog',
                        'source': 'blog',
                        'abstract': abstract,
                        'searchKey': params['keyword'],
                        'content': self._fetch_and_extract_content(post_url)
                    }

                    retval.append(blog)
                except Exception as e:
                    # Abandon the rest of the current page on a parse error;
                    # the while loop still advances to the next page.
                    self._logger.error(
                        'parse blog failed: blog[ %s ] error[ %s ]' % (x, e))
                    break

            req_params['page'] = str(int(req_params['page']) + 1)

        return retval
