# -*- coding: utf-8 -*-
import concurrent.futures
import requests
from django.core.management.base import BaseCommand, CommandError
from blog import services
from bs4 import BeautifulSoup


class Command(BaseCommand):
    """Crawl blog articles from weiguda.com and store them via blog.services.

    Listing pages are fetched concurrently (newest page first). Each article
    stub found on a listing page is enriched with the full article body and
    handed to ``services.save_article_async`` for persistence.
    """
    help = 'get article from weiguda'

    def add_arguments(self, parser):
        """Register --start/--end options bounding the page range to crawl."""
        parser.add_argument('--start', type=int, default=0,
                            help='lower page bound (exclusive)')
        parser.add_argument('--end', type=int, default=3,
                            help='upper page bound (inclusive)')

    def handle(self, *args, **options):
        """Entry point: crawl pages end..start+1 concurrently, saving articles.

        A failure on one page is logged and does not abort the other pages;
        the HTTP session is always closed, even on an unexpected error.
        """
        base_url = 'http://www.weiguda.com/blog/?page=%d'
        start = int(options['start'])
        end = int(options['end'])
        self.host = 'http://www.weiguda.com'
        self.session = requests.Session()
        self.site = self.get_site()
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                # range(end, start, -1) visits end, end-1, ..., start+1:
                # newest pages first, `start` itself excluded.
                future_to_page = {
                    executor.submit(self.parse_page, base_url % (page,)): page
                    for page in range(end, start, -1)
                }
                for future in concurrent.futures.as_completed(future_to_page):
                    page = future_to_page[future]
                    try:
                        future.result()
                    except Exception as exc:
                        # One bad page must not kill the whole crawl.
                        self.stdout.write(self.style.ERROR(
                            '%r generated an exception: %s' % (page, exc)))
                    else:
                        self.stdout.write(self.style.SUCCESS(
                            '%r page is ok' % (page,)))
        finally:
            # Guarantee the pooled connections are released.
            self.session.close()

    def get_site(self):
        """Return the Site record for this crawler's host via blog.services."""
        return services.get_site(self.host, 'weiguda')

    def url_get(self, url):
        """GET *url* through the shared session, logging the request."""
        self.stdout.write(self.style.SUCCESS('GET %s' % (url, )))
        return self.session.get(url)

    def url_gettext(self, url):
        """Return the response body of *url* as text."""
        return self.url_get(url).text

    def url_getjson(self, url):
        """Return the response body of *url* decoded as JSON."""
        return self.url_get(url).json()

    def parse_page(self, url):
        """Parse one listing page: extract each article stub, enrich and save it."""
        soup = BeautifulSoup(self.url_gettext(url), 'html.parser')

        for item_div in soup.find_all('div', class_='blog_abstract'):
            item_anchor = item_div.find('a')
            # hrefs look like '/blog/<id>/' -> strip prefix and trailing slash
            # to recover the numeric object id.
            href = item_anchor.get('href')
            object_id = int(href.replace('/blog/', '')[:-1])
            article = dict(title=item_anchor.text,
                           description=item_div.find('p').text,
                           object_key_int=object_id,
                           source_url=self.host + href,
                           tags=['django', ])
            self.parse_article(article)
            self.save_article(article)

    def save_article(self, article):
        """Queue *article* for asynchronous persistence under this site."""
        services.save_article_async(self.site.id, article)

    def parse_article(self, article):
        """Fetch the article page and fill in author, date, and body content.

        Raises CommandError when the article body cannot be located, so the
        caller never saves an article with a missing ``content`` field (the
        old code crashed on ``None.prettify()`` instead).
        """
        source_content = self.url_gettext(article['source_url'])
        soup = BeautifulSoup(source_content, 'html.parser')
        subtitle_div = soup.find('div', class_='blog_subtitle')
        author_anchor = subtitle_div.find('a')
        article['author_name'] = author_anchor.text.replace(',', '').strip()
        article['post_at'] = subtitle_div.find('time').get('datetime')
        body = soup.find('div', attrs={'itemprop': 'articleBody'})
        if not body:
            # Fail loudly; handle() logs this per page and keeps crawling.
            self.stdout.write(self.style.ERROR("Can't find body"))
            raise CommandError(
                "Can't find article body: %s" % article['source_url'])
        article['content'] = str(body.prettify())
        article['source_content'] = source_content
