# -*- coding: utf-8 -*-
import concurrent.futures
import requests
import re
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import datetime
from django.utils import timezone
from blog import services
from bs4 import BeautifulSoup


class Command(BaseCommand):
    """
    Crawl a user's article-list pages on CSDN (http://blog.csdn.net),
    parse each article's metadata and body, and persist them through the
    blog ``services`` layer.
    """
    # NOTE: the original text said "digitalocean", but every URL and the
    # site key below target CSDN — the text was stale.
    help = 'get article from csdn'

    def add_arguments(self, parser):
        """Register the crawl target and page range options."""
        parser.add_argument('--username', type=str,  help='用户名')
        parser.add_argument('--column', type=str,  help='专栏名称')
        parser.add_argument('--start', type=int, default=0, help='开始页码')
        parser.add_argument('--end', type=int, default=3, help='结束页码')

    def handle(self, *args, **options):
        """Entry point: crawl list pages (start, end] and save every article.

        Each page is submitted to a thread pool; a failure on one page is
        reported and does not abort the remaining pages.
        """
        base_url = 'http://blog.csdn.net/%s/article/list/%d'
        username = options['username']
        start = int(options['start'])
        end = int(options['end'])
        self.host = 'http://blog.csdn.net'
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': settings.USER_AGENT,
            'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        })
        self.site = self.get_site()
        self.username = username
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                # range(end, start, -1) yields end, end-1, ..., start+1:
                # pages are crawled highest-numbered first and `start`
                # itself is excluded.
                future_to_url = {
                    executor.submit(self.parse_page, base_url % (username, page)): page
                    for page in range(end, start, -1)
                }
                for future in concurrent.futures.as_completed(future_to_url):
                    page = future_to_url[future]
                    try:
                        future.result()
                    except Exception as exc:
                        # Surface the failing page number; keep crawling
                        # the rest instead of propagating out of handle().
                        self.stdout.write(self.style.ERROR(
                            '%r generated an exception: %s' % (page, exc)))
                    else:
                        self.stdout.write(self.style.SUCCESS('%r page is ok' % (page,)))
        finally:
            # Always release the HTTP session, even if the crawl raised.
            self.session.close()

    def get_site(self):
        """Return the site record for the CSDN host from the services layer."""
        return services.get_site(self.host, 'csdn')

    def url_get(self, url):
        """GET *url* with the shared session, logging the request URL."""
        self.stdout.write(self.style.SUCCESS('GET %s' % (url, )))
        return self.session.get(url)

    def url_gettext(self, url):
        """GET *url* and return the response body as decoded text."""
        res = self.url_get(url)
        return res.text

    def url_getjson(self, url):
        """GET *url* and return the response body parsed as JSON."""
        res = self.url_get(url)
        return res.json()

    def parse_page(self, url):
        """Parse one article-list page: extract each entry's metadata,
        fetch its full content, and save it."""
        html = self.url_gettext(url)
        soup = BeautifulSoup(html, 'html.parser')

        author_div = soup.find('div', id='blog_userface')
        author_name = str(author_div.find('a', class_='user_name').string)
        # Each article entry is a <div class="list_item ..."> block; the
        # raw-string pattern avoids the invalid-escape warning of 'list_item\s'.
        for li_el in soup.find_all('div', class_=re.compile(r'list_item\s')):
            post_at = str(li_el.find('span', class_='link_postdate').string)
            post_at = timezone.make_aware(datetime.datetime.strptime(
                post_at.strip(), "%Y-%m-%d %H:%M"))
            link = li_el.find('a')
            url = link.get('href')
            # The trailing path segment of the article URL is its numeric id.
            object_int = int(url[url.rfind('/') + 1:])
            article = dict(title=str(link.string).strip(),
                           description=str(li_el.find('div', class_='article_description').string),
                           post_at=post_at,
                           author_name=author_name,
                           author_username=self.username,
                           object_key_int=object_int,
                           source_url=self.host + url,
                           )
            self.parse_article(article)
            self.save_article(article)

    def save_article(self, article):
        """Persist *article* asynchronously under this command's site."""
        services.save_article_async(self.site.id, article)

    def parse_article(self, article):
        """Fetch the page at article['source_url'] and fill in tags,
        categories, content and raw HTML on *article* in place."""
        source_content = self.url_gettext(article['source_url'])
        soup = BeautifulSoup(source_content, 'html.parser')

        tags = ['untagged', ]
        tags_span = soup.find('span', class_='link_categories')
        if tags_span:
            tags = [str(x.string) for x in tags_span.find_all('a')]
        cats = ['Uncategorized', ]
        cat_div = soup.find('div', class_='category_r')
        if cat_div:
            cats = [str(x.string) for x in cat_div.find_all('span')]
        article['tags'] = tags
        article['cats'] = cats

        # Try the CSDN layout first, then two alternate/legacy layouts.
        body = soup.find('div', id='article_content')
        if not body:
            body = soup.find(
                'div', class_='content-body tutorial-content tutorial-content-legacy')
        if not body:
            body = soup.find('article')
        if not body:
            # Best-effort: report and continue; content becomes "None".
            print(article)
            self.stdout.write(self.style.ERROR("Can't find body"))
        article['content'] = str(body)
        article['source_content'] = source_content
