# -*- coding: utf-8 -*-
import concurrent.futures
import requests
import re
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import datetime
from django.core import serializers
import json
from django.utils import timezone
from blog import services
from bs4 import BeautifulSoup


class Command(BaseCommand):
    """Crawl blog articles from cnblogs.com for one user and persist them.

    Usage:
        manage.py <command> --username <name> [--start N] [--end M]

    Pages are fetched from ``end`` down to ``start`` (exclusive); every
    article link found on a page is parsed into a dict and handed to
    ``blog.services.save_article_async``.
    """
    help = 'get article from cnblogs'

    def add_arguments(self, parser):
        """Register command-line options (username and page range)."""
        parser.add_argument('--username', type=str,  help='用户名')
        parser.add_argument('--start', type=int, default=0, help='开始页码')
        parser.add_argument('--end', type=int, default=-1, help='结束页码')

    def handle(self, *args, **options):
        """Entry point: set up the HTTP session and crawl the page range.

        ``--end -1`` (the default) means "auto-detect the total page count".
        """
        base_url = ('http://www.cnblogs.com/%s/default.html?page=%d')
        username = options['username']
        start = int(options['start'])
        end = int(options['end'])
        self.base_url = base_url
        self.host = 'http://www.cnblogs.com/'
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': settings.USER_AGENT})
        self.site = self.get_site()
        self.username = username
        # blogapp / blogid are cnblogs-internal identifiers, discovered
        # lazily from the first page / first article we fetch.
        self.blogapp = None
        self.blogid = None
        # Raw strings: '\d' etc. in plain strings raise
        # DeprecationWarning on modern Python.
        self.blogapp_re = re.compile(r"currentBlogApp = '([\d\w-]+)'")
        self.blogid_re = re.compile(r',cb_blogId=(\d+),')
        self.entryid_re = re.compile(r',cb_entryId=(\d+),')
        self.cats_re = re.compile(r'>([\s\S]+?)</a>')
        self.tags_re = re.compile(r'>([\s\S]+?)</a>')
        self.total_page_re = re.compile(r'共(\d+)页')
        self.postdate_re = re.compile(r'<span id="post-date">([\d\s:-]+?)</span>')
        self.author_name = self.get_author_name()
        if end == -1:
            end = self.get_total_page()

        # NOTE: max_workers=1 keeps page fetches sequential (polite
        # crawling); raise it to parallelize.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            future_to_url = {executor.submit(self.parse_page, base_url % (
                username, page,)): page for page in range(end, start, -1)}
            for future in concurrent.futures.as_completed(future_to_url):
                # Re-raise any exception that occurred inside the worker.
                future.result()
        self.session.close()

    def get_author_name(self):
        """Scrape the author's display name from the blog "news" widget.

        Looks for the first ``home.cnblogs.com/u/<username>/`` profile link
        and returns the anchor text that follows it.
        """
        url = 'http://www.cnblogs.com/mvc/blog/news.aspx?blogApp=%s'%(self.username,)
        html = self.url_gettext(url)
        key = 'http://home.cnblogs.com/u/'+self.username+'/">'
        # NOTE(review): if the key is absent, find() returns -1 and the
        # slice below yields garbage — assumes the widget always contains
        # the profile link.
        start_index = html.find(key)
        end_index = html.find('</a>',start_index)
        return html[start_index+len(key):end_index]

    def get_total_page(self):
        """Return the total number of list pages, defaulting to 1.

        The count is scraped from the "共N页" pager text on page 2.
        """
        url = self.base_url%(self.username, 2)
        html = self.url_gettext(url)
        try:
            total_page = int(self.total_page_re.findall(html)[0])
        except (IndexError, ValueError):
            # Pager text missing (single-page blog) or unparsable.
            total_page = 1
        return total_page

    def get_site(self):
        """Fetch/create the Site record for cnblogs via the service layer."""
        return services.get_site(self.host, '博客园')

    def url_get(self, url):
        """GET *url* with the shared session, logging the request."""
        self.stdout.write(self.style.SUCCESS('GET %s' % (url, )))
        return self.session.get(url)

    def url_gettext(self, url):
        """GET *url* and return the response body as text.

        Raises:
            requests.HTTPError: if the final response is itself a redirect.
        """
        res = self.url_get(url)
        if res.is_redirect:
            raise requests.HTTPError()
        return res.text

    def url_getjson(self, url):
        """GET *url* and return the decoded JSON payload."""
        res = self.url_get(url)
        return res.json()

    def parse_page(self, url):
        """Parse one article-list page: find links, parse and save each."""
        try:
            html = self.url_gettext(url)
        except requests.HTTPError:
            self.stdout.write(self.style.ERROR('Occurred http error'))
            return
        soup = BeautifulSoup(html,'html.parser')

        if not self.blogapp:
            # Extracted from inline JS: var currentBlogApp = 'Albert-Lee',
            self.blogapp = self.blogapp_re.findall(html)[0]

        # Classic theme uses .postTitle2 anchors; newer themes use h2 > a.
        links = soup.select('.postTitle2') or soup.select('h2 > a')
        if not links:
            self.stdout.write(self.style.ERROR("Can't find link from %s"%(url,)))
        for link in links:
            link_url = link.get('href')
            title = str(link.string).replace('[置顶]','').strip()
            self.stdout.write(self.style.SUCCESS('Find %s'%(title,)))
            try:
                # Article URLs normally end in /<postid>.html.
                object_int = int(link_url[link_url.rfind('/')+1:].replace('.html',''))
            except ValueError:
                # Non-numeric slug; parse_article recovers the id from
                # the page's cb_entryId.
                object_int = -1
            article = dict(title=title,
                           author_name=self.author_name,
                           author_username=self.username,
                           object_key_int=object_int,
                           source_url= link_url,
                           )
            self.parse_article(article)
            self.save_article(article)

    def save_article(self, article):
        """Queue the parsed article dict for asynchronous persistence."""
        services.save_article_async(self.site.id, article)

    def parse_article(self, article):
        """Fetch one article page and fill *article* in place.

        Adds post date, categories, tags, description (first <p>),
        body HTML and the raw page source. Returns early (leaving the
        dict partially filled) when the page cannot be parsed.
        """
        try:
            source_content = self.url_gettext(article['source_url'])
        except requests.HTTPError:
            self.stdout.write(self.style.ERROR('Occurred http error'))
            return
        if not self.blogid:
            # Extracted from inline JS: cb_blogId=111,
            self.blogid = int(self.blogid_re.findall(source_content)[0])
        soup = BeautifulSoup(source_content, 'html.parser')
        try:
            post_at = str(soup.find('span',id='post-date').string)
        except AttributeError:
            self.stderr.write(self.style.ERROR("Can't find post date"))
            return
        post_at = timezone.make_aware(datetime.datetime.strptime(
           post_at.strip(), "%Y-%m-%d %H:%M"))
        article['post_at'] = post_at
        if article['object_key_int'] == -1:
            # BUG FIX: was '==' (a discarded comparison), so the entry-id
            # fallback never took effect and the API below got postId=-1.
            article['object_key_int'] = int(self.entryid_re.findall(source_content)[0])
        cat_tag_url = self.host + 'mvc/blog/CategoriesTags.aspx?blogApp=%s&blogId=%d&postId=%d'%(self.blogapp,self.blogid,article['object_key_int'])
        cat_tag_json = self.url_getjson(cat_tag_url)

        # Category/tag names are the anchor texts in the returned HTML.
        cats = self.cats_re.findall(cat_tag_json['Categories'])
        tags = self.tags_re.findall(cat_tag_json['Tags'])
        article['tags'] = tags
        article['cats'] = cats

        # Try the standard container first, then theme-specific fallbacks.
        body = soup.find('div', id='cnblogs_post_body')
        if not body:
            body = soup.find(
                'div', class_='content-body tutorial-content tutorial-content-legacy')
        if not body:
            body = soup.find('article')
        if not body:
            print(article)
            self.stdout.write(self.style.ERROR("Can't find body"))
            # BUG FIX: without this return, body.find('p') below raised
            # AttributeError on None.
            return
        first_p = body.find('p')
        if first_p:
            article['description'] = str(first_p)
        else:
            article['description'] = ''
        article['content'] = str(body)
        article['source_content'] = source_content.strip()
