from __future__ import absolute_import, unicode_literals
import bs4
import re
import requests
from .celery import app
from spiders.base import BaseSpider
from celery import shared_task
from parsers.base_parser import BaseParser
from urllib.parse import urljoin
# from file_downloaders import downloader
from celery import chain, group, chord
from .models import Novel, Author, Category, Article
from .models import db_novel
from rlibs.filter import UrlDuplicate


def handle_url(base_url, urls):
    """Resolve every URL in *urls* against *base_url*.

    Absolute ``http(s)://`` URLs pass through unchanged; relative ones
    are joined onto *base_url* with :func:`urllib.parse.urljoin`.

    Falsy entries (``None`` / ``''``) are skipped entirely: hrefs come
    from scraped ``<a>`` tags and may be missing, and previously a
    ``None`` crashed inside ``urljoin`` while ``''`` resolved back to
    the base page itself and caused a duplicate crawl.

    Args:
        base_url: page URL the relative links were found on.
        urls: iterable of href strings (possibly None/empty).

    Returns:
        list of absolute URL strings, in input order.
    """
    resolved = []
    for href in urls:
        if not href:
            continue  # missing/empty href — nothing useful to crawl
        resolved.append(href if href.startswith('http') else urljoin(base_url, href))
    return resolved

@app.task(bind=True, base=BaseSpider)
def text(self, url, novel_id, article_id):
    """Fetch one chapter page and persist it as an Article row.

    Args:
        url: absolute URL of the chapter page.
        novel_id: primary key of the owning Novel row.
        article_id: chapter number parsed from the URL by `detail`.

    Retries (via Celery) on request failure or non-200 responses.
    """
    print('handle text', url, novel_id, article_id)
    try:
        response = self.x_request(url, timeout=10, headers={'user-agent': "Firefox"})
    except Exception as exc:
        print('url failed, retrying')
        self.retry(exc=exc, max_retries=5, countdown=3)
    # Status check OUTSIDE the try block: self.retry() raises
    # celery.exceptions.Retry, and the broad `except Exception` above
    # used to swallow it and re-issue the retry with the wrong
    # exc/countdown. getattr covers x_request returning a non-response.
    if getattr(response, 'status_code', None) != 200:
        self.retry(max_retries=5, countdown=60)
    selector = BaseParser(response.content).to_html

    novel, _ = Novel.get_or_create(id=novel_id)
    title = selector.select_one('div.h1title h1').text
    # Only top-level NavigableStrings carry chapter prose; tag children
    # (e.g. <br>) are intentionally skipped. join() avoids the quadratic
    # `text += ...` build-up, and `body` no longer shadows the task name.
    body = ''.join(
        '<p>{0}</p>'.format(node.string.strip())
        for node in selector.select_one('#htmlContent').contents
        if isinstance(node, bs4.element.NavigableString)
    )
    article, _ = Article.get_or_create(
        novel=novel, title=title, text=body, chapter_id=article_id
    )
    article.save()



@app.task(bind=True, base=BaseSpider)
def detail(self, url, novel_id):
    """Fetch a novel's table-of-contents page and fan out one `text`
    task per chapter link (as a Celery group).

    Args:
        url: absolute URL of the novel's chapter-list page.
        novel_id: primary key of the Novel the chapters belong to.

    Retries (via Celery) on request failure or non-200 responses.
    """
    print('handle detail', url, novel_id)
    try:
        response = self.x_request(url, timeout=10, headers={'user-agent': "Firefox"})
    except Exception as exc:
        print('url failed, retrying')
        self.retry(exc=exc, max_retries=5, countdown=3)
    # Status check OUTSIDE the try block: self.retry() raises
    # celery.exceptions.Retry, which the broad `except Exception` above
    # used to swallow and re-retry with the wrong exc/countdown.
    if getattr(response, 'status_code', None) != 200:
        self.retry(max_retries=5, countdown=60)
    selector = BaseParser(response.content).to_html
    urls = [a.get('href') for a in selector.select('ul.mulu_list a')]
    # Raw string fixes the invalid-escape warning on '\d'; the escaped
    # dot stops e.g. '12xhtml' from matching. Compiled once, hoisted
    # out of the loop.
    chapter_re = re.compile(r'(\d+)\.html')
    signatures = []
    for chapter_url in handle_url(response.url, urls):
        match = chapter_re.search(chapter_url)
        if match is None:
            # Previously re.findall(...)[0] raised IndexError here and
            # killed the whole page; skip non-chapter links instead.
            continue
        signatures.append(text.s(chapter_url, novel_id, int(match.group(1))))
    group(signatures).delay()


@app.task(bind=True, base=BaseSpider)
def starter(self, url):
    """Crawl one ranking page: persist every listed novel (with its
    author and category), queue a `detail` task for each unseen novel
    URL, then queue this task again for the next ranking page.

    Args:
        url: absolute URL of a ranking/listing page on www.ybdu.com.

    Retries (via Celery) on request failure or non-200 responses.
    """
    print('handle url:', url)
    # Only novel links on this host are followed into detail crawling.
    limits = ['www.ybdu.com']
    try:
        response = self.x_request(url, timeout=10, headers={'user-agent': "Firefox"})
    except Exception as exc:
        print('url failed, retrying')
        self.retry(exc=exc, max_retries=5, countdown=3)
    # Status check OUTSIDE the try block: self.retry() raises
    # celery.exceptions.Retry, which the broad `except Exception` above
    # used to swallow and re-retry with the wrong exc/countdown.
    if getattr(response, 'status_code', None) != 200:
        self.retry(max_retries=5, countdown=60)
    selector = BaseParser(response.content).to_html
    for ul in selector.select('div.rec_rullist ul'):
        novel = {}
        # Each <li>'s first CSS class names the column it holds:
        # one=rank, two=title+link, four=author, sev=category.
        for li in ul.find_all('li'):
            li_class = li['class'][0]
            if li_class == 'one':
                novel['rank'] = int(li.text)
            elif li_class == 'two':
                novel['name'] = li.text
                novel['url'] = li.a.get('href')
            elif li_class == 'four':
                novel['author'] = li.text
            elif li_class == 'sev':
                novel['category'] = li.text
        print(novel)
        author, _ = Author.get_or_create(name=novel.get('author'))
        author.save()
        category, _ = Category.get_or_create(name=novel.get('category'))
        category.save()
        tmp_novel, _ = Novel.get_or_create(author=author, category=category,
                                           title=novel.get('name'),
                                           rank=novel.get('rank'))
        tmp_novel.save()
        novel_url = novel.get('url')
        # NOTE(review): check_url presumably returns True for URLs not
        # seen before (dedup filter) — confirm against BaseSpider.
        if self.check_url(self.app.main, novel_url):
            novel_url = self.limit_url(novel_url, limits=limits)
            detail.delay(novel_url, tmp_novel.id)
    # Follow pagination. The last page has no "next" anchor; previously
    # select_one(...) returned None and .get('href') raised
    # AttributeError, so the crawl ended with a crash instead of cleanly.
    next_link = selector.select_one('div.pagelink a.next')
    if next_link is not None:
        self.s(next_link.get('href')).delay()

