from __future__ import absolute_import, unicode_literals
import bs4
import re
import requests
from .celery import app
from spiders.base import BaseSpider
from celery import shared_task
from parsers.base_parser import BaseParser
from urllib.parse import urljoin
# from file_downloaders import downloader
from celery import chain, group, chord
from .models import Post
from .models import db
from rlibs.filter import UrlDuplicate


def _extract_site_id(url):
    """Return the first run of digits in *url* as a string, or 0 if none."""
    match = re.search(r'\d+', url)
    return match.group(0) if match else 0


@app.task(bind=True, base=BaseSpider)
def detail(self, url):
    """Fetch a single post page from www.duanzh.com, parse title/body and
    persist it as a Post.

    Skips the URL when a Post with the same (site_id, site_name) already
    exists. Retries (up to 5 times) on network errors, non-200 responses, or
    when the title/content container cannot be located.
    """
    site_name = 'www.duanzh.com'
    site_id = _extract_site_id(url)
    try:
        # Dedup check: bail out early if the post is already stored.
        # BUGFIX: was `site_name == site_name` (compares the local variable to
        # itself, always True) — compare the model field instead.
        Post.get(Post.site_id == site_id, Post.site_name == site_name)
        return None
    except Post.DoesNotExist:
        # Not stored yet — proceed to fetch. (Post looks like a peewee model:
        # .get() with field expressions and .save() below — TODO confirm.)
        pass
    try:
        response = self.x_request(url, cookies={'is_loyal': '1'}, timeout=10)
        if not hasattr(response, 'status_code') or response.status_code != 200:
            self.retry(max_retries=5, countdown=60)
    except Exception as exc:
        self.retry(exc=exc, max_retries=5)
    selector = BaseParser(response.content).to_html
    try:
        title = selector.find_all('h3')[0].text
    except Exception as exc:
        # BUGFIX: previously only printed the error and fell through, leaving
        # `title` unbound and crashing with NameError at Post(...) below.
        print('title parse failed:', response.url)
        self.retry(exc=exc, max_retries=5)
    try:
        # Body is normally the second panel-body div; fall back to the first.
        text_div = selector.find_all('div', class_='panel-body')[1]
    except IndexError:
        try:
            text_div = selector.find_all('div', class_='panel-body')[0]
        except Exception as exc:
            print('exception_url', url)
            self.retry(exc=exc, max_retries=5)
    text_div['class'] = 'zhihu'
    # Flatten the container's direct children into simple <p> paragraphs.
    parts = []
    for ele in text_div.contents:
        if isinstance(ele, bs4.element.NavigableString):
            parts.append('<p>{0}</p>'.format(ele.string))
        elif isinstance(ele, bs4.element.Tag):
            parts.append('<p>{0}</p>'.format(ele.text))
    text = ''.join(parts)
    # Re-derive site_id from the final URL in case the request was redirected.
    site_id = _extract_site_id(response.url)
    m_post = Post(title=title, text=text, site_name=site_name, site_id=site_id)
    m_post.save()

def handle_url(base_url, urls):
    """Normalize scraped hrefs to absolute URLs.

    Args:
        base_url: the page URL used to resolve relative links.
        urls: iterable of href strings; falsy entries (None, '') are skipped.

    Returns:
        A list of absolute URLs, in input order.
    """
    urls_result = []
    for href in urls:
        if not href:
            # BUGFIX: .get('href') at the call sites can yield None, and
            # urljoin(base, None) raises TypeError — skip missing hrefs.
            continue
        if href.startswith('http'):
            urls_result.append(href)
        else:
            urls_result.append(urljoin(base_url, href))
    return urls_result

@app.task(bind=True, base=BaseSpider)
def starter(self, url):
    """Crawl one listing page: enqueue a `detail` task per post link, then
    re-enqueue itself with the next page ('before' link), a 'random' link,
    or the hard-coded random endpoint as a last resort.

    Retries (up to 5 times) on network errors or non-200 responses.
    """
    print('handle url:', url)
    # BUGFIX: 'www.duanzh.com/comment/' was listed twice.
    limits = ['www.duanzh.com', 'www.duanzh.com/comment/']
    try:
        response = self.x_request(url, timeout=10, headers={'user-agent': "Firefox"})
        if not hasattr(response, 'status_code') or response.status_code != 200:
            self.retry(max_retries=5, countdown=60)
    except Exception as exc:
        print('url failed, retrying')
        self.retry(exc=exc, max_retries=5, countdown=3)
    selector = BaseParser(response.content).to_html
    # Post links live in the last child of each panel heading.
    hrefs = [div.contents[-1].get('href')
             for div in selector.find_all('div', class_='panel-heading')]
    # BUGFIX: .get('href') may return None, which urljoin rejects — filter first.
    url_result = handle_url(response.url, [h for h in hrefs if h])
    # Keep only unseen URLs (dedup filter), constrained to the allowed hosts.
    task_urls = [self.limit_url(candidate, limits=limits)
                 for candidate in url_result
                 if self.check_url(self.app.main, candidate)]
    for task_url in task_urls:
        if task_url:
            detail.s(task_url).delay()
    # Pagination: 'before' is the next listing page, 'random' the fallback.
    next_url = ''
    random_url = ''
    m_urls = [a.get('href') for a in selector.find_all('a', target='_self') if a.get('href')]
    m_urls = handle_url(response.url, m_urls)
    for candidate in m_urls:
        limited = self.limit_url(candidate, limits=limits)
        if not limited:
            # limit_url appears to return a falsy value for out-of-scope URLs
            # (see the task_urls guard above) — avoid `in` on None.
            continue
        if 'before' in limited:
            next_url = limited
        elif 'random' in limited:
            random_url = limited
    if next_url:
        print('new starter:', next_url)
        self.s(next_url).delay()
    elif random_url:
        self.s(random_url).delay()
    else:
        self.s('http://www.duanzh.com/random/reply').delay()
