# coding=utf8
from libs.help import str_to_time
import requests
from bs4 import BeautifulSoup
from bs4.element import Tag
from models.resource.classify import model_res_classify, get_classify, add_classify
from models.resource.crawl import model_res_crawl
from logging import getLogger

logger = getLogger('console')


def get_classify_from_page(soup):
    """Extract the category name -> URL mapping from the site's secondary menu.

    The "首页" (home) link is excluded since it is not a category.

    Args:
        soup: BeautifulSoup of the site's home page.

    Returns:
        dict mapping category display name to its listing URL.
    """
    menu = soup.find(id='secondary-menu')
    return {
        link.get_text(): link['href']
        for link in menu.find_all('a')
        if link.get_text() != '首页'
    }


def get_page(url, page=0):
    """Crawl one listing page of a category and yield articles not yet stored.

    Args:
        url: category listing URL.
        page: 0 requests the first page; any other value is appended to the
            URL as '/page/<n>'.

    Yields:
        dict with keys Title, Content, TimeStr, TimeStamp, Url, Thumb.
        Articles whose Title already exists in the crawl collection are skipped.
    """
    if page != 0:
        url = url + '/page/' + str(page)
    classify_soup = BeautifulSoup(requests.get(url).text, 'lxml')
    list_div = classify_soup.find(id="entries")
    for article in list_div.find_all("h6", class_="title"):
        article_url = article.find("a")['href']
        article_soup = BeautifulSoup(requests.get(article_url).text, 'lxml')
        content_div = article_soup.find(class_="entry post clearfix singlepost")
        if content_div is None:
            # Layout changed or article removed — skip instead of crashing
            # on the .find() calls below.
            logger.warning("no content div found at %s", article_url)
            continue

        # Dedup by title against articles already stored.
        title = content_div.find('h1', class_='title').get_text()
        if model_res_crawl.coll.find_one({'Title': title}) is not None:
            continue

        # The meta line looks like "发表于 <date>"; strip the prefix before parsing.
        time_str = content_div.find(class_='meta-info').get_text(strip=True).replace('发表于 ', '')
        time_stamp = str_to_time(time_str, format="%Y年%m月%d日%H:%M")
        # TODO: stop crawling once articles are older than the previous crawl.

        # Body paragraphs are the tag siblings following the post-meta block,
        # up to (excluding) the "来源：" (source attribution) line.
        content_start = content_div.find(class_='post-meta')
        content = []
        for p in content_start.next_siblings:
            if not isinstance(p, Tag):
                continue
            if "来源：" in str(p):
                break
            content.append(str(p))

        # Not every article has an image; the original unconditional
        # find('img')['src'] raised TypeError when none was present.
        thumb = content_div.find('img')
        yield {
            'Title': title,
            'Content': "\n".join(content),
            'TimeStr': time_str,
            'TimeStamp': time_stamp,
            'Url': article_url,
            'Thumb': thumb['src'] if thumb is not None else None,
        }
    # TODO: follow pagination. The original trailing `page += 1` after the
    # loop had no effect (nothing read the incremented value), so it was removed.


def main():
    """Crawl every category of the health site and persist new articles."""
    home_uri = "http://health.newlifepost.com/"
    home_soup = BeautifulSoup(requests.get(home_uri).text, 'lxml')

    to_crawl = get_classify_from_page(home_soup)
    # add_classify(to_crawl)
    classify = get_classify()
    for c_name, c_url in to_crawl.items():
        logger.info("爬取" + c_name)
        for item in get_page(c_url):
            # Tag each crawled article with its category before insertion.
            item['ClassifyName'] = c_name
            item['ClassifyId'] = classify[c_name]
            logger.info(item)
            model_res_crawl.coll.insert_one(item)
