# -*- coding:UTF-8 -*-
"""
@author: DadaYu
"""
import os
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import config
import doInsertDB

# Output file path assembled from config (folder + file name).
# NOTE(review): OUT_FILE is not referenced anywhere in this module —
# presumably consumed by a sibling module; verify before removing.
OUT_FILE = os.path.join(config.OUT_FILE_FOLDER_PATH, config.FILE_NAME)


# Fetch a URL and parse its HTML.
def get_beautiful_soup(url):
    """Fetch *url* over HTTP and return the page parsed with html5lib.

    The response is decoded as UTF-8 regardless of the server-declared
    charset, since the target site serves UTF-8 pages.
    """
    # Timeout keeps the scraper from hanging forever on a stalled server;
    # the original call had none, which is the classic requests footgun.
    res = requests.get(url, timeout=30)
    res.encoding = 'utf-8'
    return BeautifulSoup(res.text, "html5lib")


# Scrape the detail content of one article page.
def get_detail(url):
    """Scrape a single article page.

    Returns a dict with keys 'title', 'synopsis', 'content' (accumulated
    HTML of the body paragraphs) and 'time' (the raw publish-time string)
    when the article was published within config.QUERY_TIME_RANGE days;
    returns None for expired articles.
    """
    detail = {
        'title': '',
        'synopsis': '',
        'content': '',
        'time': ''
    }
    bf = get_beautiful_soup(url)
    top_title = bf.find('div', class_='top-title').text
    print(top_title + ':' + url)
    report_time_span = bf.find('div', class_='top-time').find('span').text
    report_time = datetime.strptime(report_time_span, '%Y-%m-%d %H:%M')
    # Guard clause: skip articles outside the query window.
    diff = datetime.now() - report_time
    if diff.days > config.QUERY_TIME_RANGE:
        print(top_title + '，已过期')
        return None
    # Renamed from `list` — never shadow the builtin.
    paragraphs = bf.find('div', class_='article-content').find_all('p')
    for index, paragraph in enumerate(paragraphs):
        if index == 0:
            # The first paragraph is used as the synopsis.
            detail['synopsis'] = paragraph.text
        else:
            # The site lazy-loads images: the real URL lives in the
            # 'data-echo' attribute; promote it to 'src' so the stored
            # HTML renders images directly.
            img = paragraph.find('img')
            if img and 'data-echo' in img.attrs:
                img['src'] = img['data-echo']
                del img['data-echo']
            detail['content'] += str(paragraph)
    detail['time'] = report_time_span
    detail['title'] = top_title
    return detail


# Collect page information and persist it to the database.
def organize_page_information(anchors):
    """Scrape every article linked by *anchors* and insert it into the DB.

    *anchors* is an iterable of <a> tags. Iteration stops at the first
    expired article: the listings are assumed newest-first, so everything
    after it is expired too.

    NOTE: the parameter was renamed from `list` (shadowed the builtin);
    all in-file callers pass it positionally, so this is compatible.
    """
    for a in anchors:
        detail = get_detail(a['href'])
        if detail is None:
            # Expired article — the remaining links are older, stop here.
            break
        data = {
            'title': detail['title'],
            'author': '福州本地宝',
            'synopsis': detail['synopsis'],
            'info_sources': '福州本地宝',
            'info_url': a['href'],
            'content': detail['content'],
            'release_time': detail['time'],
            'create_time': datetime.now(),
        }
        doInsertDB.insert_info_reptile_data(data)

if __name__ == '__main__':
    url = 'http://fz.bendibao.com/'
    bf = get_beautiful_soup(url)

    # Today's focus section (今日关注)
    jinriguanzhu = bf.find('div', class_='focus-zt').find('ul').find_all('a')
    organize_page_information(jinriguanzhu)

    # Focus news sections (焦点资讯)
    for item in bf.find_all('ul', class_='focus-news'):
        # Renamed from `list` — never shadow the builtin.
        anchors = item.find_all('a')
        organize_page_information(anchors)

    # News tab blocks. Index 1 is skipped; the original code gave no
    # reason — presumably a duplicate/unwanted tab. TODO confirm.
    tab_blocks = bf.find('div', class_='news-blocks').find_all('ul')
    for index, ul in enumerate(tab_blocks):
        if index == 1:
            continue
        al = ul.find_all('a')
        organize_page_information(al)