# -*- coding:UTF-8 -*-
"""
@author: DadaYu
"""
import os
import shutil
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from docx import Document
import config
from docx.shared import Inches

import emailUtils

# Absolute path of the generated Word report (output folder + file name, both from config).
OUT_FILE = os.path.join(config.OUT_FILE_FOLDER_PATH, config.FILE_NAME)

# Create a new (empty) Word document
def create_word_document():
    """Create an empty Word document at OUT_FILE and return it.

    Ensures the output folder exists first.

    Returns:
        docx.Document: the freshly created (and already saved) document.
    """
    # exist_ok=True avoids the check-then-create race of the old
    # `if not os.path.exists(...)` guard and is a no-op when the dir exists.
    os.makedirs(config.OUT_FILE_FOLDER_PATH, exist_ok=True)
    document = Document()
    document.save(OUT_FILE)
    return document

# Download an image to the local image folder
def download_image(url, filename):
    """Download *url* into config.IMG_FILE_FOLDER_PATH/<filename>.

    Args:
        url: absolute URL of the image.
        filename: target file name (extension included).

    Returns:
        str: local filesystem path of the saved image.

    Raises:
        requests.HTTPError: on a non-2xx response (previously an error
            page would have been silently saved as image bytes).
    """
    # exist_ok=True replaces the racy exists()-then-makedirs() pattern.
    os.makedirs(config.IMG_FILE_FOLDER_PATH, exist_ok=True)
    path = os.path.join(config.IMG_FILE_FOLDER_PATH, filename)
    # timeout prevents the whole scrape from hanging on one dead host.
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # fail fast on 4xx/5xx
    with open(path, 'wb') as file:
        file.write(response.content)
    return path

# Fetch a URL and parse it into a BeautifulSoup tree
def get_beautiful_soup(url):
    """Fetch *url* and return its HTML parsed with the html5lib parser.

    Args:
        url: page URL to fetch.

    Returns:
        BeautifulSoup: parsed document tree.
    """
    # timeout so a stalled server cannot hang the run indefinitely.
    res = requests.get(url, timeout=30)
    # The site serves UTF-8; override requests' charset guess explicitly.
    res.encoding = 'utf-8'
    return BeautifulSoup(res.text, "html5lib")

# Decide whether a link points to a downloadable image
def analysisHref(href, text = '链接地址'):
    """Classify *href* as an image (download it) or a plain text link.

    Args:
        href: the link URL.
        text: label prefixed to non-image links in the output text.

    Returns:
        dict: {'type': 'img', 'data': <local path>} for images, or
              {'type': 'text', 'grade': 0, 'data': <label:url\\n>} otherwise.
    """
    # splitext handles extensions of any length (e.g. '.jpeg') and short
    # URLs; the old fixed slice href[-4:] only matched 4-char suffixes.
    img_type = os.path.splitext(href)[1]
    if img_type in config.DOWNLOAD_IMG_TYPE_LIST:
        # Microsecond timestamp gives a collision-free local file name.
        fileName = datetime.now().strftime("%Y%m%d%H%M%S%f") + img_type
        return {
            'type': 'img',
            'data': download_image(href, fileName)
        }
    else:
        return {
            'type': 'text',
            'grade': 0,
            'data': text + ':' + href + '\n'
        }

# Scrape one article page into a list of content items
def get_detail(url):
    """Scrape the article at *url* and return its content items.

    Each item is a dict with 'type' ('text' / 'img' / '图片损坏') and
    'data'; text items additionally carry 'grade' (1 = heading, 0 = body).
    Articles older than config.QUERY_TIME_RANGE days are skipped.

    Args:
        url: article page URL.

    Returns:
        list[dict]: content items, or [] when the article is too old.
    """
    bf = get_beautiful_soup(url)
    top_title = bf.find('div', class_='top-title').text
    print(top_title + ':' + url)
    report_time_span = bf.find('div', class_='top-time').find('span').text
    report_time = datetime.strptime(report_time_span, '%Y-%m-%d %H:%M')
    # Guard clause: bail out early on stale articles (flattens the old
    # long if/else pyramid).
    if (datetime.now() - report_time).days > config.QUERY_TIME_RANGE:
        print(top_title + '，已过期')
        return []
    # renamed from `list` — don't shadow the builtin
    paragraphs = bf.find('div', class_='article-content').find_all('p')
    content = [{
        'type': 'text',
        'grade': 1,
        'data': top_title + '\n'
    }]
    for p in paragraphs:
        text = p.text
        if text.strip():
            content.append({
                'type': 'text',
                'grade': 0,
                'data': text + '\n'
            })
        a = p.find('a')
        img = p.find('img')
        if a:
            content.append(analysisHref(a['href']))
        elif img:
            # Prefer lazy-load attributes, then fall back to plain src.
            href = ''
            for attr in ('data-echo', 'bigpicsrc', 'src'):
                if img.has_attr(attr):
                    href = img[attr]
                    break
            if href:
                content.append(analysisHref(href))
            else:
                content.append({
                    'type': '图片损坏',
                    'data': str(img)
                })
    return content

# Append the articles behind a list of <a> tags to the Word document
def organize_page_information(list):
    """Scrape each link in *list* and append its content to OUT_FILE.

    Args:
        list: iterable of BeautifulSoup <a> tags with an 'href' attribute.
              NOTE(review): the parameter shadows the builtin `list`;
              name kept unchanged for caller compatibility.
    """
    doc = Document(OUT_FILE)
    for anchor in list:
        for info in get_detail(anchor['href']):
            kind = info['type']  # renamed from `type` — don't shadow the builtin
            data = info['data']
            if kind == 'text':
                # grade 1 items are article titles, everything else is body text
                if info['grade'] == 1:
                    doc.add_heading(data, level=1)
                else:
                    doc.add_paragraph(data)
            elif kind == 'img':
                doc.add_picture(data, width=Inches(4))
            else:
                # broken-image entries and anything unexpected: just log
                print(info)
        # Save after each article so progress survives a mid-run crash.
        doc.save(OUT_FILE)

if __name__ == '__main__':
    url = 'http://fz.bendibao.com/'
    bf = get_beautiful_soup(url)

    create_word_document()
    # "Today's focus" section
    jinriguanzhu = bf.find('div', class_='focus-zt').find('ul').find_all('a')
    organize_page_information(jinriguanzhu)
    # "Focus news" sections
    jiaodianzixun = bf.find_all('ul', class_='focus-news')
    for item in jiaodianzixun:
        anchors = item.find_all('a')  # renamed from `list` — don't shadow the builtin
        organize_page_information(anchors)
    emailUtils.doSendEmail(config.EMAIL_LIST, "福州本地宝今日信息","", OUT_FILE)
    # Clean up the downloaded images and the generated document.
    # ignore_errors so cleanup of one folder still runs if the other is missing.
    shutil.rmtree(config.IMG_FILE_FOLDER_PATH, ignore_errors=True)
    shutil.rmtree(config.OUT_FILE_FOLDER_PATH, ignore_errors=True)