# -*- coding: utf-8 -*-
# Created by shu2015626 on 2019-7-5
"""
模块说明
"""

import datetime
import json
import os
import random
import re

import requests
from bs4 import BeautifulSoup

# Desktop-Chrome User-Agent sent with every request — presumably so Baidu
# treats the client as a regular browser; verify it is still accepted.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
}


def get_news(key_word: str) -> None:
    """Fetch the Baidu News results page for *key_word*, parse it, save it.

    Side effects: writes ./data/<key_word>.json via save_res; prints
    progress/error messages to stdout. Returns nothing.
    """
    base_url = "https://www.baidu.com/s?ie=utf-8&cl=2&medium=0&rtt=4&bsst=1&rsv_dl=news_t_sk&tn=news&rn=20&word={key_word}&inputT={random_num}"
    # inputT mimics the typing-time parameter a browser sends; a random value
    # keeps successive requests from looking identical.
    url = base_url.format(key_word=key_word, random_num=random.randint(100, 100000))
    # timeout so a stalled connection cannot hang the crawl forever
    req = requests.get(url, headers=HEADERS, timeout=10)
    if req.status_code != 200:
        print("错误码：", req.status_code)
        print("错误原因：\n", req.reason)
        return
    try:
        res = parse_html(key_word, req.text)
    except Exception as e:
        print("解析html出错：\n")
        print(e)
        return
    # Original code computed is_saved and ignored it; report failures instead.
    if not save_res(key_word, res):
        print("保存结果失败：", key_word)


def parse_html(key_word: str, html: str) -> str:
    """Parse a Baidu News results page into a JSON array string.

    key_word: the search term, echoed into every record.
    html: raw HTML of the results page.

    Returns a JSON string: a list of dicts with keys key_word, source_url,
    title, publisher, publish_time ("%Y-%m-%d %H:%M") and summary. Result
    entries missing any required field, or with an unparseable timestamp,
    are skipped rather than aborting the whole page.
    """
    res = []
    obj_bs = BeautifulSoup(html, 'lxml')
    news_details = obj_bs.find_all("div", attrs={'class': 'result'})
    for news in news_details:
        tmp_res = {'key_word': key_word}

        # Link and title come from the same <h3 class="c-title"><a> anchor;
        # find it once (the original looked it up twice).
        title_tag = news.find('h3', attrs={'class': 'c-title'})
        anchor = title_tag.find('a') if title_tag else None
        if not anchor:
            continue
        tmp_res['source_url'] = anchor['href'].strip()

        title = anchor.get_text().strip()
        if not title:
            continue
        tmp_res['title'] = title

        # 发布详情: publisher and timestamp are separated by non-breaking spaces.
        publish_info = news.find('p', attrs={'class': 'c-author'})
        if not publish_info:
            continue
        publish_info = re.sub("\xa0+", "#", publish_info.get_text())

        try:
            publisher, publish_time = publish_info.split("#")
        except ValueError:
            # Not exactly one separator -> a layout we don't understand; skip.
            continue

        tmp_res['publisher'] = publisher.strip()

        publish_time = publish_time.strip()
        if not publish_time:
            # The original crashed here: an empty string fell through to
            # str.strftime. Skip the entry instead.
            continue
        # Relative timestamps ("N小时前" / "N分钟前") vs the common absolute
        # form "2018年04月17日 18:52".
        try:
            if "小时前" in publish_time:
                m = re.search(r'(\d+)小时前', publish_time)
                hours = int(m.group(1)) if m else 0
                publish_dt = datetime.datetime.now() - datetime.timedelta(hours=hours)
            elif "分钟前" in publish_time:
                m = re.search(r'(\d+)分钟前', publish_time)
                minutes = int(m.group(1)) if m else 0
                publish_dt = datetime.datetime.now() - datetime.timedelta(minutes=minutes)
            else:
                publish_dt = datetime.datetime.strptime(publish_time, '%Y年%m月%d日 %H:%M')
        except ValueError:
            # Unrecognized date format: skip this entry, keep the rest.
            continue
        tmp_res['publish_time'] = publish_dt.strftime("%Y-%m-%d %H:%M")

        summary_tag = news.find('div', attrs={'class': 'c-summary'})
        if not summary_tag:
            # Original crashed with AttributeError when the summary div
            # (or its p/span children below) was absent.
            continue
        # Blank out the embedded title/author nodes so only the abstract remains.
        if summary_tag.p:
            summary_tag.p.replace_with("")
        if summary_tag.span:
            summary_tag.span.replace_with("")
        tmp_res['summary'] = summary_tag.get_text().strip()

        res.append(tmp_res)

    return json.dumps(res, ensure_ascii=False, indent=2, sort_keys=True)


def save_res(key_word: str, res: str) -> bool:
    """Write the JSON result string *res* to ./data/<key_word>.json.

    Returns True on success, False on any filesystem failure (which is
    also printed, instead of being silently swallowed as before).
    """
    try:
        # The original returned False whenever ./data was missing; create it.
        os.makedirs('./data', exist_ok=True)
        with open(f'./data/{key_word}.json', 'wt', encoding='utf-8') as f:
            f.write(res)
        return True
    except OSError as e:
        # Narrowed from bare Exception: only I/O errors are expected here.
        print("保存失败：", e)
        return False


if __name__ == "__main__":
    for key_word in ['基金', '私募', '券商', '国金证券']:
        get_news(key_word)


