# -*- coding:utf-8 -*-
"""
深度
https://www.hecaijing.com/index/loadmore?type=15246480405134846&pn=57
新闻
https://www.hecaijing.com/index/loadmore?type=15145993891806411&pn=474
"""
import requests
from bs4 import BeautifulSoup
import json


def get_html_text(url):
    """Fetch *url* and return the response body as text.

    On any request failure (connection error, timeout, non-2xx status),
    print the error, append the failing URL to ``errorurl.txt`` and
    return ``None`` so callers can skip the page.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        r = requests.get(url, timeout=30, headers=headers)
        r.raise_for_status()
        # Server responses may not declare a charset; trust the detected one.
        r.encoding = r.apparent_encoding
        return r.text
    # Narrowed from `Exception`: only network/HTTP errors should be treated
    # as "log the URL and move on" — programming errors must surface.
    except requests.RequestException as e:
        print(e)
        with open('errorurl.txt', 'a', encoding='utf-8') as f:
            f.write(url + '\n')
        return None


def writedata(record):
    """Append *record* to the output file as one JSON line (UTF-8, non-ASCII kept)."""
    with open('核财经深度文章.txt', 'a', encoding='utf-8') as out:
        json.dump(record, out, ensure_ascii=False)
        out.write('\n')


def get_urls(url):
    """Fetch one listing page of the loadmore API and extract article metadata.

    Returns a tuple of parallel lists ``(titles, ctimes, imgs, urls)`` —
    one entry per article — or ``None`` when the page could not be fetched
    or carries no ``data`` payload.
    """
    text = get_html_text(url)
    if not text:
        return None
    payload = json.loads(text)
    # Missing "data" now yields None instead of raising KeyError.
    articles = payload.get('data')
    if articles is None:
        return None
    titles, ctimes, imgs, article_urls = [], [], [], []
    # Single pass; the original iterated `data` four times with loop
    # variables that shadowed the `url` parameter.
    for article in articles:
        titles.append(article['title'])
        ctimes.append(article['publish_time'])
        imgs.append('<img src="{}">'.format(article['img']))
        article_urls.append(article['app_url'])
    return (titles, ctimes, imgs, article_urls)


def get_datas(url, title, ctime, img):
    """Download one article page and assemble its record dict.

    Returns ``{"title", "img", "context", "ctime", "jigou"}`` where
    ``context`` is the raw HTML of the ``#main`` element and ``jigou``
    is the text of the ``.sounce`` source tag, or ``None`` when the
    page could not be fetched.
    """
    text = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    # Guard: `.sounce` may be absent from some pages; select_one would
    # return None and the original crashed with AttributeError.
    source_tag = soup.select_one('.sounce')
    jigou = source_tag.get_text() if source_tag is not None else ''
    context = str(soup.select_one('#main'))
    return {"title": title, "img": img, "context": context, "ctime": ctime, "jigou": jigou}


def result():
    """Crawl the paginated listing newest-first, writing one JSON line per
    article, until reaching an article whose timestamp is at or before the
    last-seen timestamp stored in ``config.txt``.

    When that boundary is hit, an all-empty separator record is written and
    the crawl stops.  Failed pages are logged to ``errorurl.txt``.
    """
    # Timestamp of the newest article saved by a previous run ('' = save all).
    # NOTE(review): the stop test below is a plain string comparison, which
    # assumes publish_time is a lexicographically ordered format — confirm.
    try:
        with open('config.txt', encoding='utf-8') as fd:
            last_seen = fd.readline()
    except OSError:
        last_seen = ''
    page = 1
    reached_old = False
    while not reached_old:
        page_url = 'https://www.hecaijing.com/index/loadmore?type=15246480405134846&pn={}'.format(page)
        try:
            data = get_urls(page_url)
            if data is None:
                break  # listing page unreachable/unparseable: stop cleanly
            titles, ctimes, imgs, urllist = data
            print('第{}页获取中'.format(page))
            if not urllist:
                # End of the listing.  The original did `continue` here
                # without incrementing the page counter — an infinite loop.
                break
            # The original reused one variable `b` as both the while-loop
            # flag and the list index; they are now separate.
            for idx, article_url in enumerate(urllist):
                d = get_datas(article_url, titles[idx], ctimes[idx], imgs[idx])
                ctime = d['ctime']
                print(ctime)
                if last_seen >= ctime:
                    # Caught up with previously-saved articles: write the
                    # empty separator record and stop the whole crawl.
                    reached_old = True
                    writedata({"title": '', "img": '', "context": '', "ctime": '', "jigou": ''})
                    break
                writedata(d)
                print(page)
        except Exception as e:
            # Log the failing page and move on to the next one.
            print(e)
            with open('errorurl.txt', 'a', encoding='utf-8') as f:
                f.write(page_url + '\n')
        page += 1


def main():
    """Entry point: run the incremental crawl."""
    result()


# The original defined main() but never invoked it, so running the script
# did nothing; the standard guard makes it executable.
if __name__ == '__main__':
    main()

# with open('url.txt', encoding='utf-8') as f:
#     a = 1
#     for x in f:
#         try:
#             get_datas(x)
#             print(a)
#             a += 1
#         except:
#             with open('errorurl.txt', 'a', encoding='utf-8') as f:
#                 f.write(x + '\n')
