# -*- coding:utf-8 -*-
"""
http://www.yunshi24.com/news/list/2.html
"""
import requests
from bs4 import BeautifulSoup
import json
import time


def post_html_text(data):
    """POST *data* to the huoxing24 recommend endpoint and return the parsed JSON.

    Returns None on any network failure, HTTP error status, or non-JSON body;
    the failing URL is appended to errorurl.txt so it can be retried later.
    """
    url = 'https://www.huoxing24.com/info/recommend/getnews'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36', }
    try:
        r = requests.post(url, timeout=30, headers=headers, data=data)
        r.raise_for_status()
        # Let requests sniff the real charset before the body is decoded.
        r.encoding = r.apparent_encoding
        return r.json()
    except (requests.RequestException, ValueError) as e:
        # RequestException covers connection/timeout/HTTP-status errors;
        # ValueError covers a response body that is not valid JSON.
        print(e)
        with open('errorurl.txt', 'a', encoding='utf-8') as f:
            f.write(url + '\n')
        return None


def get_html_text(url):
    """GET *url* and return the decoded page text, or None on failure.

    Any failed URL is appended to errorurl.txt for later inspection/retry.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        resp = requests.get(url, timeout=30, headers=headers)
        resp.raise_for_status()
        # Decode with the sniffed charset rather than the declared one.
        resp.encoding = resp.apparent_encoding
        return resp.text
    except Exception as err:
        print(err)
        with open('errorurl.txt', 'a', encoding='utf-8') as log:
            log.write(url + '\n')
        return None


def writedata(l):
    """Append one record to the output file as a single JSON line (UTF-8)."""
    line = json.dumps(l, ensure_ascii=False)
    with open('火星财经文章.txt', mode='a', encoding='utf-8') as out:
        out.write(line + '\n')


def get_urls(data):
    """POST the feed query *data* and unpack per-article metadata.

    Returns a 6-tuple of parallel lists:
    (titles, ctimes, imgs, urls, authors, refresh_times).

    Raises ValueError when the endpoint could not be reached, instead of the
    opaque TypeError that subscripting None would produce.
    """
    text = post_html_text(data)
    if not text:
        raise ValueError('no response from getnews endpoint')
    articles = text['obj']['inforList']
    titles = [a['title'] for a in articles]
    refresh_times = [a['publishTime'] for a in articles]
    # coverPic is itself a JSON string holding per-placement image URLs.
    imgs = [json.loads(a['coverPic'])["pc_recommend"] for a in articles]
    urls = ['https://www.huoxing24.com/newsdetail/{}.html'.format(a['id']) for a in articles]
    # createTime is epoch milliseconds; format as local "YYYY-MM-DD HH:MM:SS".
    ctimes = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(a['createTime']) / 1000)) for a in articles]
    authors = [a['author'] for a in articles]
    return (titles, ctimes, imgs, urls, authors, refresh_times)


def get_datas(url, title, ctime, img, jigou):
    """Fetch one article page and assemble its record dict.

    Returns None when the page could not be fetched. The article body is kept
    as raw HTML under "context"; it is '' when the selector matches nothing
    (the original stored the literal string "None" in that case).
    """
    # Log before the request, so the message reflects work in progress.
    print('{}页面获取中'.format(url))
    text = get_html_text(url)
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    node = soup.select_one('.detail-text-cont div')
    context = str(node) if node is not None else ''
    d = {"title": title, "img": img, "context": context, "ctime": ctime, "jigou": jigou}
    return d


def result():
    """Crawl the feed page by page until an article at or before the cutoff
    time in config.txt is seen (or a page comes back empty).

    Each article is appended to the output file via writedata(); a blank
    sentinel record marks where the crawl stopped. Failures are logged to
    errorurl.txt and the crawl moves on to the next page.
    """
    try:
        with open('config.txt', encoding='utf-8') as fd:
            dt = fd.readline()
    except OSError:
        # No config file: no cutoff, crawl everything.
        dt = ''
    page = 1
    running = True
    # The API pages by "refreshTime" in epoch milliseconds; start from now.
    refreshTime = int(time.time()) * 1000
    print(refreshTime)

    while running:
        payload = {'refreshType': '2',
                   'refreshTime': refreshTime,
                   'passportId': '',
                   'currentPage': '2',
                   'pageSize': '20', }
        url = ''  # keep defined so the except handler never hits a NameError
        try:
            titles, ctimes, imgs, urllist, jigou, refresh_times = get_urls(payload)
            # Advance the paging cursor to the oldest item we just received.
            refreshTime = refresh_times[-1]
            print('第{}页获取中'.format(page))
            if not urllist:
                # Empty page: retrying the identical request would loop forever.
                break
            # NOTE: the original reused one variable `b` as both the loop flag
            # and the per-article index; they are separated here.
            for i, url in enumerate(urllist):
                d = get_datas(url, titles[i], ctimes[i], imgs[i], jigou[i])
                if d is None:
                    # Single article failed to fetch; skip it, keep the page.
                    continue
                if d['ctime'] <= dt:
                    print('hhhhhhhhhhhhhhhh')
                    running = False
                    # Sentinel record marking the cutoff point.
                    writedata({"title": '', "img": '', "context": '', "ctime": '', "jigou": ''})
                    break
                writedata(d)
                print(page)
        except Exception as e:
            print(e)
            with open('errorurl.txt', 'a', encoding='utf-8') as f:
                f.write(str(url) + '\n')

        page += 1

def main():
    """Entry point: run the incremental crawl."""
    result()


# if __name__ == '__main__':
#     main()
# with open('url.txt', encoding='utf-8') as f:
#     a = 1
#     for x in f:
#         try:
#             get_datas(x)
#             print(a)
#             a += 1
#         except:
#             with open('errorurl.txt', 'a', encoding='utf-8') as f:
#                 f.write(x + '\n')
