# -*- coding:utf-8 -*-
"""Scraper for portal.one.top information reports.

Fetches paginated report listings, downloads each report's detail
JSON, and persists each record via WriteData.writedata.
"""

import requests
from bs4 import BeautifulSoup
import time
from WriteData import writedata
import json


def get_html_text(url):
    """Fetch *url* and return the decoded response body.

    Returns None (after printing the error) on any request failure —
    callers treat a falsy result as "page unavailable".
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        response = requests.get(url, timeout=30, headers=request_headers)
        response.raise_for_status()
        # Let requests sniff the charset from the body so CJK text decodes correctly.
        response.encoding = response.apparent_encoding
        return response.text
    except Exception as exc:
        print(exc)
        return None


def get_urls(url):
    """Fetch the JSON report listing at *url* and extract its entries.

    Returns a 3-tuple of parallel lists ``(titles, ctimes, urllist)``:
    report titles, their ``createDt`` timestamps, and the detail-page
    URLs built from each entry's ``id``.

    Always returns a 3-tuple — all three lists are empty when the fetch
    fails or the page has no entries — so callers can unpack it safely.
    (The original returned None / [] / tuple inconsistently, which
    crashed the caller on any failed or empty page.)
    """
    titles, ctimes, urllist = [], [], []
    text = get_html_text(url)
    if not text:
        return (titles, ctimes, urllist)
    payload = json.loads(text)
    entries = payload['data']['data'] or []
    # Single pass over the entries instead of three separate comprehensions.
    for entry in entries:
        titles.append(entry['title'])
        ctimes.append(entry['createDt'])
        urllist.append(
            'https://portal.one.top/app/v1/information/queryReportDetails?id={}'.format(entry['id']))
    return (titles, ctimes, urllist)


def get_datas(url):
    """Fetch the report-detail JSON at *url* and return its 'context' field.

    Prints a progress line for every attempted URL; returns None when
    the page could not be fetched.
    """
    body = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not body:
        return None
    payload = json.loads(body)
    return payload['data']['context']


def result():
    """Crawl listing pages 1-16 and persist every report found.

    For each page, fetches the listing via get_urls(), then each
    report's body via get_datas(), and writes a record of the form
    ``{"title": ..., "ctime": ..., "context": ...}`` with writedata().

    Fixes vs. the original: a page whose listing fetch fails is skipped
    instead of raising TypeError (get_urls could return None), and the
    manual index counter is replaced by zip() over the parallel lists.
    """
    for page in range(1, 17):
        listing_url = (
            'https://portal.one.top/app/v1/information/'
            'queryInformationReport?page={}&size=5'.format(page)
        )
        data = get_urls(listing_url)
        # A failed or empty listing yields nothing to iterate; move on.
        if not data:
            continue
        titlelist, ctimelist, urllist = data
        print(page)
        for title, ctime, detail_url in zip(titlelist, ctimelist, urllist):
            context = get_datas(detail_url)
            d = {"title": title, "ctime": ctime, "context": context}
            writedata(d)


if __name__ == '__main__':
    # Guard the crawl so importing this module no longer triggers
    # network traffic and file writes as a side effect.
    result()
