# -*- coding:utf-8 -*-
"""Scraper for blokt.com: fetches article URLs from the Newspaper theme's
AJAX pagination endpoint, then scrapes each article's title, HTML body and
date, handing each record to WriteData.writedata.
"""
import json
import re
import time

import requests
from bs4 import BeautifulSoup

from WriteData import writedata


def post_html_text(url, data):
    """POST `data` to `url` and return the decoded response body.

    Args:
        url: Endpoint to POST to.
        data: Form payload passed through to `requests.post`.

    Returns:
        The response text on success, or None when the request fails
        (the error is printed; mirrors get_html_text's error behavior).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        r = requests.post(url, timeout=30, data=data, headers=headers)
        r.raise_for_status()
        # Let requests sniff the charset from the body — the endpoint may
        # not declare one in its headers.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP failures are
        # expected here. Explicit None keeps the contract consistent
        # with get_html_text.
        print(e)
        return None


def get_html_text(url):
    """Fetch `url` via GET and return its body as text.

    Any failure (connection error, timeout, non-2xx status) is printed
    and None is returned instead of raising.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        response = requests.get(url, timeout=60, headers=request_headers)
        response.raise_for_status()
        # Guess the charset from the payload rather than trusting headers.
        response.encoding = response.apparent_encoding
        return response.text
    except Exception as err:
        print(err)
        return None


def get_urls(url, data):
    """Return the article URLs contained in one AJAX pagination response.

    POSTs `data` to the endpoint, extracts the HTML fragment stored in the
    JSON payload's 'server_reply_html_data' field, and collects the hrefs
    of the article headline links.

    Args:
        url: The admin-ajax.php endpoint URL.
        data: The td_ajax_loop form payload (see result()).

    Returns:
        A list of cleaned href strings, or None when the request failed.
    """
    text = post_html_text(url, data)
    if not text:
        return None
    # SECURITY FIX: the response is JSON from a remote server — eval()ing
    # it would execute arbitrary code. Parse it with json.loads instead.
    html_fragment = json.loads(text)['server_reply_html_data']
    # Debug dump of the raw fragment for offline inspection.
    with open('text111111.txt', 'a', encoding='utf-8') as f:
        f.write(html_fragment)
    soup = BeautifulSoup(html_fragment, 'lxml')
    links = soup.select('.td-block-span6 h3 a')
    # hrefs may arrive with escaped slashes ("https:\/\/..."); strip the
    # backslashes. (Renamed loop var — the original shadowed `url`.)
    return [link['href'].replace('\\', '') for link in links]


# data = {'action': 'td_ajax_loop',
#         'loopState[sidebarPosition]': ' ',
#         'loopState[moduleId]': '1',
#         'loopState[currentPage]': '8',
#         'loopState[max_num_pages]': '7',
#         'loopState[atts][category_id]': '76',
#         'loopState[ajax_pagination_infinite_stop]': '3',
#         'loopState[server_reply_html_data]': ' ', }
#
# url = 'https://blokt.com/wp-admin/admin-ajax.php?td_theme_name=Newspaper&v=8.7.2'
# print(get_urls(url, data))


def get_datas(url):
    """Scrape one article page for its title, HTML body and publish date.

    Args:
        url: Article page URL.

    Returns:
        None when the HTTP request itself failed; otherwise a dict with
        keys 'title', 'context' (raw HTML of the post body) and 'ctime',
        with empty-string values when the expected elements are missing.
    """
    text = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    try:
        title = soup.select_one('header h1').get_text()
        context = str(soup.select_one('.td-post-content'))
        ctime = soup.select_one('.td-full-screen-header-image-wrap time').get_text()
        return {"title": title, "context": context, "ctime": ctime}
    except AttributeError:
        # select_one() returns None on a selector miss, so .get_text()
        # raises AttributeError. Narrowed from a bare except: — a bare
        # except also swallowed KeyboardInterrupt/SystemExit.
        return {"title": '', "context": '', "ctime": ''}


def result():
    """Crawl pages 1-7 of the category feed and persist every article.

    For each page, fetches the article URL list via the theme's AJAX
    endpoint, scrapes each article with get_datas() and hands the record
    to writedata(). Every processed URL is appended to 'errorurl.txt'
    as a progress/audit log.
    """
    ajax_url = 'https://blokt.com/wp-admin/admin-ajax.php?td_theme_name=Newspaper&v=8.7.2'

    for page in range(1, 8):
        payload = {'action': 'td_ajax_loop',
                   'loopState[sidebarPosition]': ' ',
                   'loopState[moduleId]': '1',
                   'loopState[currentPage]': page,
                   'loopState[max_num_pages]': '7',
                   'loopState[atts][category_id]': '76',
                   'loopState[ajax_pagination_infinite_stop]': '3',
                   'loopState[server_reply_html_data]': ' ', }

        article_urls = get_urls(ajax_url, payload)

        if not article_urls:
            # BUG FIX: the original `continue` re-queried the same page
            # without incrementing the counter, looping forever on a
            # persistently failing page. Log the miss and move on.
            print('---------------------{}---------------'.format(page))
            continue

        for article_url in article_urls:
            writedata(get_datas(article_url))
            with open('errorurl.txt', 'a', encoding='utf-8') as f:
                f.write(article_url + '\n')


#
#
# Guard the crawl behind the standard entry-point check so importing this
# module (e.g. to reuse get_html_text) no longer kicks off a full scrape.
if __name__ == '__main__':
    result()
# with open('url.txt', encoding='utf-8') as f:
#     for x in f:
#         get_datas(x)
#         time.sleep(5)
