# -*- coding:utf-8 -*-
"""

"""
# -*- coding:utf-8 -*-
"""

"""

import requests
import re
from bs4 import BeautifulSoup
import time
from WriteData import writedata


def get_html_text(url):
    """Fetch *url* and return the decoded response body, or None on failure.

    Sends a desktop-Chrome User-Agent so the site serves the normal page,
    and re-decodes with the detected encoding so Chinese text is readable.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        r = requests.get(url, timeout=30, headers=headers)
        r.raise_for_status()
        # apparent_encoding sniffs the body; the declared charset is often wrong.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Only network/HTTP errors are expected here; anything else is a bug
        # and should propagate instead of being silently swallowed.
        print(e)
        return None


def get_urls(url):
    """Scrape one search-result page and return (agency names, article URLs).

    Returns a pair of parallel lists: the text of each
    ``<span class="article_name">`` and the absolute URL of each matching
    ``/Content/information...`` link.  On fetch failure returns ``([], [])``
    so callers can always index both elements (the old code returned a bare
    list here, which broke ``data[1]`` in callers).
    """
    text = get_html_text(url)
    if not text:
        # Keep the return shape identical to the success path.
        return ([], [])
    name_re = re.compile(r'<span class="article_name">([\s\S]*?)</span>')
    link_re = re.compile(r'href="(/Content/information[\s\S]*?)">')
    paths = link_re.findall(text)
    jigoulist = name_re.findall(text)
    # Captured paths already begin with '/', so join without adding another
    # slash (the old code produced 'https://www.ihuoqiu.com//Content/...').
    urllist = ['https://www.ihuoqiu.com' + path for path in paths]
    return (jigoulist, urllist)


def get_datas(url, jigou):
    """Download one article page and persist its fields via ``writedata``.

    Extracts the title and publication datetime with BeautifulSoup and the
    raw article HTML with a regex, then stores a record of the form
    ``{"title", "context", "ctime", "jigou"}``.  Returns None when the page
    could not be fetched.  May raise (e.g. AttributeError/IndexError) when
    the page layout differs; the caller handles that.
    """
    page = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not page:
        return None
    soup = BeautifulSoup(page, 'lxml')
    title = soup.select_one('.hq_information_title').get_text()
    ctime = soup.select_one('.hq_information_datetime').get_text()
    # Keep the body as raw HTML, not text, so formatting is preserved.
    body_re = re.compile(r'<div class="article-content article">([\s\S]*?)</div>')
    context = body_re.findall(page)[0]
    writedata({"title": title, "context": context, "ctime": ctime, "jigou": jigou})


def result():
    """Scrape pages 1-5 of the '火球评级' search results.

    For every article found, fetch and store it via ``get_datas``; URLs that
    fail are appended to ``errorurl.txt``.

    Fixes over the original ``while`` version:
    - an empty page no longer loops forever (the old ``continue`` skipped the
      ``a += 1`` increment and re-fetched the same page indefinitely);
    - agency names are paired with URLs via ``zip``, so one failed article no
      longer shifts every subsequent name/URL pairing (the old manual index
      ``b`` was only incremented on success);
    - only ``Exception`` is caught, so KeyboardInterrupt still aborts.
    """
    base = 'https://www.ihuoqiu.com/Content/search?data=W9F3j2vgufgdWZmdtGFOlg__2C__2C&Strkey=%E7%81%AB%E7%90%83%E8%AF%84%E7%BA%A7&pageIndex={}'
    for page in range(1, 6):
        jigoulist, urllist = get_urls(base.format(page))
        if not urllist:
            continue
        # Be polite to the server between page fetches.
        time.sleep(3)
        print(page)
        for article_url, jigou in zip(urllist, jigoulist):
            try:
                get_datas(article_url, jigou)
                print(page)
            except Exception:
                # Record the failure and keep going with the next article.
                with open('errorurl.txt', 'a', encoding='utf-8') as f:
                    f.write(article_url + '\n')


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    result()

# with open('url.txt', encoding='utf-8') as f:
#     a = 1
#     for x in f:
#         try:
#             get_datas(x)
#             print(a)
#             a += 1
#         except:
#             with open('errorurl.txt', 'a', encoding='utf-8') as f:
#                 f.write(x + '\n')
