# -*- coding:utf-8 -*-
"""
Scrape blog articles from coinschedule.com.

Collects article URLs from the ICO-reports listing pages, then extracts each
article's title, publish time and body, handing the result to
``WriteData.writedata``.
"""

import requests
import re
from bs4 import BeautifulSoup
import time
from WriteData import writedata


def get_html_text(url, retries=3):
    """Download *url* and return the decoded page text.

    Parameters
    ----------
    url : str
        Page to fetch.
    retries : int, optional
        Maximum number of attempts before giving up (default 3).
        The original implementation retried by recursing without returning
        the result (so a successful retry was discarded) and could recurse
        forever on a persistent failure; this bounds the retries and
        propagates the successful response.

    Returns
    -------
    str or None
        The response body on success, ``None`` when every attempt failed.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    for _ in range(retries):
        try:
            r = requests.get(url, timeout=30, headers=headers)
            r.raise_for_status()
            # Guess the encoding from the body rather than trusting the header.
            r.encoding = r.apparent_encoding
            return r.text
        except requests.RequestException as e:
            print(e)
    return None


def get_urls(url):
    """Return the article links (``.more-link`` anchor hrefs) on listing page *url*.

    Returns an empty list when the page could not be downloaded — the
    original passed ``None`` straight into ``BeautifulSoup`` and crashed.
    """
    text = get_html_text(url)
    if not text:
        return []
    soup = BeautifulSoup(text, 'lxml')
    return [a['href'] for a in soup.select('.more-link')]


def get_datas(url):
    """Fetch one article page, extract its fields, and persist them.

    Extracts the title (via a layout-specific ``<span>`` regex), the first
    ``<time>`` element's text, and the ``#wrap`` container's HTML (with
    lazy-loaded image URLs restored), then hands the dict to ``writedata``.

    Returns ``None`` when the page cannot be fetched or does not match the
    expected layout; the original raised ``IndexError``/``AttributeError``
    in those cases.
    """
    text = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    reg = re.compile(r'<span style="font-family: impact, sans-serif; font-size: 24pt;">([\s\S]*?)</span></p>')
    titles = reg.findall(text)
    if not titles:
        # Layout changed or this is not an article page; nothing to save.
        return None
    title = titles[0]
    time_tag = soup.select_one('time')
    ctime = time_tag.get_text() if time_tag is not None else ''
    # Lazy-loaded images carry the real URL in data-src; restore it to src.
    for img in soup.select('img'):
        if img.has_attr('data-src'):
            img['src'] = img['data-src']
    wrap = soup.select_one('#wrap')
    if wrap is None:
        return None
    context = str(wrap)
    d = {"title": title, "ctime": ctime, "context": context}
    writedata(d)


# NOTE(review): one-off fetch of a single article that runs at import time —
# presumably leftover debug code; consider moving it under an
# `if __name__ == "__main__":` guard (left as-is to preserve behavior).
url = 'https://www.coinschedule.com/blog/monoreto-mnr/'
get_datas(url)


def result():
    """Collect article URLs from listing pages 1-7 and append them to csurl.txt.

    One URL per line is appended; the page number is printed after each page
    as lightweight progress output.
    """
    base = 'https://www.coinschedule.com/blog/ico-reports/page/{}/'
    for page in range(1, 8):
        urllist = get_urls(base.format(page))
        # Open the output file once per page instead of once per URL
        # (the original also shadowed the page URL with the loop variable).
        with open('csurl.txt', 'a', encoding='utf-8') as f:
            for link in urllist:
                f.write(link + '\n')
        print(page)
        #     get_datas(url)
        #     time.sleep(5)

# with open('csurl.txt', encoding='utf-8') as f:
#     for x in f:
#         get_datas(x)
