# -*- coding:utf-8 -*-
"""Scrape ICO-rating articles from blokt.com listing pages and persist
each article (title, content, publication time) via ``writedata``."""

import requests
import re
from bs4 import BeautifulSoup
import time
from WriteData import writedata


def get_html_text(url):
    """Fetch *url* and return the decoded response body, or None on failure.

    A browser User-Agent is sent because scraping targets commonly reject
    the default ``requests`` UA.  The encoding is sniffed from the payload
    so non-UTF-8 pages decode correctly.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    try:
        r = requests.get(url, timeout=60, headers=headers)
        r.raise_for_status()
        # apparent_encoding guesses the charset from the body; more
        # reliable than the (often missing/wrong) HTTP header.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Narrowed from a blanket `except Exception`: only network/HTTP
        # errors are expected here; anything else should surface as a bug.
        print(e)
        return None


def get_urls(url):
    """Scrape one listing page and return ``(titles, ctimes, urls)``.

    The three lists are parallel: index i of each refers to the same
    article.  Returns None when the page could not be fetched.
    """
    text = get_html_text(url)
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    links = soup.select('.td_module_10 .item-details h3 a')
    # Renamed comprehension variables: the originals shadowed both the
    # `url` parameter and the `title` result list, which is error-prone.
    titles = [a.get_text() for a in links]
    urls = [a['href'] for a in links]
    time_tags = soup.select('.td_module_10 .item-details time')
    ctimes = [t.get_text() for t in time_tags]
    return (titles, ctimes, urls)


def get_datas(url):
    """Fetch one ICO-rating article page and return its assembled HTML.

    The result concatenates the rating image tag, the rating paragraphs,
    the main article body, the scores section and the disclaimer, with the
    "show more" toggle markup stripped out.  Returns None when the page
    cannot be fetched.
    """
    text = get_html_text(url)
    print('{}页面获取中'.format(url))
    if not text:
        return None
    soup = BeautifulSoup(text, 'lxml')
    rating_img = str(soup.select_one('.col-md-4 .centered img'))
    # join instead of repeated += keeps string building linear.
    rating_paras = ''.join(str(p) + '\n' for p in soup.select('.col-md-4 .centered p'))
    body = str(soup.select_one('#contentbody'))
    spaced = soup.select('.td-page-content .spaced')
    scores = str(spaced[1])
    disclaimer = str(spaced[-2])
    # Strip the interactive "show more" toggle — it is meaningless outside
    # the live page.
    toggle = re.compile('<span id="moretoggle">... show more <i class="fa fa-chevron-down"></i></span>')
    merged = rating_img + rating_paras + body + scores + disclaimer
    return toggle.sub('', merged)


# url = 'https://blokt.com/ico-ratings/fluzcoin'
# print(get_datas(url))


def result():
    """Crawl listing page 9 of blokt.com ICO ratings and store articles.

    For every article found, the title, publication time and page content
    are written via ``writedata``.  URLs that fail are appended to
    ``errorurl.txt`` instead of aborting the crawl.
    """
    page = 9
    while page < 10:
        listing_url = 'https://blokt.com/ico-ratings/page/{}'.format(page)
        datas = get_urls(listing_url)
        if datas is None:
            # The listing page itself failed to download: record it and
            # move on.  (The original crashed with a TypeError on
            # `datas[0]` in this case.)
            with open('errorurl.txt', 'a', encoding='utf-8') as f:
                f.write(listing_url + '\n')
            page += 1
            continue
        titles, ctimes, urls = datas
        time.sleep(10)  # be polite between listing-page fetches
        # zip over the parallel lists replaces the manual index counter.
        for title, ctime, article_url in zip(titles, ctimes, urls):
            try:
                context = get_datas(article_url)
                d = {"title": title, "context": context, "ctime": ctime}
                writedata(d)
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt); still best-effort — log the failing
                # URL and continue with the next article.
                with open('errorurl.txt', 'a', encoding='utf-8') as f:
                    f.write(article_url + '\n')
        page += 1


if __name__ == "__main__":
    # Run the crawl only when executed as a script, not on import.
    result()
