# -*- coding:utf-8 -*-
"""
币莱财经

"""
import requests
from bs4 import BeautifulSoup
from WriteData import writedata
from Tk import genearteMD5


def get_html_text(url):
    """Fetch *url* and return the response body as decoded text.

    Sends a desktop-browser User-Agent, lets requests sniff the real
    encoding from the content, and returns None (after printing the
    error) when the request fails for any reason — callers treat a
    falsy result as "page unavailable".
    """
    request_headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/68.0.3440.106 Safari/537.36'),
    }
    try:
        response = requests.get(url, timeout=30, headers=request_headers)
        response.raise_for_status()
        # apparent_encoding is detected from the body, which is more
        # reliable than the (often missing) charset header on this site.
        response.encoding = response.apparent_encoding
        return response.text
    except Exception as err:
        print(err)
        return None


def get_dates_kx(max_pages=100):
    """Collect the date of every newsflash entry, newest pages first.

    Walks the paginated AJAX endpoint of the BiLai Finance newsflash
    category (cateid=48) and keeps the first 10 characters
    ('YYYY-MM-DD') of each entry's timestamp.

    Args:
        max_pages: exclusive upper bound on the page number; the
            default of 100 preserves the original behaviour of
            fetching pages 1-99.

    Returns:
        A list of date strings in page order, or None if any page
        fails to download (a missing page would leave a gap that
        corrupts the per-day counts downstream).
    """
    dates = []
    for page in range(1, max_pages):
        url = ('https://www.niubilai.com/index/newsflash/ajaxmorepage'
               '?page={}&cateid=48'.format(page))
        text = get_html_text(url)
        print('----币莱财经快讯获取第{}页----'.format(page))
        if not text:
            return None
        soup = BeautifulSoup(text, 'lxml')
        time_spans = soup.select('.flash-fl .flash-fl-time span')
        dates.extend(span.get_text()[:10] for span in time_spans)
    return dates


def count_process_kx(dates=None):  # newsflash (快讯)
    """Count newsflash entries for each of the three most recent days.

    Scans the date list for the first three positions where the date
    changes (original logic: ``dates[a] < dates[a + 1]``) and uses
    those boundaries to split the list into three day-long segments.

    NOTE(review): the ``<`` comparison only finds boundaries when the
    dates appear in ascending order — confirm against the ordering the
    site actually returns.

    Args:
        dates: optional pre-fetched list of 'YYYY-MM-DD' strings; when
            None (the default) the list is scraped via get_dates_kx().

    Returns:
        A (today, yesterday, frontday) tuple of entry counts, or
        (0, 0, 0) when the dates could not be fetched or fewer than
        three day boundaries exist.  (The original code crashed with
        TypeError on a None date list and IndexError on a short one.)
    """
    if dates is None:
        dates = get_dates_kx()
    if not dates:
        return (0, 0, 0)
    boundaries = []
    # Bound the scan so a short or single-day list cannot overrun.
    for i in range(len(dates) - 1):
        if dates[i] < dates[i + 1]:
            boundaries.append(i + 1)
            if len(boundaries) == 3:
                break
    if len(boundaries) < 3:
        return (0, 0, 0)
    b1, b2, b3 = boundaries
    # Segment lengths: len(dates[:b1]) == b1, len(dates[b1:b2]) == b2 - b1, ...
    return (b1, b2 - b1, b3 - b2)

def main():
    """Scrape the three-day newsflash counts and persist the record.

    Builds a dict keyed by the MD5 token of the site name, hands it to
    writedata, and returns it for inspection.
    """
    site_name = '币莱财经'
    today, yesterday, frontday = count_process_kx()
    record = {
        genearteMD5(site_name): {
            "name": site_name,
            "today": today,
            "yesterday": yesterday,
            "frontday": frontday,
        }
    }
    writedata(record)
    return record


if __name__ == '__main__':
    # Run the scrape and echo the resulting record for manual inspection.
    print(main())
