import requests, json
from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import datetime
import craw

# Root of the site whose pages will be submitted to Baidu.
base_url = "http://www.dgzfsj.com"

# Seed the crawl with the home page; the crawler expands it into the
# full list of site URLs to push.
urls = craw.get_urls(base_url, [base_url])


def bd_request():
    """Push the collected site URLs to Baidu's link-submission API and log the outcome.

    Reads the module-level ``urls`` list, POSTs it (one URL per line) to the
    Baidu push endpoint, prints the JSON response, and appends a timestamped
    success/failure line to ``log.txt``.
    """
    # NOTE(review): the site token is hard-coded in the URL — consider
    # moving it to configuration or an environment variable.
    dgzfsj_url = "http://data.zz.baidu.com/urls?site=www.dgzfsj.com&token=lZeXaJYRYDv6n2zW"

    headers = {
        'Content-Type': 'text/plain'
    }

    # The Baidu push API expects one URL per line in the request body.
    # A timeout keeps a stalled connection from hanging the scheduled job.
    response = requests.post(dgzfsj_url, data="\n".join(urls),
                             headers=headers, timeout=30)

    # Let requests decode the JSON body. The previous
    # json.loads(..., encoding="UTF8") raises TypeError on Python 3.9+,
    # where the `encoding` keyword was removed.
    result = response.json()
    print(result)

    # Context manager guarantees the log file is closed even if a write fails.
    with open('log.txt', 'a', encoding='utf-8') as file:
        if 'success' in result:
            file.write('%s---提交成功%s个链接 \r' % (datetime.now(), result['success']))
        else:
            file.write('%s---提交失败 \r' % datetime.now())


if __name__ == '__main__':
    scheduler = BlockingScheduler()
    # Push the URL list to Baidu every 30 minutes.
    scheduler.add_job(bd_request, 'cron', minute="*/30")

    # BlockingScheduler.start() blocks the main thread, so the startup
    # message must be printed *before* calling it — the original printed
    # afterwards and the message never appeared while the program ran.
    print('program running')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
        print("program end")
