#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import os
import time
import bs4 as bs4
from pip._vendor import requests

from 神方.util.write_excel import write_file

# Accumulator for scraped rows, keyed by a stringified running index.
# The "序号" entry holds the column headers for the Excel sheet:
# province, city, date, industry, content.
data = {
        "序号": ["省份", "城市", "日期", "行业", "内容"]
    }


# First page of the tender-notice listing for province=16 (Anhui).
url = 'https://www.e-jebao.com/notice/list.html?province=16&city=&page=1'


# Date N days before today; beforeOfDay=1 -> yesterday, =N -> N days ago.
def getdate(beforeOfDay):
    """Return the date *beforeOfDay* days before today as 'YYYY-MM-DD'."""
    target = datetime.datetime.now() - datetime.timedelta(days=beforeOfDay)
    return target.strftime('%Y-%m-%d')

# Current year as a 4-digit string (not used in the visible code below).
nowYear = datetime.datetime.now().strftime('%Y')
# Cut-off date: only notices from the last 2 days are collected.
nowTime_str = getdate(2)
# Cut-off date converted to epoch seconds, so dates compare numerically.
e_time = time.mktime(time.strptime(nowTime_str, "%Y-%m-%d"))


# Decide whether pagination should continue past the current page.
def getLastDate(trs):
    """Return 1 if the oldest (last) entry on the page is still on or
    after the cut-off date ``e_time`` — meaning the next page may hold
    more in-range rows — otherwise return None.

    trs: list of bs4 ``<li>`` tags; each is expected to contain at
    least three ``<span>`` children, the third holding a 'YYYY-MM-DD'
    date string (assumption based on usage here — confirm with site).
    """
    if not trs:
        # Empty page: nothing in range can follow, stop paginating.
        return None
    # Date of the last (oldest) entry on this page.
    lastDate = trs[-1]('span')[2].string
    print('最后一条的日期是：%s' % lastDate)
    # Convert to epoch seconds so the two dates compare as integers.
    lastDate = time.mktime(time.strptime(lastDate, "%Y-%m-%d"))
    if int(lastDate) - int(e_time) >= 0:
        return 1
    return None


def writeValue(lis, pageNum, index):
    """Extract province/city/date/content from one listing page into
    the module-level ``data`` dict, then recurse to the next page while
    rows are still inside the date window.

    lis: list of bs4 ``<li>`` tags for the current page.
    pageNum: 1-based page number that produced ``lis``.
    index: next free integer key (as str) for the ``data`` dict.

    Side effects: mutates ``data``; may trigger further HTTP fetches
    via the mutually-recursive ``getValue``.
    """
    if not lis:
        # Nothing on this page: nothing to record, nothing to paginate.
        return
    for item in lis:
        spans = item('span')
        # Rows with a different span count are layout/filler rows; skip.
        if len(spans) == 6:
            # City label, e.g. '[合肥]' — brackets stripped below.
            city = spans[0].string
            # Notice title taken from the anchor's title attribute.
            content = spans[1].a['title']
            # Publication date 'YYYY-MM-DD'.
            date = spans[2].string
            s_time = time.mktime(time.strptime(date, "%Y-%m-%d"))
            # Keep only rows on/after the cut-off date (e_time).
            if int(s_time) - int(e_time) >= 0:
                # `row` (was `list`) avoids shadowing the builtin.
                row = ['安徽省', city.replace('[', '').replace(']', ''), date, '', content]
                data[str(index)] = row
                index = index + 1

    # If the oldest row on this page is still in range, the next page
    # may contain more hits: fetch it (mutual recursion with getValue).
    if getLastDate(lis) == 1:
        pageNum = pageNum + 1
        urlNext = 'https://www.e-jebao.com/notice/list.html?province=16&city=&page=' + str(pageNum)
        print(urlNext)
        getValue(urlNext, pageNum, index)

def getValue(url, pageNum, index):
    """Fetch one listing page and hand its ``<li>`` rows to writeValue.

    url: full listing-page URL; pageNum and index are passed through
    unchanged to writeValue.
    """
    # Simple GET; timeout added so a hung server cannot block the
    # scraper forever (the original call had no timeout at all).
    response = requests.get(url, timeout=30)

    # Decode with the encoding sniffed from the page content.
    response.encoding = response.apparent_encoding

    soup = bs4.BeautifulSoup(response.text, 'html.parser')
    container = soup.find(name='ul', attrs={'id': 'infoContent'})
    if container is None:
        # Layout changed or an error page came back: report and stop
        # instead of crashing on NoneType.findAll.
        print('未找到 infoContent 列表：%s' % url)
        return
    writeValue(container.findAll('li'), pageNum, index)


if __name__ == '__main__':
    # Today's date stamp used in the output filename.
    run_stamp = datetime.datetime.now().strftime('%Y%m%d')
    # Crawl every in-range page, starting at page 1, row index 1.
    getValue(url, 1, 1)

    # Workbook name: <today>-<cut-off date> (dashes removed).
    file_path = 'D:\\招投标信息{0}-{1}.xls'.format(run_stamp, nowTime_str.replace('-', ''))
    # Remove any stale copy so the export starts from a clean slate.
    if os.path.exists(file_path):
        print('删除文件')
        os.remove(file_path)
    # Dump the accumulated rows into the Excel sheet.
    write_file(file_path, data, '安徽省')
