
import urllib.request
import urllib.parse
from lxml import etree
import datetime
from ast import literal_eval

# Module-level date setup: the scrape targets default to "today".
# These globals are read by data_encap() and main() below.
now = datetime.datetime.now()
Time = now.strftime("%m")  # current month as a zero-padded string, e.g. "03"
Page = now.strftime("%d")  # current day of month as a zero-padded string
month = int(Time)
day_page = int(Page)  # start day (起始日期)
day_end = int(Page)  # end day, inclusive (结束日期)

def page_lxml(Requise_data):
    """Parse the event-list HTML and pull out years and event titles.

    Returns a dict with keys "To_Time" (list of year strings from the
    timeline icons) and "To_title" (list of event title attributes).
    The two lists are not guaranteed to be the same length.
    """
    tree = etree.HTML(Requise_data)
    return {
        "To_Time": tree.xpath('//ul/li/div[@class="cbp_tmicon"]/text()'),
        "To_title": tree.xpath('//ul/li/div/div/a[@class="pica"]/@title'),
    }

def Special(data):
    """Strip zero-width-space, bullet, and non-breaking-space chars from *data*.

    Preserves the original contract: if *data* starts with a digit it is
    returned unchanged; otherwise all special characters are removed.
    The original loop clobbered its intermediate result on every
    iteration (`Char_data = data.replace(...)`), relying on an extra
    chained `.replace()` at the return to compensate; it also returned
    None for empty input. Both are fixed here.

    :param data: event title text (str)
    :return: cleaned string (empty input is returned as-is)
    """
    # Characters to strip: zero-width space, bullet, non-breaking space.
    special_chars = ("\u200b", "\u2022", "\xa0")
    if not data:
        # Empty input: nothing to strip (original implicitly returned None).
        return data
    if data[0].isdigit():
        # Digit-leading strings (e.g. plain years) pass through untouched.
        return data
    for ch in special_chars:
        data = data.replace(ch, '')
    return data

def write(data):
    """POST one url-encoded event record to the local Flask write endpoint.

    :param data: url-encoded bytes, e.g. urllib.parse.urlencode(d).encode("utf-8")
    :return: the status string "写入数据库"
    :raises urllib.error.URLError: if the local service is unreachable
    """
    url = "http://127.0.0.1:5000/Today/Event/write"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
    }
    request = urllib.request.Request(url=url, headers=headers, data=data)
    # Use the response as a context manager so the socket is closed;
    # the original left it open (resource leak).
    with urllib.request.urlopen(request):
        pass
    return "写入数据库"

def Requise_encap(url):
    """GET *url* with a browser User-Agent and return the body as UTF-8 text.

    :param url: absolute URL to fetch
    :return: decoded response body (str)
    :raises urllib.error.URLError: on network failure
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
    }
    request = urllib.request.Request(url=url, headers=headers)
    # Context manager closes the connection; the original never closed it.
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')

def data_encap(leml_data):
    """Pair each scraped year with its event title and POST each record.

    The original duplicated the whole loop in an if/else just to iterate
    over the shorter of the two lists; zip() truncates to the shorter
    list by definition, so one loop suffices.

    :param leml_data: dict from page_lxml() with "To_Time" and "To_title" lists
    """
    for year, title in zip(leml_data["To_Time"], leml_data["To_title"]):
        data = {
            "T_year": year,  # year the event happened
            "incident": Special(title),  # event title, special chars stripped
            "month": "{0}-{1}".format(month, day_page)  # month-day key
        }
        print(data)
        # Encode as form data and send to the local write endpoint.
        write(urllib.parse.urlencode(data).encode("utf-8"))

def main():
    """Scrape "today in history" events and push them to the local service.

    Step 1 parses the HTML page for the current date; step 2 pages
    through the site's read API for every day in [day_page, day_end],
    stopping a day's pagination when the server responds with "0".

    :return: True on success, False on any error (the error is printed)
    """
    try:
        # Step 1: today's HTML page.
        url = "https://today.help.bj.cn/" + str(month) + "/" + str(day_page) + "/"
        data_encap(page_lxml(Requise_encap(url)))

        # Step 2: the paginated "read" endpoint, one day at a time.
        for day in range(day_page, day_end + 1):
            page_no = 1
            while True:
                url = ("https://today.help.bj.cn/read/?page=" + str(page_no)
                       + "&pagesize=20&month=" + str(month) + "&day=" + str(day))
                body = Requise_encap(url)
                page_no = page_no + 1
                if body == "0":  # server's "no more pages" sentinel
                    break
                # NOTE(review): the body appears to be a Python-literal list of
                # dicts; literal_eval evaluates literals only (no code
                # execution), so it is safe on this untrusted input.
                records = literal_eval(body)
                for record in records:
                    payload = {
                        "T_year": record["solaryear"],  # year of the event
                        "incident": record["title"],  # event text
                        "month": "{0}-{1}".format(month, day)  # month-day key
                    }
                    print(payload)
                    write(urllib.parse.urlencode(payload).encode("utf-8"))
        return True
    except Exception as err:
        # Top-level boundary: report the failure and signal it to the caller.
        print(err)
        return False







