# 定时爬取风险点位 (scheduled crawl of COVID-19 risk locations)
import json
import re
import datetime
import bs4
import requests
from baiduMapApi import postion2Location


def list_to_json(dic1_list):
    """Serialize a list of risk-point dicts to ../data/riskpoints.json.

    Opens the file in write mode, so each call fully replaces the
    previous snapshot. ensure_ascii=False keeps the Chinese place
    names human-readable in the output file.
    """
    # NOTE: the original kept an unused `today` variable here; removed.
    with open("../data/riskpoints.json", "w", encoding="utf8") as file:
        json.dump(dic1_list, file, indent=2, ensure_ascii=False)
    return None


def extract_time(timeInfo):
    """Parse a Chinese time fragment into a (startTime, endTime) pair.

    Input looks like "6月21日10:06-10:37" (explicit range) or
    "6月21日10:06左右" / "...以后" (a single instant). Returns two
    "YYYY-MM-DD-HH:MM" strings; for the single-instant forms the end
    time is taken as one minute after the start.

    NOTE(review): the year is hard-coded to 2022 because the source
    page omits it — confirm before reusing across year boundaries.
    """
    # "6月21日10:06..." -> "2022-6-21-10:06..."
    timeInfo = f"2022-{timeInfo}".replace("月", "-").replace("日", "-")
    # Zero-pad every 1-digit number (month, day, hour, minute).
    # zfill(2) is a no-op on the 4-digit year and on 2-digit fields.
    d = re.sub(r"\d+", lambda m: m.group().zfill(2), timeInfo)
    d = d.replace("：", ":")  # normalize fullwidth colon
    fTime = d[11:16]  # "HH:MM" — chars after the "YYYY-MM-DD-" prefix
    if "左右" in d or "以后" in d:
        # Single instant: end = start + 1 minute. datetime arithmetic
        # replaces the original hand-rolled carry, which emitted
        # unpadded hours on rollover ("8:59" -> "9:00" instead of
        # "09:00") and the invalid "24:00" at 23:59.
        start = datetime.datetime.strptime(d[0:11] + fTime, "%Y-%m-%d-%H:%M")
        end = start + datetime.timedelta(minutes=1)
        return start.strftime("%Y-%m-%d-%H:%M"), end.strftime("%Y-%m-%d-%H:%M")
    else:
        # Explicit range: d[16] is the "-" separator between the times.
        return d[0:16], d[0:11] + d[17:]


def extract_position(position):
    """Normalize a raw place/trip description into a geocodable place name.

    Raw strings may describe bus or subway trips; each branch below trims
    the description down to (presumably) the boarding stop. Returns a
    (position, lng, lat) tuple, where lng/lat come from postion2Location.
    NOTE(review): refine_json drops entries with lng == 0, which suggests
    (0, 0) signals a failed geocode — confirm in baiduMapApi.
    Branches are applied in order and each mutates `position`, so the
    later branches act on the already-trimmed string.
    """
    # Normalize fullwidth dash/parentheses to their ASCII equivalents.
    position = position.replace("—", "-").replace("（", "(").replace("）", ")")
    # Bus trips, shaped like "...公交车(XX站-YY站...)"
    match1 = re.findall(r'公交车[(]\D+-\D+', position)
    # Subway trips phrased "从<stop>乘坐<line>...", e.g.:
    # 从通州区果园站乘坐地铁1号线到朝阳区大望路站，换乘14号线到丰台区景泰站下车
    # 从丰台区景泰站乘坐地铁14号线到大望路站，换乘地铁1号线到通州区果园站下车
    # 从通州区果园站乘坐地铁1号线到朝阳区大望路站，换乘14号线到丰台区景泰站下车
    match2 = re.findall(r"从\D+乘坐\D+", position)
    # Trips phrased "...<stop>上车...<stop>下车", e.g.:
    # 乘坐地铁八通线，四惠东站上车，梨园站下车
    match3 = re.findall(r"\D+上车\D+下车", position)
    # Trips phrased "...上地铁...", e.g.:
    # 6号线北运河西站上地铁，至呼家楼站换乘10号线，牡丹园站下地铁
    match4 = re.findall(r"\D+上地铁\D+", position)
    if match1:
        # Keep only the first stop of "XX站-YY站" and re-append "站" (stop).
        position = position.replace("(", "").replace(")", "")
        idx = position.find("-")
        position = position[0:idx] + "站"
    if match2:
        # Slice between the leading "从" (from) and "乘坐" (board);
        # [1:idx-1] also drops the trailing "站" before appending "地铁站".
        idx = position.find("乘坐")
        position = position[1:idx - 1] + "地铁站"
    if match3:
        # Keep the segment naming the boarding stop ("...上车");
        # if a "，" precedes it, start just after that comma.
        idx1 = position.find("，")
        idx2 = position.find("上车")
        if idx1 > idx2:
            position = position[0:idx2]
        else:
            position = position[idx1 + 1:idx2]
    if match4:
        # Same trimming as match3, but anchored on "上地铁".
        idx1 = position.find("，")
        idx2 = position.find("上地铁")
        if idx1 > idx2:
            position = position[0:idx2]
        else:
            position = position[idx1 + 1:idx2]
    # Geocode the cleaned name (Baidu Maps helper; external to this file).
    lng, lat = postion2Location(position)
    return position, lng, lat


def extract_info(text):
    """Parse one scraped list item into risk-point records.

    A well-formed item splits into exactly 5 whitespace-separated
    fields: place, ?, time info, ?, publish time. Anything else is
    ignored. Each time range found in the time-info field yields one
    record shaped like:
      {
        "startTime": "2022-06-09-14:50",
        "endTime": "2022-06-09-15:00",
        "riskPoint": "经开区北京美捷乐商贸中心",
        "lng": 116.42566536450947,
        "lat": 39.98214542749171
      }
    Returns a (possibly empty) list of such dicts.
    """
    fields = text.split()
    if len(fields) != 5:  # not the standard 5-field layout
        return []
    # Resolve the place name and its coordinates.
    position, lng, lat = extract_position(fields[0])
    timeInfo = fields[2]
    # Explicit range, "左右" (around), and "以后" (after) forms —
    # matched in that order, mirroring the original concatenation.
    patterns = (
        r'\d{1,2}月\d{1,2}日\d{1,2}:\d{1,2}-\d{1,2}:\d{1,2}',
        r'\d{1,2}月\d{1,2}日\d{1,2}:\d{1,2}左右',
        r'\d{1,2}月\d{1,2}日\d{1,2}:\d{1,2}以后',
    )
    records = []
    for pattern in patterns:
        for fragment in re.findall(pattern, timeInfo):
            startTime, endTime = extract_time(fragment)
            records.append({
                "startTime": startTime,
                "endTime": endTime,
                "riskPoint": position,
                "lng": lng,
                "lat": lat,
            })
    return records


def refine_json():
    """Clean riskpoints.json in place.

    Drops entries whose geocoding failed (lng == 0), sorts the rest by
    startTime descending, keeps only entries starting today or earlier,
    and rewrites the file via list_to_json.
    """
    # The file is written with utf8 (see list_to_json); read it the same
    # way — the original relied on the platform default encoding.
    with open("../data/riskpoints.json", "r", encoding="utf8") as file:
        data = json.load(file)
    # lng == 0 marks a failed geocode; see extract_position.
    located = [d for d in data if d["lng"] != 0]
    located.sort(key=lambda x: x["startTime"], reverse=True)
    # Hoisted: the original called datetime.datetime.today() per iteration.
    today = str(datetime.datetime.today().date())
    result = [d for d in located if d["startTime"][0:10] <= today]

    list_to_json(result)

    return None


def get_data():
    """Crawl the Bendibao risk-location list page and persist the result.

    Fetches the listing, extracts one or more records from every
    <li> entry, writes them to ../data/riskpoints.json, then runs
    refine_json to filter/sort the file. Returns True on success,
    False when the page could not be fetched.
    """
    url = "http://m.bj.bendibao.com/news/fengxianchangsuo/?qu=&time=0&place=2"
    headers = {
        'User-Agent': 'Mozilla/5.0(Macintosh;lnterl Mac OS X 10_14_6) AppleWebKit/537.36(KHTML,like Gecko)Chrome/87.0.4280.88 Safari/537.36'
    }
    res = requests.get(url=url, headers=headers)
    # Let requests guess the charset from the body, not the headers.
    res.encoding = res.apparent_encoding
    if res.status_code != 200:
        print('无法获取页面')
        return False
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # Selector obtained via browser "copy selector":
    # body > div.box > div.container > div.list_con > ul > li:nth-child(1) > div.dot_name
    # body > div.box > div.container > div.list_con > ul > li:nth-child(16)
    entries = soup.select('div.list_con>ul>li')
    records = []
    for entry in entries:
        records.extend(extract_info(entry.text))
    list_to_json(records)
    refine_json()
    return True


# Script entry point: run one crawl/refine cycle and print True/False.
if __name__ == '__main__':
    print(get_data())    
