import requests
import json
import re
from bs4 import BeautifulSoup

def get_data1():
    """Fetch Tencent's `disease_h5` JSONP feed and return the decoded payload.

    Returns:
        dict: the parsed ``data`` object (itself a JSON string inside the
        outer JSONP response).

    Raises:
        ValueError: if the response is not a well-formed JSONP wrapper.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=jQuery341001657575837432268_1581070969707&_=1581070969708'
    headers = {'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Mobile Safari/537.36'}
    res = requests.get(url, headers=headers).text
    # Strip the JSONP wrapper by slicing between the first '(' and the LAST
    # ')'.  The previous split(')')[0] truncated the payload at the first ')'
    # appearing anywhere inside the JSON text, corrupting the data.
    payload = res[res.index('(') + 1:res.rindex(')')]
    outer = json.loads(payload)
    # The interesting content is double-encoded: outer['data'] is a JSON string.
    data1 = json.loads(outer['data'])
    return data1

def get_data2():
    """Fetch Tencent's `disease_other` JSONP feed and return the decoded payload.

    Returns:
        dict: the parsed ``data`` object from the JSONP response.

    Raises:
        ValueError: if the response is not a well-formed JSONP wrapper.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=jQuery34109115895667113716_1582869774819&_=1582869774820'
    headers = {'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Mobile Safari/537.36'}
    res = requests.get(url, headers=headers).text
    # Strip the JSONP wrapper the same way as get_data1: everything between
    # the first '(' and the last ')'.  The previous b[0:-1] assumed the
    # response ends exactly with ')' and broke on trailing whitespace or ';'.
    payload = res[res.index('(') + 1:res.rindex(')')]
    outer = json.loads(payload)
    # outer['data'] is itself a JSON string (double-encoded payload).
    data2 = json.loads(outer['data'])
    return data2



def _find_paragraph(urls, headers, keyword):
    """Fetch each article URL in turn and return the first paragraph
    (chunks split on 8 spaces inside the div#zoom body) containing *keyword*.

    Returns "" when no article contains the keyword.
    """
    for article_url in urls:
        html = requests.get(article_url, headers=headers).text
        soup = BeautifulSoup(html, "lxml")
        zoom = soup.find("div", {"id": "zoom"})
        if zoom is None:
            # Article layout changed / body missing — skip instead of crashing.
            continue
        for paragraph in zoom.get_text().split("        "):
            if keyword in paragraph:
                return paragraph
    return ""


def get_news():
    """Scrape the Beijing Municipal Health Commission homepage for COVID news.

    Collects the news headlines and their dates, follows the articles whose
    title mentions "新增" (new cases), and extracts:
      * a "no new confirmed cases" streak sentence for 石景山区,
      * male/female case counts,
      * case counts per age bracket.

    Returns:
        tuple: (lists1 headlines, lists2 dates, data_add streak summary,
                data_add4 streak day-count string, labels1 gender labels,
                values1 gender counts, values2 age-bracket counts)
    """
    url = "http://wjw.beijing.gov.cn/"
    headers = {"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0"}

    html = requests.get(url, headers=headers).text
    soup = BeautifulSoup(html, "lxml")

    # Navigate the fixed homepage layout down to the news column.
    news_path1 = soup.find_all("div", {"class": "main_con_weinei_syx"})[1]
    news_path2 = news_path1.find("div", {"class": "weinei_left_syx"})
    news_path3 = news_path2.find("div", {"class": "wenhua_left_consy_syx"})

    news_tags = news_path3.find_all("div", {"class": "wenhua_left_consy_line_text_syx"})
    times_tag = news_path3.find_all("div", {"class": "wenhua_left_consy_line_date_syx"})

    lists1 = []          # all headline texts
    lists3 = []          # absolute URLs of "new cases" articles
    for news_tag1 in news_tags:
        news = news_tag1.get_text()
        lists1.append(news.strip())
        if u"新增" in news:
            href = news_tag1.find("a")["href"]
            # hrefs are relative like "./path"; drop the leading "./".
            lists3.append(url + href[2:])

    # NOTE(review): data_add1 and flag were module globals in the original;
    # kept global in case other code in this file reads them — confirm.
    global data_add1, flag

    data_add1 = _find_paragraph(lists3, headers, u"无新增确诊病例")
    data_year1 = _find_paragraph(lists3, headers, u"男性病例")

    # Extract the "no-growth streak" day count for 石景山区.
    # data_add4 is initialized here so the function no longer raises
    # UnboundLocalError at the return when the district is not mentioned.
    data_add4 = ""
    flag = 0
    for clause in data_add1.split("，"):
        if u"石景山区" in clause:
            data_add4 = re.sub(r"\D", "", clause)
            data_add = "石景山区连续" + data_add4 + "日无增长"
            flag = 1
            break
    if flag == 0:
        data_add = "石景山区14日内有增长"

    # Publication dates, parallel to the headlines.
    lists2 = [time_tag1.get_text().strip() for time_tag1 in times_tag]

    # Gender breakdown: first "；"-separated clause lists 男性/女性 counts.
    labels1 = []
    values1 = []
    data_year2 = data_year1.split("；")
    for piece in data_year2[0].split("，"):
        if u"男性病例" in piece:
            count = re.sub(r"\D", "", piece)
            values1.append(count)
            labels1.append("男性病例" + count + "例")
        elif u"女性病例" in piece:
            count = re.sub(r"\D", "", piece)
            values1.append(count)
            labels1.append("女性病例" + count + "例")

    # Age breakdown: second clause, e.g. "...岁以下X例，...至...岁Y例，...岁以上Z例".
    # Guard the index so an unmatched article ("" above) no longer raises
    # IndexError.
    values2 = []
    age_clause = data_year2[1] if len(data_year2) > 1 else ""
    for piece in age_clause.split("，"):
        if u"例" in piece:
            if u"以下" in piece:
                values2.append(re.sub(r"\D", "", piece.split("以下")[1]))
            elif u"至" in piece:
                values2.append(re.sub(r"\D", "", piece.split("岁")[2]))
            elif u"以上" in piece:
                values2.append(re.sub(r"\D", "", piece.split("以上")[1]))

    return lists1, lists2, data_add, data_add4, labels1, values1, values2