# -*- coding:utf-8 -*-
import os
import json
import time
import requests
import csv
import re
import pandas as pd
from datetime import date, timedelta
import numpy as np
from bs4 import BeautifulSoup
from lxml import etree
import encodings.idna
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
from loguru import logger


logger.add('crawl.log')



yesterday = str((date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")).replace('-', '%2F')
# 前七天
yesterday_seven = str((date.today() + timedelta(days=-7)).strftime("%Y-%m-%d")).replace('-', '%2F')
# 今天
today = str(date.today()).replace('-', '%2F')
today_china_water = str(date.today())

today_current = str(date.today())
print(today_current)

# 当前时间戳
current_milli_times = lambda: int(round(time.time() * 1000))
current_milli_time = current_milli_times()


class WCrawl(QObject):

    # 信号
    on_GuangdongRainCrawl = pyqtSignal(str)
    on_GuangdongWaterCrawl=pyqtSignal(str)

    on_ChinaQualityCrawl=pyqtSignal(str)
    on_ZhujiangCrawl=pyqtSignal(str)
    on_yellow = pyqtSignal(str)

    on_greatRiver=pyqtSignal(str)
    on_greatReservoir=pyqtSignal(str)
    on_pointHydroInfo=pyqtSignal(str)


    on_one_date_icp=pyqtSignal(str)
    on_one_date_mcps= pyqtSignal(str)
    on_one_date_il=pyqtSignal(str)
    on_one_date_ir=pyqtSignal(str)
    on_one_date_arc=pyqtSignal(str)

    on_seven_date_icp = pyqtSignal(str)
    on_seven_date_mcps = pyqtSignal(str)
    on_seven_date_il = pyqtSignal(str)
    on_seven_date_ir = pyqtSignal(str)
    on_seven_date_arc = pyqtSignal(str)

    # 广东雨情日数据
    @pyqtSlot()
    def GuangdongRainCrawl(self):
        if not os.path.exists('data/广东雨情日数据'):
            os.makedirs('./data/广东雨情日数据')
        else:
            pass
        paths_water = './data/广东雨情日数据'

        name_csv = '{}/{}_Guangdong_Daily_Rain.csv'.format(paths_water, today_china_water)
        name_xlsx = '{}/{}_Guangdong_Daily_Rain.xlsx'.format(paths_water, today_china_water)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['市', '市(县)', '站点', '累计雨量(毫米)', '时间标头'])

        url = 'http://113.108.186.79:9001/Report/RainReport.aspx'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
            , 'Upgrade-Insecure-Requests': '1'
            , 'Host': '113.108.186.79:9001'
            ,
            'Cookie': 'UM_distinctid=1758d35fe45758-09dc3e4a7c3b87-3a1e3706-1fa400-1758d35fe46b1c; CNZZDATA1260055671=1626281541-1604388562-%7C1604388562; CNZZDATA1260055677=947552259-1604393179-http%253A%252F%252F113.108.186.79%253A9001%252F%7C1604455124'
            ,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        }
        html = requests.get(url, headers=headers).text
        datas = etree.HTML(html)
        leftTree = datas.xpath('//div[@id="LeftTree"]/table')
        for letr in leftTree:
            center = letr.xpath('thead/tr/td/text()')[0].strip()
            trs = letr.xpath('tbody/tr')[2:]
            for tr in trs:
                urban = tr.xpath('td[1]/text()')[0].strip().replace('\"', '')
                countr = tr.xpath('td[2]/text()')[0].strip().replace('\"', '')
                site = tr.xpath('td[3]/text()')[0].strip().replace('\"', '')
                rain = tr.xpath('td[4]/text()')[0].strip().replace('\"', '')

                csv_writes.writerow([urban, countr, site, rain, center, center])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)
        print(today_current + '------广东雨情系统已经完成')

        self.on_GuangdongRainCrawl.emit(today_current + '------广东雨情系统已经完成')

    # 广东水情日数据
    @pyqtSlot()
    def GuangdongWaterCrawl(self):
        if not os.path.exists('data/广东水情日数据'):
            os.makedirs('./data/广东水情日数据')
        else:
            pass
        paths_water = './data/广东水情日数据'

        url = 'http://113.108.186.79:9001/Report/WaterReport.aspx'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
            , 'Upgrade-Insecure-Requests': '1'
            , 'Host': '113.108.186.79:9001'
            , 'Accept-Encoding': 'gzip, deflate'
            ,
            'Cookie': 'UM_distinctid=1758d35fe45758-09dc3e4a7c3b87-3a1e3706-1fa400-1758d35fe46b1c; CNZZDATA1260055677=947552259-1604393179-http%253A%252F%252F113.108.186.79%253A9001%252F%7C1604455124; CNZZDATA1260055671=1626281541-1604388562-%7C1604468248'
            ,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        }
        html = requests.get(url, headers=headers).text

        datas = etree.HTML(html)
        name_files_left_csv = '{}/{}_Guangdong_Daily_Water_RiverCourse.csv'.format(paths_water, today_china_water)
        name_files_left_xlsx = '{}/{}_Guangdong_Daily_Water_RiverCourse.xlsx'.format(paths_water, today_china_water)

        files_left = open(name_files_left_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes_left = csv.writer(files_left)
        csv_writes_left.writerow(['市', '市(县)', '站点', '时间', '水位', '警戒水位', '水势', '标头'])

        leftTree = datas.xpath('//div[@id="LeftTree"]/table')
        for letr in leftTree:
            center = letr.xpath('thead/tr/td/text()')[0].strip()
            trs = letr.xpath('tbody/tr')[2:]
            for tr in trs:
                urban = tr.xpath('td[1]/text()')[0].strip().replace('\"', '')
                countr = tr.xpath('td[2]/text()')[0].strip().replace('\"', '')
                site = tr.xpath('td[3]/text()')[0].strip().replace('\"', '')
                times = tr.xpath('td[4]/text()')[0].strip().replace('\"', '')
                water_level = tr.xpath('td[5]/text()')[0].strip().replace('\"', '')
                try:
                    warning_water_level = tr.xpath('td[6]/text()')[0].strip().replace('\"', '')
                except IndexError:
                    warning_water_level = ""
                # 水势
                water_potential = tr.xpath('td[7]/text()')[0].strip().replace('\"', '')
                csv_writes_left.writerow(
                    [urban, countr, site, times, water_level, warning_water_level, water_potential,center])

        files_left.close()
        csv_left_data = pd.read_csv(name_files_left_csv, encoding='utf_8_sig')
        csv_left_data.to_excel(name_files_left_xlsx, index_label=False, index=False)


        name_files_right_csv = '{}/{}_Guangdong_Daily_Water_ReservoirStation.csv'.format(paths_water, today_china_water)
        name_files_right_xlsx = '{}/{}_Guangdong_Daily_Water_ReservoirStation.xlsx'.format(paths_water,
                                                                                                today_china_water)
        files_right = open(name_files_right_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes_right = csv.writer(files_right)
        csv_writes_right.writerow(['市', '市(县)', '站点', '时间', '水位', '讯限水位', '标头'])

        RightTree = datas.xpath('//div[@id="RightTree"]/table')

        for right in RightTree:
            center = right.xpath('tr[1]/td/text()')[0].strip()
            trs = right.xpath('tr')[3:]
            for tr in trs:
                try:
                    urban = tr.xpath('td[1]/text()')[0].strip().replace('\"', '')
                except IndexError:
                    urban = ""
                try:
                    countr = tr.xpath('td[2]/text()')[0].strip().replace('\"', '')
                except IndexError:
                    countr = ""
                site = tr.xpath('td[3]/text()')[0].strip().replace('\"', '')
                times = tr.xpath('td[4]/text()')[0].strip().replace('\"', '')
                water_level = tr.xpath('td[5]/text()')[0].strip().replace('\"', '')
                try:
                    warning_water_level = tr.xpath('td[6]/text()')[0].strip().replace('\"', '')
                except IndexError:
                    warning_water_level = ""

                csv_writes_right.writerow([urban, countr, site, times, water_level, warning_water_level, center])

        files_right.close()
        csv_right_data = pd.read_csv(name_files_right_csv, encoding='utf_8_sig')
        csv_right_data.to_excel(name_files_right_xlsx, index_label=False, index=False)

        print(today_current + '------广东水情系统已经完成')

        self.on_GuangdongWaterCrawl.emit(today_current + '------广东水情系统已经完成')

    # 国家地表水质
    @pyqtSlot()
    def ChinaQualityCrawl(self):
        if not os.path.exists('data/全国地表水质日数据'):
            os.makedirs('./data/全国地表水质日数据')
        else:
            pass
        paths_china_quality = './data/全国地表水质日数据'

        name_csv = '{}/{}_China_Surface_water_quality.csv'.format(paths_china_quality, today_china_water)
        name_xlsx = '{}/{}_China_Surface_water_quality.xlsx'.format(paths_china_quality, today_china_water)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)

        url = 'http://106.37.208.243:8068/GJZ/Ajax/Publish.ashx'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
            , 'Referer': 'http://106.37.208.243:8068/GJZ/Business/Publish/RealData.html'
            , 'Origin': 'http://106.37.208.243:8068'
            , 'Host': '106.37.208.243:8068'}

        field = {'AreaID': ''
            , 'RiverID': ''
            , 'MNName': ''
            , 'PageIndex': '-1'
            , 'PageSize': '60'
            , 'action': 'getNewDatas'
                 }
        html = requests.post(url, data=field, headers=headers)
        html.encoding = 'utf-8'
        datas = json.loads(html.text)
        titles = datas['thead']
        app = []
        for title in titles:
            head = title.replace("<br/><span class='unit'>", '').replace("</span>", '')
            app.append(head)
        csv_writes.writerow(app)
        app_dict = []
        tbodys = datas['tbody']

        for tbody in tbodys:
            province = tbody[0]
            watershed = tbody[1]
            try:
                river = \
                re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[2])[
                    0]
            except IndexError:
                river = tbody[2]
            monitoring_time = tbody[3]
            water_qua_cate = tbody[4]
            try:
                water_temp = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[5])[
                        0]
            except IndexError:
                water_temp = tbody[5]
            try:
                ph = \
                re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[6])[0]
            except IndexError:
                ph = tbody[6]
            try:
                dissolved_oxygen = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[7])[
                        0]
            except IndexError:
                dissolved_oxygen = tbody[7]
            try:
                conductivity = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[8])[
                        0]
            except IndexError:
                conductivity = tbody[8]
            # 浊度
            try:
                turbidity = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>", tbody[9])[
                        0]
            except IndexError:
                turbidity = tbody[9]
            # 高锰酸盐指数
            try:
                permanganate_index = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[10])[0]
            except IndexError:
                permanganate_index = tbody[10]
            try:
                ammonia = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[11])[0]
            except IndexError:
                ammonia = tbody[11]
            try:

                total_phosphorus = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[12])[0]
            except IndexError:
                total_phosphorus = tbody[12]

            try:
                total_ammonia = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[13])[0]
            except IndexError:
                total_ammonia = tbody[13]
            try:
                chlorophyll_alpha = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[14])[0]
            except IndexError:
                chlorophyll_alpha = tbody[14]
            try:
                algae_density = \
                    re.findall("<span data-toggle='tooltip' data-placement='right' title='.*?'>(.*?)</span>",
                               tbody[15])[0]
            except IndexError:
                algae_density = tbody[15]

            site_situation = tbody[16]
            app_dict.append([province, watershed, river, monitoring_time, water_qua_cate
                                , water_temp, ph, dissolved_oxygen, conductivity, turbidity
                                , permanganate_index, ammonia, total_phosphorus, total_ammonia
                                , chlorophyll_alpha, algae_density, site_situation
                             ])

        for data in app_dict:
            csv_writes.writerow(data)

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        self.on_ChinaQualityCrawl.emit(today_current + '------国家地表水质已经完成')

    # 珠江流域
    @pyqtSlot()
    def ZhujiangCrawl(self):
        if not os.path.exists('data/珠江水情日数据'):
            os.makedirs('./data/珠江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/珠江水情日数据'

        url = 'http://www.zwswj.com/zwsw2011/asp/syqxx/main_syqxx.asp'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
            ,
            'Cookie': 'ant_stream_58ee42b978925=1604425571/213726201; bow_stream_58ee42b978925=13; ASPSESSIONIDQCBRACDC=EGBMAAEAIMJEIDEMMPJFFHKJ; ASPSESSIONIDQABRBDCC=EFAAEMABBPGALKNFGEBIAPLJ'
            , 'Host': 'www.zwswj.com'
            ,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
            , 'Proxy-Connection': 'keep-alive'
        }
        html = requests.get(url, headers=headers)
        html.encoding = 'gb2312'
        datas = etree.HTML(html.text)

        name_files_left_csv = '{}/{}_Zhujiang_Daily_Water_Main_Reservoir.csv'.format(paths_zhujiang_con,
                                                                                     today_china_water)
        name_files_left_xlsx = '{}/{}_Zhujiang_Daily_Water_Main_Reservoir.xlsx'.format(paths_zhujiang_con,
                                                                                       today_china_water)

        files_left = open(name_files_left_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes_left = csv.writer(files_left)
        csv_writes_left.writerow(['站名', '河名', '时间', '库内水位(m)', '蓄水量(106m3)', '入库(m3/s)', '出库(m3/s)'])

        leftTree = datas.xpath('//div[@align="center"]/center/table[2]/tr')[1:]
        for letr in leftTree:
            urban = letr.xpath('td[1]/font/text()')[0].strip()
            countr = letr.xpath('td[2]/font/text()')[0].strip()
            site = letr.xpath('td[3]/font/text()')[0].strip()
            times = letr.xpath('td[4]/font/text()')[0].strip()
            water_level = letr.xpath('td[5]/font/text()')[0].strip()
            try:
                warning_water_level = letr.xpath('td[6]/font/text()')[0].strip()
            except IndexError:
                warning_water_level = ""
            # 入库
            water_potential = letr.xpath('td[7]/font/text()')[0].strip()

            csv_writes_left.writerow([urban, countr, site, times, water_level, warning_water_level, water_potential])

        files_left.close()
        csv_left_data = pd.read_csv(name_files_left_csv, encoding='utf_8_sig')
        csv_left_data.to_excel(name_files_left_xlsx, index_label=False, index=False)
        print(today_current + '------珠江流域主要水库已经完成')

        name_files_right_csv = '{}/{}_Zhujiang_Daily_Water_Main_Site.csv'.format(paths_zhujiang_con, today_china_water)
        name_files_right_xlsx = '{}/{}_Zhujiang_Daily_Water_Main_Site.xlsx'.format(paths_zhujiang_con,
                                                                                   today_china_water)

        files_right = open(name_files_right_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes_right = csv.writer(files_right)
        csv_writes_right.writerow(['站名', '河名', '时间', '水位(m)', '流量(m3/s)', '水势'])

        RightTree = datas.xpath('//div[@align="center"]/center/table[4]/tr')[1:]

        for right in RightTree:

            urban = right.xpath('td[1]/font/text()')[0].strip()
            countr = right.xpath('td[2]/font/text()')[0].strip()
            site = right.xpath('td[3]/font/text()')[0].strip()
            times = right.xpath('td[4]/font/text()')[0].strip()
            water_level = right.xpath('td[5]/font/text()')[0].strip()
            try:
                warning_water_level = right.xpath('td[6]/font/text()')[0].strip()
            except IndexError:
                warning_water_level = ""

            csv_writes_right.writerow([urban, countr, site, times, water_level, warning_water_level])

        files_right.close()
        csv_right_data = pd.read_csv(name_files_right_csv, encoding='utf_8_sig')
        csv_right_data.to_excel(name_files_right_xlsx, index_label=False, index=False)

        self.on_ZhujiangCrawl.emit(today_current + '------珠江流域主要控制站已经完成')
    '''
    全国水情
    '''

    @pyqtSlot()
    def greatRiver(self):
        '''
        大江大河
        :return:
        '''
        if not os.path.exists('data/全国水情日数据'):
            os.makedirs('./data/全国水情日数据')
        else:
            pass
        paths_china_con = './data/全国水情日数据'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = '{}/{}_China_River Water.csv'.format(paths_china_con, today_current)
        name_xlsx = '{}/{}_China_River Water.xlsx'.format(paths_china_con, today_current)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['流域', '行政区', '河名', '站名', '时间', '水位(米)', '流量(米3/秒)', '警戒水位(米)'])

        htmls = requests.get('http://xxfb.mwr.cn/hydroSearch/greatRiver', headers=headers).text

        grs = json.loads(htmls)['result']['data']
        for gr in grs:
            # 流域
            poiBsnm = gr['poiBsnm'].strip()
            # 行政区
            poiAddv = gr['poiAddv'].strip()
            # 河名
            rvnm = gr['rvnm'].strip()
            # 站名
            stnm = gr['stnm'].strip()
            # 时间
            times = gr['dateTime'].strip()
            # 水位(米)
            zl = gr['zl']
            # 流量(米3/秒)
            ql = gr['ql']
            # 警戒水位
            wrz = gr['wrz']
            csv_writes.writerow([poiBsnm, poiAddv, rvnm, stnm, times, zl, ql, wrz])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)
        print(today_current + '-----全国水情-大江大河已经完成')

        self.on_greatRiver.emit(today_current + '-----全国水情-大江大河已经完成')


    @pyqtSlot()
    def greatReservoir(self):
        '''
        大型东江
        :return:
        '''
        if not os.path.exists('data/全国水情日数据'):
            os.makedirs('./data/全国水情日数据')
        else:
            pass
        paths_china_con = './data/全国水情日数据'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = '{}/{}_China_Reservoir Water.csv'.format(paths_china_con, today_current)
        name_xlsx = '{}/{}_China_Reservoir Water.xlsx'.format(paths_china_con, today_current)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['流域', '行政区', '河名', '库名', '库水位(米)', '蓄水量(百万3)', '入库(米3/秒)', '坝顶高程(米)', '时间'])

        htmls = requests.get('http://xxfb.mwr.cn/hydroSearch/greatRsvr', headers=headers).text

        grs = json.loads(htmls)['result']['data']
        for gr in grs:
            # 流域
            poiBsnm = gr['poiBsnm'].strip()
            # 行政区
            poiAddv = gr['poiAddv'].strip()
            # 河名
            rvnm = gr['rvnm'].strip()
            # 库名
            stnm = gr['stnm'].strip()
            # 库水位(米)
            rz = gr['rz']
            # 蓄水量(百万3)
            wl = gr['wl']
            # 入库(米3/秒)
            inq = gr['inq']
            # 坝顶高程(米)
            damel = gr['damel']
            # 时间
            tm = gr['tm']

            csv_writes.writerow([poiBsnm, poiAddv, rvnm, stnm, rz, wl, inq, damel, tm])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        
        
        csv_data.to_excel(name_xlsx, index_label=False, index=False)
        
        print(today_current + '-----全国水情-大型水库已经完成')

        self.on_greatReservoir.emit(today_current + '-----全国水情-大型水库已经完成')


    @pyqtSlot()
    def pointHydroInfo(self):
        '''
        重点雨水情
        :return:
        '''
        if not os.path.exists('data/全国水情日数据'):
            os.makedirs('./data/全国水情日数据')
        else:
            pass
        paths_china_con = './data/全国水情日数据'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = '{}/{}_China_Rain.csv'.format(paths_china_con, today_current)
        name_xlsx = '{}/{}_China_Rain.xlsx'.format(paths_china_con, today_current)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['流域', '行政区', '河名', '站名', '日期', '日雨量(毫米)', '天气'])

        htmls = requests.get('http://xxfb.mwr.cn/hydroSearch/pointHydroInfo', headers=headers).text

        grs = json.loads(htmls)['result']['data']
        for gr in grs:
            # 流域
            poiBsnm = gr['poiBsnm'].strip()
            # 行政区
            poiAddv = gr['poiAddv'].strip()
            # 河名
            rvnm = gr['rvnm'].strip()
            # 站名
            stnm = gr['stnm'].strip()
            # 时间
            tm = gr['tm']
            # 日雨量
            dyp = gr['dyp']
            # 天气
            wth = gr['wth']
            if wth == '7':
                wth = '雨'
            elif wth == '8':
                wth = '阴'
            elif wth == '9':
                wth = '晴'
            else:
                wth = ''
            csv_writes.writerow([poiBsnm, poiAddv, rvnm, stnm, tm, dyp, wth])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        print(today_current + '-----全国水情-重点雨水情已经完成')

        self.on_pointHydroInfo.emit(today_current + '-----全国水情-大型水库已经完成')

    # 时间转换
    def changetime(self, timeNum):
        timeStamp = float(timeNum / 1000)
        timeArray = time.localtime(timeStamp)
        otherStyleTime = time.strftime("%Y-%m-%d %H", timeArray)

        return otherStyleTime

    '''
    东江水情
    '''

    @pyqtSlot()
    def one_date_icp(self):
        '''
        # 重要控制断面
        :return:
        '''
        time.sleep(2.5)

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_Daily Runoff.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_Daily Runoff.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '水位', '流量'])

        import_control_panel = 'http://113.107.242.152/DJPublic/publicDataC/getRiverData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday, today)
        icp = requests.get(import_control_panel, headers=headers).text
        icp_datas = json.loads(icp)
        for icp_data in icp_datas:
            station_name = icp_data['stnm']
            timeStamp = icp_data['tm']

            station_time = self.changetime(timeStamp)


            z = icp_data['z']
            if z == None:
                water_level = ""
            else:
                water_level = np.round(z, 2)

            q = icp_data['q']
            if q == None:
                flow = ""
            else:
                flow = int(q)

            csv_writes.writerow([station_name, station_time, water_level, flow])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '------东江1日重要控制断面已经完成')

        self.on_one_date_icp.emit(today_current + '------东江1日重要控制断面已经完成')

    @pyqtSlot()
    def one_date_ir(self):
        '''
        重要水库
        :return:
        '''

        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_Daily Reservoir.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_Daily Reservoir.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '库水位', '出库流量'])
        # 重要水库
        import_reservoir = 'http://113.107.242.152/DJPublic/publicDataC/getRsvrData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday, today)
        ir = requests.get(import_reservoir, headers=headers).text
        ir_datas = json.loads(ir)
        for ir_data in ir_datas:
            station_name = ir_data['stnm']
            timeStamp = ir_data['tm']
            station_time = self.changetime(timeStamp)
            rz = ir_data['rz']
            if rz == None:
                water_level = ""
            else:
                water_level = np.round(rz, 2)

            otq = ir_data['otq']
            if otq == None:
                flow = ""
            else:
                flow = int(otq)

            csv_writes.writerow([station_name, station_time, water_level, flow])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)


        logger.info(today_current + '-----东江1日重要水库已经完成')

        self.on_one_date_ir.emit(today_current + '-----东江1日重要水库已经完成')

    @pyqtSlot()
    def one_date_mcps(self):
        '''
        干流梯级电站
        :return:
        '''
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_Daily Plant.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_Daily Plant.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '坝上水位', '坝下水位', '发电流量'])

        mainstream_cascade_power_station = 'http://113.107.242.152/DJPublic/publicDataC/getWasData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday, today)
        mcps = requests.get(mainstream_cascade_power_station, headers=headers).text
        mcps_datas = json.loads(mcps)

        for mcps_data in mcps_datas:
            station_name = mcps_data['stnm']
            timeStamp = mcps_data['tm']
            station_time = self.changetime(timeStamp)

            upz = mcps_data['upz']

            if upz == None:
                upz_water = ""
            else:
                upz_water = upz

            dwz = mcps_data['dwz']
            if dwz == None:
                dwz_water = ""
            else:
                dwz_water = dwz

            tgtq = mcps_data['tgtq']
            if tgtq == None:
                tgtq_water = ""
            else:
                tgtq_water = int(np.round(tgtq))

            csv_writes.writerow([station_name, station_time, upz_water, dwz_water, tgtq_water])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)



        logger.info(today_current + '------东江1日干流梯级电站已经完成')
        self.on_one_date_mcps.emit(today_current + '------东江1日干流梯级电站已经完成')


    @pyqtSlot()
    def one_date_il(self):
        '''
        重要咸情
        :return:
        '''
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_Daily Salt.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_Daily Salt.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '含氯度'])

        import_love = 'http://113.107.242.152/DJPublic/publicDataC/getSltData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday, today)
        il = requests.get(import_love, headers=headers).text
        il_datas = json.loads(il)
        for il_data in il_datas:
            station_name = il_data['stnm']
            timeStamp = il_data['tm']
            station_time = self.changetime(timeStamp)

            chlrn = il_data['chlrn']

            if chlrn == None:
                chlrn_water = ""
            else:
                chlrn_water = np.round(chlrn, 1)

            csv_writes.writerow([station_name, station_time, chlrn_water])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)


        logger.info(today_current + '-------东江1日重要咸情已经完成')

        self.on_one_date_ir.emit(today_current + '-------东江1日重要咸情已经完成')


    @pyqtSlot()
    def one_date_arc(self):
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        # 来电区间面平均雨量
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_Daily Rain.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_Daily Rain.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '平均雨量'])

        avg_rain_call = 'http://113.107.242.152/DJPublic/publicDataC/getPptnAvgData24?_dc={}&from={}&to={}&page=1&start=0&limit=25'.format(
            current_milli_time, yesterday, today)

        arc = requests.get(avg_rain_call, headers=headers).text
        arc_datas = json.loads(arc)
        for arc_data in arc_datas:
            station_name = arc_data['wfnm']
            timeStamp = arc_data['tm']
            station_time = self.changetime(timeStamp)

            avdrp = arc_data['avdrp']
            if avdrp == None:
                avdrp_water = ""
            else:
                avdrp_water = avdrp

            csv_writes.writerow([station_name, station_time, avdrp_water])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)
        logger.info(today_current + '------东江1日已经完成')

        self.on_one_date_arc.emit(today_current + '------东江1日已经完成')

    @pyqtSlot()
    def seven_date_icp(self):
        '''
        # 重要控制断面
        :return:
        '''
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }

        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_7 day Runoff.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_7 day Runoff.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '水位', '流量'])

        import_control_panel = 'http://113.107.242.152/DJPublic/publicDataC/getRiverData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday_seven, today)
        icp = requests.get(import_control_panel, headers=headers).text
        icp_datas = json.loads(icp)

        for icp_data in icp_datas:
            station_name = icp_data['stnm']
            timeStamp = icp_data['tm']

            station_time = self.changetime(timeStamp)

            z = icp_data['z']
            if z == None:
                water_level = ""
            else:
                water_level = np.round(z, 2)

            q = icp_data['q']
            if q == None:
                flow = ""
            else:
                flow = int(q)

            csv_writes.writerow([station_name, station_time, water_level, flow])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '------东江7日重要控制断面已经完成')

        self.on_seven_date_icp.emit(today_current + '------东江7日重要控制断面已经完成')


    @pyqtSlot()
    def seven_date_ir(self):
        '''
        重要水库
        :return:
        '''
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_7 day Reservoir.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_7 day Reservoir.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '库水位', '出库流量'])
        # 重要水库
        import_reservoir = 'http://113.107.242.152/DJPublic/publicDataC/getRsvrData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday_seven, today)
        ir = requests.get(import_reservoir, headers=headers).text
        ir_datas = json.loads(ir)
        for ir_data in ir_datas:
            station_name = ir_data['stnm']
            timeStamp = ir_data['tm']
            station_time = self.changetime(timeStamp)
            rz = ir_data['rz']
            if rz == None:
                water_level = ""
            else:
                water_level = np.round(rz, 2)

            otq = ir_data['otq']
            if otq == None:
                flow = ""
            else:
                flow = int(otq)

            csv_writes.writerow([station_name, station_time, water_level, flow])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '------东江7日重要水库已经完成')

        self.on_seven_date_ir.emit(today_current + '------东江7日重要水库已经完成')

    @pyqtSlot()
    def seven_date_mcps(self):
        '''
        干流梯级电站
        :return:
        '''
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_7 day Plant.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_7 day Plant.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '坝上水位', '坝下水位', '发电流量'])

        mainstream_cascade_power_station = 'http://113.107.242.152/DJPublic/publicDataC/getWasData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday_seven, today)
        mcps = requests.get(mainstream_cascade_power_station, headers=headers).text
        mcps_datas = json.loads(mcps)

        for mcps_data in mcps_datas:
            station_name = mcps_data['stnm']
            timeStamp = mcps_data['tm']
            station_time = self.changetime(timeStamp)

            upz = mcps_data['upz']

            if upz == None:
                upz_water = ""
            else:
                upz_water = upz

            dwz = mcps_data['dwz']
            if dwz == None:
                dwz_water = ""
            else:
                dwz_water = dwz

            tgtq = mcps_data['tgtq']
            if tgtq == None:
                tgtq_water = ""
            else:
                tgtq_water = int(np.round(tgtq))

            csv_writes.writerow([station_name, station_time, upz_water, dwz_water, tgtq_water])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '----东江7日干流梯级电站已经完成')

        self.on_seven_date_mcps.emit(today_current + '----东江7日干流梯级电站已经完成')


    @pyqtSlot()
    def seven_date_il(self):
        '''
        重要咸情
        :return:
        '''
        time.sleep(2.5)
        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_7 day Salt.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_7 day Salt.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '含氯度'])

        import_love = 'http://113.107.242.152/DJPublic/publicDataC/getSltData24?_dc={}&from={}&to={}&page=1&start=0&limit=25&sort=%5B%7B%22property%22%3A%22stcd%22%2C%22direction%22%3A%22ASC%22%7D%2C%7B%22property%22%3A%22tm%22%2C%22direction%22%3A%22DESC%22%7D%5D'.format(
            current_milli_time, yesterday_seven, today)
        il = requests.get(import_love, headers=headers).text
        il_datas = json.loads(il)
        for il_data in il_datas:
            station_name = il_data['stnm']
            timeStamp = il_data['tm']
            station_time = self.changetime(timeStamp)

            chlrn = il_data['chlrn']

            if chlrn == None:
                chlrn_water = ""
            else:
                chlrn_water = np.round(chlrn, 1)

            csv_writes.writerow([station_name, station_time, chlrn_water])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '-----东江7日重要咸情已经完成')

        self.on_seven_date_il.emit(today_current + '-----东江7日重要咸情已经完成')

    @pyqtSlot()
    def seven_date_arc(self):
        # 来电区间面平均雨量
        time.sleep(2.5)

        if not os.path.exists('data/东江水情日数据'):
            os.makedirs('./data/东江水情日数据')
        else:
            pass
        paths_zhujiang_con = './data/东江水情日数据/'

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360SE/12.3.1098.0'

        }
        name_csv = paths_zhujiang_con + today_current + '_Dongjiang_7 day Rain.csv'
        name_xlsx = paths_zhujiang_con + today_current + '_Dongjiang_7 day Rain.xlsx'

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['测站名称', '时间', '平均雨量'])

        avg_rain_call = 'http://113.107.242.152/DJPublic/publicDataC/getPptnAvgData24?_dc={}&from={}&to={}&page=1&start=0&limit=25'.format(
            current_milli_time, yesterday_seven, today)

        arc = requests.get(avg_rain_call, headers=headers).text
        arc_datas = json.loads(arc)
        for arc_data in arc_datas:
            station_name = arc_data['wfnm']
            timeStamp = arc_data['tm']
            station_time = self.changetime(timeStamp)

            avdrp = arc_data['avdrp']
            if avdrp == None:
                avdrp_water = ""
            else:
                avdrp_water = avdrp

            csv_writes.writerow([station_name, station_time, avdrp_water])

        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        logger.info(today_current + '----东江7日区间面平均雨量已经完成')

        self.on_seven_date_arc.emit(today_current + '----东江7日区间面平均雨量已经完成')


    @pyqtSlot()
    def yellow(self):
        if not os.path.exists('data/黄河水情日数据'):
            os.makedirs('./data/黄河水情日数据')
        else:
            pass
        paths_huanghe_con = './data/黄河水情日数据'

        time.sleep(1.5)
        name_csv = '{}/{}_Huanghe_Daily_Water.csv'.format(paths_huanghe_con, today_current)
        name_xlsx = '{}/{}_Huanghe_Daily_Water.xlsx'.format(paths_huanghe_con, today_current)

        files = open(name_csv, 'w', encoding='utf_8_sig', newline='')
        csv_writes = csv.writer(files)
        csv_writes.writerow(['河名', '站名', '水位', '流量', '含沙量', '日期'])
        url = 'http://61.163.88.227:8006/hwsq.aspx'
        data = {
            'ctl00$ScriptManager1': 'ctl00$ScriptManager1|ctl00$ContentLeft$Button1'
            , '__EVENTTARGET': ''
            , '__EVENTARGUMENT': ''
            , 'ctl00$ContentLeft$menuDate1$TextBox11': '{}'.format(today_current)
            ,
            '__VIEWSTATE': '/wEPDwULLTEwMDI5NzA1NzkPZBYCZg9kFgICAw9kFgICBQ9kFgJmD2QWAgIBD2QWAgIBDxYCHglpbm5lcmh0bWwFyEw8dGFibGUgd2lkdGg9Ijk4JSIgYm9yZGVyPSIwIiBjZWxscGFkZGluZz0iMCIgY2VsbHNwYWNpbmc9IjEiIGJnY29sb3I9IiNEMUREQUEiIGFsaWduPSJjZW50ZXIiPjx0cj48dGQgaGVpZ2h0PSI0MCIgYmFja2dyb3VuZD0ic2tpbi9pbWFnZXMvbmV3bGluZWJnMy5naWYiPjx0YWJsZSB3aWR0aD0iOTglIiBib3JkZXI9IjAiIGNlbGxzcGFjaW5nPSIwIiBjZWxscGFkZGluZz0iMCI+PHRyPjx0ZCBhbGlnbj0iY2VudGVyIj48ZGl2IGNsYXNzPSdmaXJzdFRpdGxlJz7msLTmg4Xml6XmiqU8L2Rpdj48ZGl2IGNsYXNzPSdzZWNUaXRsZSc+MjAyMC0xMS0wMjwvZGl2PjwvdGQ+PC90cj48L3RhYmxlPjwvdGQ+PC90cj48L3RhYmxlPjx0YWJsZSB3aWR0aD0iOTglIiBib3JkZXI9IjAiIGNlbGxwYWRkaW5nPSIyIiBjZWxsc3BhY2luZz0iMSIgYmdjb2xvcj0iI0QxRERBQSIgYWxpZ249ImNlbnRlciIgc3R5bGU9Im1hcmdpbi10b3A6OHB4IiBjbGFzcz0ibWFpblR4dCI+PHRyPjx0ZCB3aWR0aD0iNTAlIj48dGFibGUgd2lkdGg9IjEwMCUiIGJvcmRlcj0iMCIgY2VsbHBhZGRpbmc9IjIiIGNlbGxzcGFjaW5nPSIxIiBiZ2NvbG9yPSIjRDFEREFBIiBhbGlnbj0iY2VudGVyIiBzdHlsZT0ibWFyZ2luLXRvcDo4cHgiIGNsYXNzPSJtYWluVHh0Ij48VFIgYWxpZ249J2NlbnRlcicgYmdjb2xvcj0nI0U3RTdFNycgaGVpZ2h0PScyMicgY2xhc3M9J3RhYmxlVGl0bGUnID48VEQgd2lkdGg9IjE1JSIgc3R5bGU9ImZvbnQtc2l6ZToxMXB0OyI+5rKz5ZCNPC9URD48VEQgd2lkdGg9IjI1JSIgc3R5bGU9ImZvbnQtc2l6ZToxMXB0OyI+56uZ5ZCNPC9URD48VEQgd2lkdGg9IjIwJSIgc3R5bGU9ImZvbnQtc2l6ZToxMXB0OyI+5rC05L2NPC9URD48VEQgd2lkdGg9IjIwJSIgc3R5bGU9ImZvbnQtc2l6ZToxMXB0OyI+5rWB6YePPC9URD48VEQgd2lkdGg9IjIwJSIgc3R5bGU9ImZvbnQtc2l6ZToxMXB0OyI+5ZCr5rKZ6YePPC9URD48L1RSPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7llJDkuYPkuqUgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4yNjcyLjU3PC90ZD48dGQ+MTA5MDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPum+mee+iuWzoeWFpeW6kzwvdGQ+PHRkPi08L3RkPjx0ZD4xMDYwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+6b6Z576K5bOh6JOE5rC06YePPC90ZD48dGQ+MjYwMC44NDwvdGQ+PHRkPigyNDYp5Lq/PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+6b6Z576K5bOh5Ye65bqTPC90ZD48dGQ+LTwvdGQ+PHRkPjkxNzwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuWImOWutuWzoeWFpeW6kzwvdGQ+PHRkPi08L3RkPjx0ZD4xMDQwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5YiY5a625bOh6JOE5rC06YePPC90ZD48dGQ+MTcyMi44MzwvdGQ+PHRkPigyNS4yKeS6vzwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuWImOWutuWzoeWHuuW6kzwvdGQ+PHRkPi08L3RkPjx0ZD4xNzkwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5YWw5beeICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xNTEyLjYyPC90ZD48dGQ+MjE4MDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuS4i+ays+ayvyAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjEyMzEuMzwvdGQ+PHRkPjIwNDA8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7nn7PlmLTlsbEgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xMDg3LjIxPC90ZD48dGQ+MTg4MDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuW3tOW9pumrmOWLkiAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xMDQ5LjY3PC90ZD48dGQ+MTExMDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuS4iea5luays+WPoyAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xMDE3LjI4PC90ZD48dGQ+MTEwMDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuWMheWktCAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+MTAwMi41PC90ZD48dGQ+MTMyMDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuWktOmBk+aLkCAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjk4Ny4zNzwvdGQ+PHRkPjEyNjA8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7kuIflrrblr6jok4TmsLTph488L3RkPjx0ZD45NzQuODc8L3RkPjx0ZD4oNC4xMSnkur88L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7kuIflrrblr6jkuIrlh7rlupM8L3RkPjx0ZD4tPC90ZD48dGQ+MTY4MDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuS4h+WutuWvqOS4iyAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD45MDAuNjc8L3RkPjx0ZD4xNjgwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5bqc6LC3ICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD44MDguNzU8L3RkPjx0ZD4xMDkwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5ZC05aChICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD42MzYuMDc8L3RkPjx0ZD41NzE8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7pvpnpl6ggICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjM3Ny42ODwvdGQ+PHRkPjk1MDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7msb7msrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuays+a0pSAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+MzcwLjcyPC90ZD48dGQ+MjI8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5YyX5rSb5rKzICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+54q25aS0ICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4zNjAuNzY8L3RkPjx0ZD4xNS40PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPuazvuaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5byg5a625bGxICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+NDE5Ljk2PC90ZD48dGQ+MjMuNDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7muK3msrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuWSuOmYsyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+Mzc3LjAxPC90ZD48dGQ+MTM3PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPua4reaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5Y2O5Y6/ICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4zMzQuODY8L3RkPjx0ZD4yNjg8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7mvbzlhbMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjMyNi4yOTwvdGQ+PHRkPjE2NjA8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7lj7Llrrbmu6nok4TmsLTph488L3RkPjx0ZD4zMTcuNzY8L3RkPjx0ZD4oNS44NSnkur88L3RkPjx0ZD4tPC90ZD48L3RyPjwvdGFibGU+PC90ZD48dGQgd2lkdGg9IjUwJSI+PHRhYmxlIHdpZHRoPSIxMDAlIiBib3JkZXI9IjAiIGNlbGxwYWRkaW5nPSIyIiBjZWxsc3BhY2luZz0iMSIgYmdjb2xvcj0iI0QxRERBQSIgYWxpZ249ImNlbnRlciIgc3R5bGU9Im1hcmdpbi10b3A6OHB4IiBjbGFzcz0ibWFpblR4dCI+PFRSIGFsaWduPSdjZW50ZXInIGJnY29sb3I9JyNFN0U3RTcnIGhlaWdodD0nMjInIGNsYXNzPSd0YWJsZVRpdGxlJyA+PFREIHdpZHRoPSIxNSUiIHN0eWxlPSJmb250LXNpemU6MTFwdDsiPuays+WQjTwvVEQ+PFREIHdpZHRoPSIyNSUiIHN0eWxlPSJmb250LXNpemU6MTFwdDsiPuermeWQjTwvVEQ+PFREIHdpZHRoPSIyMCUiIHN0eWxlPSJmb250LXNpemU6MTFwdDsiPuawtOS9jTwvVEQ+PFREIHdpZHRoPSIyMCUiIHN0eWxlPSJmb250LXNpemU6MTFwdDsiPua1gemHjzwvVEQ+PFREIHdpZHRoPSIyMCUiIHN0eWxlPSJmb250LXNpemU6MTFwdDsiPuWQq+aymemHjzwvVEQ+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuS4iemXqOWzoSAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjI3NC43MjwvdGQ+PHRkPjEyNTA8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7lsI/mtarlupXkuIrok4TmsLTph488L3RkPjx0ZD4yNTMuNDg8L3RkPjx0ZD4oNDQuOSnkur88L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7lsI/mtarlupUgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xMzMuODY8L3RkPjx0ZD42NTI8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5LyK5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7kuJzmub4gICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPi08L3RkPjx0ZD4tPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPuS8iuaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+6ZmG5rWR5Z2d5LiK6JOE5rC06YePPC90ZD48dGQ+MzE0LjIxPC90ZD48dGQ+KDQuNjgp5Lq/PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPuS8iuaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+6ZmG5rWR5Z2d5LiK5Ye65bqTPC90ZD48dGQ+LTwvdGQ+PHRkPi08L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5LyK5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7pvpnpl6jplYcgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xNDcuMzc8L3RkPjx0ZD44Ljk1PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPua0m+aysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5Y2i5rCPICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD41NTAuMDc8L3RkPjx0ZD40LjgwPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPua0m+aysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5pWF5Y6/5rC05bqT6JOE5rC06YePPC90ZD48dGQ+NTMwLjA3PC90ZD48dGQ+KDUuMzQp5Lq/PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPua0m+aysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5pWF5Y6/5rC05bqT5Ye65bqTPC90ZD48dGQ+LTwvdGQ+PHRkPi08L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5rSb5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7plb/msLTvvIjkuozvvIkgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjM3Ny45MTwvdGQ+PHRkPjIuNjI8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5rSb5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7nmb3pqazlr7ogICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4xMTIuNjQ8L3RkPjx0ZD4xNC4zPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPuS8iua0m+aysyAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPum7keefs+WFsyAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjEwNC42NTwvdGQ+PHRkPjMwLjY8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5Li55rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7lsbHot6/lnaogICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD4tPC90ZD48dGQ+LTwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7msoHmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuS6lOm+meWPoyAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjE0MS40MzwvdGQ+PHRkPjkuNTA8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+5rKB5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7mrabpmZ8gICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjk4LjM0PC90ZD48dGQ+NS4xOTwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuiKseWbreWPoyAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjg4LjUzPC90ZD48dGQ+NzEyPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5aS55rKz5rupICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+NzEuNDE8L3RkPjx0ZD44NDU8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7pq5jmnZEgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjU3LjUzPC90ZD48dGQ+ODA1PC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5a2Z5Y+jICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD40Mi44NTwvdGQ+PHRkPjgxNDwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7lpKfmsbbmsrMgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7miLTmnZHlnZ08L3RkPjx0ZD4tPC90ZD48dGQ+LTwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7kuJzlubPmuZYgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7kuJzlubPmuZbogIHmuZbok4TmsLTph488L3RkPjx0ZD40MS40OTwvdGQ+PHRkPig1LjMzKeS6vzwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7lpKfmsbbmsrMgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7lh7rmuZbpl7g8L3RkPjx0ZD4tPC90ZD48dGQ+LTwvdGQ+PHRkPi08L3RkPjwvdHI+PHRyIGFsaWduPSJjZW50ZXIiIGJnY29sb3I9IiNGRkZGRkYiPjx0ZD7pu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPuiJvuWxsSAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+MzYuMDY8L3RkPjx0ZD44MzU8L3RkPjx0ZD4tPC90ZD48L3RyPjx0ciBhbGlnbj0iY2VudGVyIiBiZ2NvbG9yPSIjRkZGRkZGIj48dGQ+6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD7ms7rlj6MgICAgICAgICAgICAgICAgICAgICAgICAgIDwvdGQ+PHRkPjI0LjUyPC90ZD48dGQ+NzUxPC90ZD48dGQ+LTwvdGQ+PC90cj48dHIgYWxpZ249ImNlbnRlciIgYmdjb2xvcj0iI0ZGRkZGRiI+PHRkPum7hOaysyAgICAgICAgICAgICAgICAgICAgICAgICAgPC90ZD48dGQ+5Yip5rSlICAgICAgICAgICAgICAgICAgICAgICAgICA8L3RkPjx0ZD45PC90ZD48dGQ+NzUwPC90ZD48dGQ+LTwvdGQ+PC90cj48L3RhYmxlPjwvdGQ+PC90cj48L3RhYmxlPmRkMcukZjNjdxhoeeCH9LMvuUeJ8T94QMynmVzG3x7nKk0='
            , '__VIEWSTATEGENERATOR': 'E4DC7756'
            ,
            '__EVENTVALIDATION': '/wEdAAMT20uVlEkupoFlCOwtNDVc9DkLBAR+UXBBGQ1m5cY+HY5Ggl8DGIT46Qo2GBY6Yh6tXvIcYoroBtjknvKJd4ATZlSgAOM4C9NqzoaMSMxR4Q=='
            , '__ASYNCPOST': 'true'
            , 'ctl00$ContentLeft$Button1': '查询'

        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
            , 'Host': '61.163.88.227:8006'
            , 'Origin': 'http://61.163.88.227:8006'
            , 'Referer': 'http://61.163.88.227:8006/hwsq.aspx'
            }
        try:
            html = requests.post(url, data=data, headers=headers).text
        except requests.exceptions.ConnectionError:
            time.sleep(20)
            html = requests.post(url, data=data, headers=headers).text
        bs = BeautifulSoup(html, 'html.parser')
        divs = str(bs.select('div')[0])

        datas = etree.HTML(divs, etree.HTMLParser())
        web_time = datas.xpath('//div[@class="secTitle"]/text()')[0]
        print(web_time)

        trs = datas.xpath('//tr[@bgcolor="#FFFFFF"]')
        for tr in trs:
            try:
                name = tr.xpath('td[1]/text()')[0].strip()
            except IndexError:
                name = ""
            try:
                side = tr.xpath('td[2]/text()')[0].strip()
            except IndexError:
                side = ""
            try:
                water_level = tr.xpath('td[3]/text()')[0].strip()
            except IndexError:
                water_level = ""
            try:
                flow = tr.xpath('td[4]/text()')[0].strip()
            except IndexError:
                flow = ""
            try:
                sand = tr.xpath('td[5]/text()')[0].strip()
            except IndexError:
                sand = ""

            csv_writes.writerow([name, side, water_level, flow, sand, web_time])
        files.close()
        csv_data = pd.read_csv(name_csv, encoding='utf_8_sig')
        csv_data.to_excel(name_xlsx, index_label=False, index=False)

        self.on_yellow.emit(today_current + '-----水情日报黄河 已经完成')