# NOTE(review): this entire file is commented-out (dead) code — a disabled
# Outlook/OWA email scraper. It embeds hardcoded credentials (see the
# ConfluenceApi password, the webmail login password, and the SMTP password
# below); rotate those secrets and load them from config/env before ever
# re-enabling this script, or delete the file outright.
# import json
# import smtplib
# import time
# from email.header import Header
# from email.mime.text import MIMEText
# import requests
# from selenium import webdriver
# from confluence_api import ConfluenceApi
# import urllib3
#
# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# # 配置设置
# c = ConfluenceApi(
#     'https://ar.intra.nsfocus.com',
#     'ar1',
#     'tpo@31580')
# options = webdriver.ChromeOptions()
# options.add_argument('--ignore-certificate-errors')
# browser = webdriver.Chrome(chrome_options=options, executable_path="chromedriver.exe")
# # 保持最大窗口
# browser.maximize_window()
# # 跳转到outlook
#
# browser.get("https://mail.intra.nsfocus.com/")
# time.sleep(3)
# # 输入账号
# browser.find_elements_by_id('username')[0].send_keys('yangyue6@intra.nsfocus.com')
# # 输入密码
# browser.find_elements_by_id('password')[0].send_keys('bnfMYN627')
# time.sleep(1)
# # 点击登录按钮
# browser.find_elements_by_class_name('signinTxt')[0].click()
# time.sleep(3)
# cookies = 'QuantumMetricUserID=263793eb82d381e580cd9e89f80a7453; ' \
#           '_ga_PMXYWTHPVN=GS1.1.1664514653.39.1.1664514675.38.0.0; PrivateComputer=true; ' \
#           '_ga=GA1.2.1837734744.1662648581; _ga_Y7CNRMFF6J=GS1.1.1667804213.28.1.1667807038.57.0.0; '
# x_canary = ''
# # list_cookies = list(cookies)
# req = browser.get_cookies()
# for r in range(0, len(req)):
#     c_token = req[r]['name'] + '=' + req[r]['value'] + ';'
#     cookies = cookies + c_token
#     if req[r]['name'] == 'X-OWA-CANARY':
#         x_canary = x_canary.join(req[r]['value'])
#         # print(x_canary, '---------canary')
#     # print(c_token, '---------', r)
#     # print(cookies, '---------', r)
# # print(cookies)
# # print(x_canary)
# time.sleep(3)
# # 判断是否有未读邮件
# no_read = browser.find_elements_by_xpath('/html/body/div[2]/div/div[3]/div[5]/div/div[1]/div/div[1]/div[2]/div/div[5]/'
#                                          'div[2]/div/div/div[2]/div/div/div[1]/div[2]/div[2]/div/div/div[2]/div[1]/div/'
#                                          'div[1]/span/div/div[2]/span')[0].text.strip()
# # print(no_read,'-----', type(no_read))
# # print(no_read.isspace())
#
#
# # 获取收件箱内邮件xpath
# from_xpath_a = "/html/body/div[2]/div/div[3]/div[5]/div/div[1]/div/div[5]/div[3]/div/div[2]/div/div/div/div[5]/div[3]/" \
#                "div[1]/div[3]/div[1]/div/div/div[2]"
# # 无未读邮件
# if len(no_read) == 0:
#     print('1--暂无未读信息')
# # 有未读邮件
# elif int(no_read) != 0:
#     headers = {
#         'authority': 'mail.intra.nsfocus.com',
#         'method': 'POST',
#         'path': '/owa/service.svc?action=GetConversationItems&EP=1&UA=0&ID=-4&AC=1',
#         'scheme': 'https',
#         'accept': '*/*',
#         'accept-encoding': 'gzip, deflate, br',
#         'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,en-US;q=0.6',
#         'action': 'GetConversationItems',
#         'cache-control': 'no-cache',
#         'client-request-id': '1CE8FB38271D49B7AD10684C434EE998_166813403194306',
#         'content-length': '0',
#         'content-type': 'application/json; charset=UTF-8',
#         'origin': 'https://mail.intra.nsfocus.com',
#         'pragma': 'no-cache',
#         'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
#         'sec-ch-ua-mobile': '?0',
#         'sec-ch-ua-platform': '"Windows"',
#         'sec-fetch-dest': 'empty',
#         'sec-fetch-mode': 'cors',
#         'sec-fetch-site': 'same-origin',
#         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
#         'x-owa-actionid': '-4',
#         'x-owa-actionname': 'GetConversationItemsAction',
#         'x-owa-attempt': '1',
#         'x-owa-clientbuildversion': '15.1.2375.37',
#         'x-owa-urlpostdata': '%7B%22__type%22%3A%22GetConversationItemsJsonRequest%3A%23Exchange%22%2C%22Header%22%3A%7B%22__type%22%3A%22JsonRequestHeaders%3A%23Exchange%22%2C%22RequestServerVersion%22%3A%22V2017_08_18%22%2C%22TimeZoneContext%22%3A%7B%22__type%22%3A%22TimeZoneContext%3A%23Exchange%22%2C%22TimeZoneDefinition%22%3A%7B%22__type%22%3A%22TimeZoneDefinitionType%3A%23Exchange%22%2C%22Id%22%3A%22China%20Standard%20Time%22%7D%7D%7D%2C%22Body%22%3A%7B%22__type%22%3A%22GetConversationItemsRequest%3A%23Exchange%22%2C%22Conversations%22%3A%5B%7B%22__type%22%3A%22ConversationRequestType%3A%23Exchange%22%2C%22ConversationId%22%3A%7B%22__type%22%3A%22ItemId%3A%23Exchange%22%2C%22Id%22%3A%22AAQkAGMwNTlhNTRmLWNlYWQtNDk4YS1iMmJlLWM5MmM5ZDdkN2FmNAAQAJMu2JWOKMbbVzlVVU5eCZw%3D%22%7D%2C%22SyncState%22%3A%22%22%7D%5D%2C%22ItemShape%22%3A%7B%22__type%22%3A%22ItemResponseShape%3A%23Exchange%22%2C%22BaseShape%22%3A%22IdOnly%22%2C%22FilterHtmlContent%22%3Atrue%2C%22BlockExternalImagesIfSenderUntrusted%22%3Atrue%2C%22BlockContentFromUnknownSenders%22%3Afalse%2C%22AddBlankTargetToLinks%22%3Atrue%2C%22ClientSupportsIrm%22%3Atrue%2C%22InlineImageUrlTemplate%22%3A%22data%3Aimage%2Fgif%3Bbase64%2CR0lGODlhAQABAIAAAAAAAP%2F%2F%2FyH5BAEAAAEALAAAAAABAAEAAAIBTAA7%22%2C%22FilterInlineSafetyTips%22%3Atrue%2C%22MaximumBodySize%22%3A2097152%2C%22MaximumRecipientsToReturn%22%3A20%2C%22CssScopeClassName%22%3A%22rps_ccae%22%2C%22InlineImageUrlOnLoadTemplate%22%3A%22InlineImageLoader.GetLoader().Load(this)%22%2C%22InlineImageCustomDataTemplate%22%3A%22%7Bid%7D%22%2C%22ExcludeBindForInlineAttachments%22%3Atrue%2C%22CalculateOnlyFirstBody%22%3Atrue%7D%2C%22ShapeName%22%3A%22ItemPart%22%2C%22SortOrder%22%3A%22DateOrderDescending%22%2C%22MaxItemsToReturn%22%3A20%2C%22FoldersToIgnore%22%3A%5B%7B%22__type%22%3A%22FolderId%3A%23Exchange%22%2C%22ChangeKey%22%3A%22AQAAAA%3D%3D%22%2C%22Id%22%3A%22AQMkAGMwNTlhNTRmLWNlYWQtNDk4YS1iMmJlLWM5MmM5ZDdkN2FmNAAuAAADMQI%2F1FE4DEKS3jj6TDpb%2BAEADZbT%2BxUbLUGakwvGy
# ZVYTwAAAgELAAAA%22%7D%5D%7D%7D',
#         'cookie': cookies,
#         'x-owa-canary': x_canary
#     }
#     params = (
#         ('action', 'GetConversationItems'),
#         ('EP', '1'),
#         ('UA', '0'),
#         ('ID', '-4'),
#         ('AC', '1')
#     )
#     response = requests.post('https://mail.intra.nsfocus.com/owa/service.svc?', params=params, headers=headers)
#     req_json = response.json()
#     # print(req_json)
#     res_html = \
#         req_json['Body']['ResponseMessages']['Items'][0]['Conversation']['ConversationNodes'][1]['QuotedTextList'][0]
#     with open('res_html.html', 'w', encoding='utf-8') as f:
#         f.write(res_html)
#         print('2--写入html页面成功')
#     # 关闭窗口
#     browser.quit()
#     time.sleep(3)
#     print('3--关闭outlook窗口，打开下载的邮件详情页')
#     # 重新操控selenium打开刚刚保存的html页面
#     options = webdriver.ChromeOptions()
#     options.add_argument('--ignore-certificate-errors')
#     browser = webdriver.Chrome(chrome_options=options, executable_path="chromedriver.exe")
#     # 保持最大窗口
#     browser.maximize_window()
#     browser.get('file:///K:/outlook邮件爬取/res_html.html')
#
#     try:
#         # gartner的推送邮件有两种ui，判断是哪一种
#         gar_detail = browser.find_elements_by_xpath('//div[3]/div[2]/table/tbody/tr/td/table/tbody/'
#                                                     'tr/td[1]/table/tbody/tr[2]/td/div/font/span/a/font/span/b/font')
#         # 第一种ui
#         if len(gar_detail) != 0:
#             for i in range(3, 35):
#                 title1 = browser.find_elements_by_xpath('//div[%s]/div[2]/table/tbody/tr/td/table/tbody/tr/'
#                                                         'td[1]/table/tbody/tr[2]/td/div/font/'
#                                                         'span/a/font/span/b/font' % i)
#                 title2 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/'
#                                                         'tr/td[1]/table/tbody/tr[2]/td/div/font/span/a/font/'
#                                                         'span/b/font' % i)
#                 # 判断标题xpath是否正确
#                 if len(title1) != 0:
#                     # 获取标题
#                     title1_1 = title1[0].text.strip()
#                     # 获取文章链接
#                     link1_1 = browser.find_elements_by_xpath('//div[%s]/div[2]/table/tbody/tr/td/table/'
#                                                              'tbody/tr/td[1]/table/tbody/'
#                                                              'tr[2]/td/div/font/span/a' % i)[0].get_attribute('href')
#                     # 查询是否存在ar阅览室
#                     have = c.search(title=title1_1, spaceKey='AR')['results']
#                     # 如果不存在就调用爬虫接口，开始爬取
#                     if not have:
#                         print('4--文章未存在，开始爬取')
#                         data = json.dumps({'link': link1_1})
#                         req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#                         print('5--爬虫方响应内容：', req, '-----------', req.json())
#                         # 提取ar阅览室链接
#                         ar_url = req.json()['message']
#                         print('6--ar阅览室链接：', ar_url)
#                         # 替换对应的原文链接为ar阅览室链接
#                         link1_1 = link1_1.replace('&', '&amp;')
#                         with open('res_html.html', 'r', encoding='utf-8') as f:
#                             data = f.read()
#                             data = data.replace(link1_1, 'www.baidu.com')
#                         with open('res_html.html', 'w', encoding='utf-8') as f:
#                             f.write(data)
#                         print('7--替换文章链接成功, 开始下一篇文章')
#                 elif len(title2) != 0:
#                     # 使用新的xpath获取标题和文章链接
#                     title1_2 = title2[0].text.strip()
#                     link1_2 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/'
#                                                              'tr/td[1]/table/tbody/tr[2]/td/div/font/'
#                                                              'span/a' % i)[0].get_attribute('href')
#                     # 查询是否存在ar阅览室
#                     have = c.search(title=title1_2, spaceKey='AR')['results']
#                     # 如果不存在就调用爬虫接口，开始爬取
#                     if not have:
#                         print('4--文章未存在，开始爬取')
#                         data = json.dumps({'link': link1_2})
#                         req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#                         print('5--爬虫方响应内容：', req, '-----------', req.json())
#                         # 提取ar阅览室链接
#                         ar_url = req.json()['message']
#                         print('6--ar阅览室链接：', ar_url)
#                         # 替换对应的原文链接为ar阅览室链接
#                         link1_2 = link1_2.replace('&', '&amp;')
#                         with open('res_html.html', 'r', encoding='utf-8') as f:
#                             data = f.read()
#                             data = data.replace(link1_2, 'www.baidu.com')
#                         with open('res_html.html', 'w', encoding='utf-8') as f:
#                             f.write(data)
#                         print('7--替换文章链接成功, 开始下一篇文章')
#
#                 elif len(title1) == 0 and len(title2) == 0:
#                     continue
#
#             print('8--所有文章爬取完毕')
#             # 准备发送邮件
#             try:
#                 # 配置信息
#                 smtp_server = 'mail.intra.nsfocus.com'
#                 user = 'yangyue6@intra.nsfocus.com'
#                 passwd = 'bnfMYN627'
#                 # 设置发件人和接收人
#                 sender = 'yangyue6@intra.nsfocus.com'
#                 receiver = 'yangyue6@intra.nsfocus.com'
#                 # 主题内容
#                 subject = '内容'
#                 # 邮件主体
#                 file = open('1.html', 'r', encoding='utf-8')
#                 test_repost = file.read()
#                 file.close()
#                 msg = MIMEText(test_repost, 'html', 'utf-8')
#                 msg['Subject'] = Header(subject, 'utf-8')
#                 # 连接服务器
#                 smtp = smtplib.SMTP()
#                 smtp.connect(smtp_server)
#                 # 登录
#                 smtp.login(user, passwd)
#                 # 发送邮件
#                 smtp.sendmail(sender, receiver, msg.as_string())
#                 smtp.quit()
#                 print('9--邮件发送成功')
#             except Exception as e:
#                 print(e, '发送邮件出错')
#     except Exception as e:
#         print(e)
#
#
#     # for i in range(2, int(no_read) + 2):
#     #     # 获取发件人的xpath
#     #     from_xpath = from_xpath_a + ("/div[%s]/div[2]/div[3]/div[1]/span/span" % i)
#     #     # 使用xpath获取发件人
#     #     try:
#     #         from_name = browser.find_elements_by_xpath(from_xpath)
#     #         # print(from_name)
#     #         if from_name[0].text.strip() == '王强':
#     #             print('点击未读邮件')
#     #             # 点击未读邮件，进入邮件详情页
#     #             browser.find_elements_by_xpath(from_xpath)[0].click()
#     #             time.sleep(5)
#     #             # browser.find_elements_by_xpath('//*[@id="primaryContainer"]/div[5]/div/div[1]/div/div[5]/div[3]/div/div[5]/div[1]/div/div/div[3]/div[2]/div[2]/div[10]/div/div/button')[0].click()
#     #             # time.sleep(3)
#     #             # gartner的推送邮件有两种ui，判断是哪一种
#     #             gar_detail = browser.find_elements_by_xpath('//*[@id="Conversation.FossilizedTextBody"]/div/div/div/'
#     #                                                         'span/div/div[6]/table/tbody/tr/td/div[3]/div[2]/table/'
#     #                                                         'tbody/tr[1]/td/table/tbody/tr/td/table/tbody/tr[1]/td/div/'
#     #                                                         'font/span/a[2]/font/span')
#     #             # 第一种ui
#     #             if len(gar_detail) == 0:
#     #                 for j in range(3, 35):
#     #                     title1 = browser.find_elements_by_xpath('//div[%s]/div[2]/table/tbody/tr/td/table/'
#     #                                                             'tbody/tr/td[1]/table/tbody/tr[2]/td/div/font/'
#     #                                                             'span/a/font/span/b/font' % j)
#     #                     # xpath正确
#     #                     if len(title1) != 0:
#     #                         # 获取元素内容
#     #                         title1_1 = title1[0].text.strip()
#     #
#     #                         link1_1 = browser.find_elements_by_xpath('//div[%s]/div[2]/table/tbody/tr/td/table/'
#     #                                                                  'tbody/tr/td[1]/table/tbody/tr[2]/td/div/font/'
#     #                                                                  'span/a' % j)[0].get_attribute('href')
#     #                         # 查询是否存在ar阅览室
#     #                         have = c.search(title=title1_1, spaceKey='AR')['results']
#     #                         # 如果不存在就调用爬虫接口，开始爬取
#     #                         if not have:
#     #                             print('文章未存在，开始爬取')
#     #                             # data = json.dumps({'link': link1_1})
#     #                             # req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#     #                             # print(req, '-----------', req.status_code)
#     #                             # time.sleep(5)
#     #                             # 替换文章链接
#     #                             # 右键点击当前邮件
#     #                             ActionChains(browser).context_click(from_name[0]).perform()
#     #                             # 弹出窗口，点击转发
#     #                             time.sleep(2)
#     #                             browser.find_element_by_xpath('/html/body/div[12]/div/div/div/div/div[3]/'
#     #                                                           'button/div/span[2]').click()
#     #                             time.sleep(5)
#     #                             # 右侧弹出转发窗口，写入收件人
#     #                             # browser.find_elements_by_xpath('//*[@id="primaryContainer"]/div[5]/div/div[1]/div/'
#     #                             #                                'div[5]/div[3]/div/div[5]/div[1]/div/div[3]/div[4]/'
#     #                             #                                'div/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/div[2]/'
#     #                             #                                'div[1]/div/div/div/span/div[1]/form/'
#     #                             #                                'input')[0].send_keys('wangqiang@nsfocus.com')
#     #                             # 获取文章标题位置
#     #                             tr_title = browser.find_elements_by_css_selector('#primaryContainer > div:nth-child(7) > div > div._n_T > div > div._n_X > div:nth-child(3) > div > div._n_Y > div.allowTextSelection > div > div._mcp_T2._mcp_W2 > div._mcp_U2._mcp_W2.customScrollBar.scrollContainer._mcp_Y2 > div > div._mcp_d1.ms-border-color-neutralLight > div._mcp_e1.ms-bg-color-white > div:nth-child(2) > div._mcp_z1.ms-border-color-neutralTertiary-hover.ms-border-color-neutralTertiaryAlt > div._mcp_22 > div > div._z_41.ms-bg-color-white > div:nth-child(1) > div:nth-child(3) > div > div > div:nth-child(3) > div > div:nth-child(7) > div > table > tbody > tr > td > div > table > tbody > tr > td > div:nth-child(%s) > div > table > tbody > tr > td > table > tbody > tr > td:nth-child(1) > table > tbody > tr:nth-child(2) > td > p > b > span > a > span' % j)[0]
#     #                             print(tr_title)
#     #                             if len(title1) != 0:
#     #                                 # 选中当前文章标题
#     #                                 tr_title.click()
#     #                                 tr_title.send_keys(Keys.HOME)
#     #                                 action = ActionChains(browser)
#     #                                 # action.click_and_hold(on_element=None)
#     #                                 action.drag_and_drop_by_offset()
#     #                                 # action.move_by_offset(279, 70)
#     #                                 action.release()
#     #                                 action.perform()
#     #                                 # tr_title.click()
#     #                                 # ActionChains(browser).double_click(on_element=None).perform()
#     #                                 # tr_title.send_keys(Keys.SHIFT, Keys.DOWN)
#     #                                 # tr_title.send_keys(Keys.SHIFT, Keys.DOWN)
#     #                                 # tr_title.send_keys(Keys.SHIFT, Keys.DOWN)
#     #
#     #                         else:
#     #                             print('----该文章已存在')
#     #                     # xpath错误
#     #                     elif len(title1) == 0:
#     #                         try:
#     #                             title1_2 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/tr/'
#     #                                                                       'td[1]/table/tbody/tr[2]/td/div/font/span/a/'
#     #                                                                       'font/span/b/font' % j)
#     #                             link1_2 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/tr/'
#     #                                                                      'td[1]/table/tbody/tr[2]/td/'
#     #                                                                      'div/font/span/a' % j)
#     #                             title1_test = title1_2[0].text.strip()
#     #                             link1_test = link1_2[0].get_attribute('href')
#     #                             # 查询是否存在ar阅览室
#     #                             have = c.search(title=title1_test, spaceKey='AR')['results']
#     #                             # 如果不存在就调用爬虫接口，开始爬取
#     #                             if not have:
#     #                                 print('文章未存在，开始爬取')
#     #                                 data = json.dumps({'link': link1_test})
#     #                                 req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#     #                                 print(req.status_code)
#     #                                 time.sleep(5)
#     #                             else:
#     #                                 print('----该文章已存在')
#     #                         except Exception as e:
#     #                             print(e, 'title1_3')
#     #                             if str(e) == 'list index out of range':
#     #                                 title1_3 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/'
#     #                                                                           'tbody/tr/td/table/tbody/tr[2]/td/div/'
#     #                                                                           'font/span/a/'
#     #                                                                           'font/span/b/font' % j)[0].text.strip()
#     #                                 link1_3 = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/'
#     #                                                                          'tbody/tr/td/table/tbody/tr[2]/td/div/'
#     #                                                                          'font/span/a' % j)[0].get_attribute('href')
#     #                                 # 查询是否存在ar阅览室
#     #                                 have = c.search(title=title1_3, spaceKey='AR')['results']
#     #                                 # 如果不存在就调用爬虫接口，开始爬取
#     #                                 if not have:
#     #                                     print('文章未存在，开始爬取')
#     #                                     data = json.dumps({'link': link1_3})
#     #                                     req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#     #                                     print(req.status_code)
#     #                                     time.sleep(5)
#     #                                 else:
#     #                                     print('----该文章已存在')
#     #
#     #             # 第二种ui
#     #             elif len(gar_detail) != 0:
#     #                 # 获取邮件详情页xpath
#     #                 for z in range(11, 16, 2):
#     #                     # 获取文章标题
#     #                     title2 = browser.find_elements_by_xpath('//*[@id="Conversation.FossilizedTextBody"]/div/div/'
#     #                                                             'div/span/div[1]/div[%s]/table/tbody/tr/td/div/table/'
#     #                                                             'tbody/tr/td/div/table/tbody/tr[1]/td/div/table/tbody/'
#     #                                                             'tr/td/div/div/table/tbody/tr/td/div/table/tbody/tr/'
#     #                                                             'td/table/tbody/tr/td/table/tbody/tr[1]/td/div/'
#     #                                                             'font/span/a/font/span/b/font' % z)[0].text.strip()
#     #
#     #                     # 获取文章链接
#     #                     link2 = browser.find_elements_by_xpath('//*[@id="Conversation.FossilizedTextBody"]/div/div/'
#     #                                                            'div/span/div[1]/div[%s]/table/tbody/tr/td/div/'
#     #                                                            'table/tbody/tr/td/div/table/tbody/tr[1]/td/div/'
#     #                                                            'table/tbody/tr/td/div/div/table/tbody/tr/td/div/'
#     #                                                            'table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[1]/'
#     #                                                            'td/div/font/span/a' % z)[0].get_attribute('href')
#     #                     print(title2, link2)
#     #                     # 在ar阅览室中搜索该文章
#     #                     have = c.search(title=title2, spaceKey='AR')['results']
#     #                     # ar阅览室中不存在该文章
#     #                     if not have:
#     #                         print('文章未存在，开始爬取')
#     #                         data = json.dumps({'link': link2})
#     #                         req = requests.post('http://192.168.19.16:9102/gartnerPdfSpider', data=data)
#     #                         print(req, '-----------', req.status_code)
#     #                         time.sleep(5)
#     #                     else:
#     #                         print(title2, '----该文章已存在')
#     #     except Exception as e:
#     #         print(e)
#     #         if str(e) == 'list index out of range':
#     #             print('当天邮件爬取完毕，正在停止程序。。。')
#
# # browser.quit()
