import base64
import json
import poplib
import quopri
import re
import smtplib
import time
from email.header import Header
from email.mime.text import MIMEText
from email.parser import Parser
from pathlib import Path

import requests
import urllib3
from selenium import webdriver

from confluence_api import ConfluenceApi

# Silence "insecure request" warnings triggered by the internal self-signed TLS setup.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# AR reading-room (Confluence) API client, shared by all *_ui() scrapers below.
# NOTE(review): credentials are hard-coded in source -- consider moving them to
# a config file or environment variables.
c = ConfluenceApi(
    'https://ar.intra.nsfocus.com',
    'ar1',
    'tpo@31580')


def down_mail():
    """
    Fetch the newest analyst-digest e-mail over POP3 and save its HTML body.

    Connects to the corporate POP3 server, walks the mailbox from the newest
    message backwards, decodes the first message that is valid UTF-8, writes
    its quoted-printable HTML part to ``res_html.html`` and returns the part
    of the subject after the first ':' for reuse as the reply subject.

    :return: subject fragment (text after the first ':'), or None when no
             message could be processed.
    """
    pop_server_host = "mail.nsfocus.com"
    # POP3-over-SSL port used by this server (non-standard; 995 is the usual one).
    pop_server_port = 996
    email_server = poplib.POP3_SSL(host=pop_server_host, timeout=30, port=pop_server_port)
    try:
        # NOTE(review): hard-coded credentials -- move to config/env.
        email_server.user('ar')
        email_server.pass_('u32Te7sAM')
        # list() returns (response, [b'msg_no size', ...], total octets).
        resp, mails, octets = email_server.list()
        print('1--检测到邮箱内邮件为%s封，开始运行程序' % len(mails))
        # Walk from the newest message backwards; messages 1..4 are never
        # visited (presumably old/fixed mailbox content -- TODO confirm).
        for i in range(len(mails), 4, -1):
            # retr(i) fetches message i as a list of byte-string lines.
            resp, lines, octets = email_server.retr(i)
            email_content = b'\r\n'.join(lines)
            try:
                email_content = email_content.decode('utf-8')
            except Exception as e:
                # Skip messages that are not valid UTF-8 text.
                print(str(e))
                continue
            # Parse the raw text into an email.message.Message.
            msg = Parser().parsestr(email_content)
            sub = msg.get('Subject', '')
            # Subject is expected to look like "<prefix>:<topic>"; guard against
            # subjects without a colon (previously crashed with IndexError).
            parts = sub.split(':')
            header = parts[1] if len(parts) > 1 else sub
            print(header, '---主题')
            b_msg = msg.get_payload()
            # assumes a multipart message whose second part is quoted-printable
            # HTML -- TODO confirm this holds for every digest layout.
            html_msg = quopri.decodestring(str(b_msg[1])).decode('utf-8')
            with open('res_html.html', 'w', encoding='utf-8') as f:
                f.write(html_msg)
            print('2--写入html页面成功')
            time.sleep(3)
            print('3--关闭outlook窗口，打开下载的邮件详情页')
            return header
    finally:
        # Always release the POP3 connection (it previously leaked).
        email_server.quit()


def main(header):
    """
    Open the saved digest HTML in Chrome, detect which of the four known
    e-mail layouts it uses, scrape that layout's article links into
    ``reply.html`` and finally send the reply mail.

    :param header: subject fragment returned by down_mail(); reused in the
                   outgoing mail subject.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--ignore-certificate-errors')
    # NOTE(review): chrome_options= is deprecated in newer Selenium versions;
    # kept for compatibility with the pinned chromedriver.
    browser = webdriver.Chrome(chrome_options=options, executable_path="chromedriver.exe")
    browser.maximize_window()
    # Resolve the saved mail body relative to the working directory instead of
    # the previous hard-coded "H:/email_spider" path, so any machine works.
    browser.get(Path('res_html.html').resolve().as_uri())
    # Start reply.html as an empty UTF-8 page; the *_ui() scrapers append links.
    content = '<html>' \
              '<head>' \
              '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">' \
              '</head>' \
              '<body>' \
              '</body>' \
              '</html>'
    with open('reply.html', 'w', encoding='utf-8') as f:
        f.write(content)
    try:
        # Gartner/Forrester digests come in several UIs; probe one signature
        # XPath per layout to decide which scraper to run.
        gar_detail_1 = browser.find_elements_by_xpath('//div[3]/div/table/tbody/tr/td/table/tbody/tr/td[1]/table/'
                                                      'tbody/tr[2]/td/p/b/span/a/span')
        gar_detail_2 = browser.find_elements_by_xpath('//tr[1]/td/table/tbody/tr/td/table/tbody/'
                                                      'tr[1]/td/p/a[2]/span/span')
        gar_detail_3 = browser.find_elements_by_xpath('//tr[1]/td/div/table/tbody/tr/td[1]/table/tbody/'
                                                      'tr[1]/td/p/a[2]/span/span')
        for_detail_1 = browser.find_elements_by_xpath('//table/tbody/tr[4]/'
                                                      'td/table[1]/tbody/tr[2]/td/p/span/a/b/span')
        title_type_1 = 'Gartner推荐阅读'
        title_type_2 = 'Forrester推荐阅读'
        time.sleep(5)
        # Layout 1
        if len(gar_detail_1) != 0:
            first_ui(browser)
            send_mail(title_type_1, header)
            print('ok')
        # Layout 2
        elif len(gar_detail_2) != 0:
            second_ui(browser)
            send_mail(title_type_1, header)
            print('ok')
        # Layout 3 (Forrester)
        elif len(for_detail_1) != 0:
            third_ui(browser)
            send_mail(title_type_2, header)
        # Layout 4
        elif len(gar_detail_3) != 0:
            fourth_ui(browser)
            send_mail(title_type_1, header)
        else:
            print('页面错误')
    except Exception as e:
        print(e)


def send_mail(title_type, head):
    """
    Build and send the reply e-mail containing the rebuilt article links.

    Reads ``reply.html`` (populated by the *_ui() scrapers), stamps the
    subject with the current time, the source label and the original subject
    fragment, then delivers it through the corporate SMTP server.

    :param title_type: source label, e.g. 'Gartner推荐阅读' / 'Forrester推荐阅读'.
    :param head: subject fragment taken from the incoming digest mail.
    :return: None; failures are logged, not raised.
    """
    try:
        smtp_server = 'mail.nsfocus.com'
        # NOTE(review): hard-coded credentials -- move to config/env.
        user = 'ar'
        passwd = 'u32Te7sAM'
        sender = 'ar@nsfocus.com'
        receiver = 'yangyue6@nsfocus.com'
        # Mail body: the accumulated reply page. Use a context manager so the
        # handle is closed even if the read fails (previously leaked on error).
        with open('reply.html', 'r', encoding='utf-8') as f:
            test_repost = f.read()
        m = MIMEText(test_repost, 'html', 'utf-8')
        # Subject format: "YYYYMMDD-HH:MM-<source>-<topic>".
        time_str = time.strftime("%Y%m%d-%H" + ":" + "%M")
        now_sub = time_str + '-' + title_type + '-' + head
        print(now_sub)
        m['Subject'] = Header(now_sub, 'utf-8')
        m['From'] = Header('ar@nsfocus.com', 'utf-8')
        # Also set To: -- mails without it are commonly flagged as spam
        # (the envelope recipient alone does not populate this header).
        m['To'] = Header(receiver, 'utf-8')
        smtp = smtplib.SMTP()
        smtp.connect(smtp_server)
        smtp.login(user, passwd)
        smtp.sendmail(sender, receiver, m.as_string())
        smtp.quit()
        print('9--邮件发送成功')
    except Exception as e:
        print(e, '9--发送邮件出错')


def send_gartner_spider(title1, link1):
    """
    Ask the Gartner crawler service about *link1* and append the resulting
    AR reading-room link to ``reply.html``.

    Service response codes:
      * 201 -- article already mirrored in AR; payload carries the AR URL.
      * 200 -- not mirrored yet; wait 10 minutes for the crawler, then re-query.

    :param title1: article title, used as the anchor text.
    :param link1: original Gartner article URL.
    """
    print('4--文章未存在，开始爬取')
    data = json.dumps({'link': link1})
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
        'Connection': 'close'
    }
    req = requests.post('http://192.168.19.16:9111/isGartnerLinkInAR', data=data, headers=headers)
    time.sleep(5)
    print('5--爬虫方响应内容：', req, '-----------', req.json())
    if req.json()['code'] == 200:
        # Not mirrored yet: give the crawler 10 minutes, then query again
        # (now sending the same headers as the first call; previously omitted).
        time.sleep(600)
        req = requests.post('http://192.168.19.16:9111/isGartnerLinkInAR', data=data, headers=headers)
    elif req.json()['code'] != 201:
        # Unknown response code: nothing to append (matches old behavior).
        return
    ar_url = req.json()['data']
    print('6--ar阅览室链接：', ar_url)
    # Properly closed anchor (the original markup was missing </a>).
    content = '<div><a href="%s">%s</a></div>' % (ar_url, title1)
    with open('reply.html', 'a+', encoding='utf-8') as f:
        f.write(content)
    print('7--替换文章链接成功, 开始下一篇文章')


def send_Forrester_spider(title1, link1):
    """
    Ask the Forrester crawler service about *link1* and append the resulting
    AR reading-room link to ``reply.html``.

    Service response codes:
      * 201 -- article already mirrored in AR; payload carries the AR URL.
      * 200 -- not mirrored yet; wait 5 minutes for the crawler, then re-query.

    :param title1: article title, used as the anchor text.
    :param link1: original Forrester article URL.
    """
    print('4--文章未存在，开始爬取')
    data = json.dumps({'link': link1})
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
        'Connection': 'close'
    }
    req = requests.post('http://192.168.19.16:9111/isForresterLinkInAR', data=data, headers=headers)
    time.sleep(5)
    print('5--爬虫方响应内容：', req, '-----------', req.json())
    if req.json()['code'] == 200:
        # Not mirrored yet: give the crawler 5 minutes, then query again
        # (now sending the same headers as the first call; previously omitted).
        time.sleep(300)
        req = requests.post('http://192.168.19.16:9111/isForresterLinkInAR', data=data, headers=headers)
    elif req.json()['code'] != 201:
        # Unknown response code: nothing to append (matches old behavior).
        return
    ar_url = req.json()['data']
    print('6--ar阅览室链接：', ar_url)
    # Properly closed anchor (the original markup was missing </a>).
    content = '<div><a href="%s">%s</a></div>' % (ar_url, title1)
    with open('reply.html', 'a+', encoding='utf-8') as f:
        f.write(content)
    print('7--替换文章链接成功, 开始下一篇文章')


def first_ui(browser):
    """
    Scrape the first e-mail layout.

    Two title markups occur in this layout; each <div> index is probed for
    both, and whichever matches supplies the article title and link.
    :return:
    """
    for idx in range(3, 50):
        fmt_a = browser.find_elements_by_xpath('//div[%s]/div/table/tbody/tr/td/table/tbody/tr/td[1]/table/tbody/'
                                               'tr[2]/td/p/b/span/a/span' % idx)
        fmt_b = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/tr/td[1]/'
                                               'table/tbody/tr[2]/td/p/b/span/a/span' % idx)
        if fmt_a:
            # First markup variant: pull title text and the article href.
            article_title = fmt_a[0].text.strip()
            article_link = browser.find_elements_by_xpath('//div[%s]/div/table/tbody/tr/td/table/tbody/tr/td[1]/table/tbody/'
                                                          'tr[2]/td/p/b/span/a' % idx)[0].get_attribute('href')
        elif fmt_b:
            # Second markup variant, same fields via the alternate XPath.
            article_title = fmt_b[0].text.strip()
            article_link = browser.find_elements_by_xpath('//div[%s]/table/tbody/tr/td/table/tbody/tr/td[1]/'
                                                          'table/tbody/tr[2]/td/p/b/span/a' % idx)[0].get_attribute('href')
        else:
            # Neither variant present at this index.
            continue
        # Crawl only when the article is not yet in the AR reading room.
        if not c.search(title=article_title, spaceKey='AR')['results']:
            send_gartner_spider(article_title, article_link)

    print('8--所有文章爬取完毕')


def second_ui(browser):
    """
    Scrape the second e-mail layout.

    This layout carries three distinct title markups, each appearing in its
    own region of the page, so three separate index scans are run in turn.
    :return:
    """
    # Batch 1: relative //tr[i] rows, indices 1..14.
    for i in range(1, 15):
        title1 = browser.find_elements_by_xpath('//tr[%s]/td/table/tbody/tr/td/table/tbody/'
                                                'tr[1]/td/p/a[2]/span/span' % i)
        # Decide which markup this index belongs to:
        # non-empty means the first markup matched and can be scraped;
        # empty means this batch is exhausted for that index.
        if len(title1) != 0:
            # Article title text.
            title1_1 = title1[0].text.strip()
            # Article link (same row, the <a> ancestor of the title span).
            link1 = browser.find_elements_by_xpath('//tr[%s]/td/table/tbody/tr/td/table/tbody/'
                                                   'tr[1]/td/p/a[2]' % i)[0].get_attribute('href')
            # Check whether the article already exists in the AR reading room.
            have = c.search(title=title1_1, spaceKey='AR')['results']
            # Not present: hand off to the crawler service.
            if not have:
                send_gartner_spider(title1_1, link1)
                # time.sleep(60)
        else:
            continue

    # Batch 2: absolute path rows, indices 2, 6, 10, ... 38 (step 4).
    for i in range(2, 40, 4):
        title2 = browser.find_elements_by_xpath('/html/body/div/div/div[2]/div/div[7]/div/table/tbody/tr/td/div[3]/'
                                                'div/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[%s]/'
                                                'td/p/a[2]/span/span' % i)
        # Non-empty means the second markup matched; scrape it.
        if len(title2) != 0:
            # Article title text.
            title1_2 = title2[0].text.strip()
            link1 = browser.find_elements_by_xpath('/html/body/div/div/div[2]/div/div[7]/div/table/tbody/tr/td/div[3]/'
                                                   'div/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[%s]/'
                                                   'td/p/a[2]' % i)[0].get_attribute('href')
            # Check whether the article already exists in the AR reading room.
            have = c.search(title=title1_2, spaceKey='AR')['results']
            # Not present: hand off to the crawler service.
            if not have:
                send_gartner_spider(title1_2, link1)
        else:
            continue
    # Batch 3: absolute path, row-indexed tables, indices 1..19.
    for i in range(1, 20):
        title3 = browser.find_elements_by_xpath('/html/body/div/div/div[2]/div/div[8]/table/tbody/tr/td/div[3]/div/'
                                                'table/tbody/tr[%s]/td/table/tbody/tr/td/'
                                                'table/tbody/tr[2]/td/p/a[2]/span/span' % i)
        # Non-empty means the third markup matched; scrape it.
        if len(title3) != 0:
            # Article title text.
            title1_3 = title3[0].text.strip()
            link1 = browser.find_elements_by_xpath('/html/body/div/div/div[2]/div/div[8]/table/tbody/tr/td/div[3]/div/'
                                                   'table/tbody/tr[%s]/td/table/tbody/tr/td/'
                                                   'table/tbody/tr[2]/td/p/a[2]' % i)[0].get_attribute('href')
            # Check whether the article already exists in the AR reading room.
            have = c.search(title=title1_3, spaceKey='AR')['results']
            # Not present: hand off to the crawler service.
            if not have:
                send_gartner_spider(title1_3, link1)
        else:
            continue
    print('8--所有文章爬取完毕')


def third_ui(browser):
    """
    Scrape the third e-mail layout (Forrester digest).

    Articles sit in tables under row 4 and row 6; only odd table indices
    (1, 3, ..., 23) carry content in each region.
    :return:
    """
    # Region 1: tables under //table/tbody/tr[4].
    for table_idx in range(1, 25, 2):
        hits = browser.find_elements_by_xpath('//table/tbody/tr[4]/td/'
                                              'table[%s]/tbody/tr[2]/td/p/span/a/b/span' % table_idx)
        if not hits:
            continue
        article_title = hits[0].text.strip()
        article_link = browser.find_elements_by_xpath('//table/tbody/tr[4]/td/'
                                                      'table[%s]/tbody/tr[2]/td/p/span/a' % table_idx)[0].get_attribute('href')
        # Crawl only when the article is not yet in the AR reading room.
        if not c.search(title=article_title, spaceKey='AR')['results']:
            send_Forrester_spider(article_title, article_link)
    # Region 2: tables under //table/tbody/tr[6].
    for table_idx in range(1, 25, 2):
        hits = browser.find_elements_by_xpath('//table/tbody/tr[6]/td/table[%s]/'
                                              'tbody/tr[2]/td/p/span/a/b/span' % table_idx)
        if not hits:
            continue
        article_title = hits[0].text.strip()
        article_link = browser.find_elements_by_xpath('//table/tbody/tr[6]/td/table[%s]/'
                                                      'tbody/tr[2]/td/p/span/a' % table_idx)[0].get_attribute('href')
        # Crawl only when the article is not yet in the AR reading room.
        if not c.search(title=article_title, spaceKey='AR')['results']:
            send_Forrester_spider(article_title, article_link)


def fourth_ui(browser):
    """
    Scrape the fourth e-mail layout (Gartner digest).

    Titles live under //tr[i]/td/div/...; rows 1..9 are scanned and any
    article missing from the AR reading room is handed to the crawler.
    :return:
    """
    for row in range(1, 10):
        matches = browser.find_elements_by_xpath('//tr[%s]/td/div/table/tbody/tr/td[1]/table/'
                                                 'tbody/tr[1]/td/p/a[2]/span/span' % row)
        if not matches:
            continue
        article_title = matches[0].text.strip()
        article_link = browser.find_elements_by_xpath('//tr[%s]/td/div/table/tbody/tr/td[1]/table/'
                                                      'tbody/tr[1]/td/p/a[2]' % row)[0].get_attribute('href')
        # Crawl only when the article is not yet in the AR reading room.
        if not c.search(title=article_title, spaceKey='AR')['results']:
            send_gartner_spider(article_title, article_link)


if __name__ == '__main__':
    # Pull the newest digest mail, then scrape it and send the rebuilt links.
    # NOTE(review): down_mail() can return None when no message was processed;
    # main() would then build a subject containing 'None' -- verify intent.
    headers = down_mail()
    main(headers)
