# -*- coding: utf-8 -*-
"""
@File:    parse_service.py
@Author:  jk.dong
@Desc:    邮件解析模块服务
"""
import socket
import copy
import time
import urllib.request as urlreq
from abc import abstractmethod

from docwork.service.base_service import BaseService
from docwork.utils import *
from docwork.config import BASE_PATH, db1_type, table_km_brokermail, table_broker, table_doctype, table_stock
from docwork.dao.receive_dao import RECV_DB


# Title-extraction feature flags, read once at import time.
# Use an empty-dict default so a missing 'title-extract' section does not raise
# AttributeError on the chained .get() (previous code called .get() on None).
_title_extract_conf = RECV_CONF['receive-setting'].get('title-extract') or {}
TitleExtractSwitch = _title_extract_conf.get('switch')
TitleExtractMode = _title_extract_conf.get('use_mode')
if TitleExtractSwitch and TitleExtractMode == 'nlp':
    # heavy NLP model import is deferred until the feature is actually enabled
    from docwork.utils.title_extraction import OriginTitleExtractor


class ParseService(BaseService):
    def __init__(self):
        """Create the parse service; the concrete mail client is attached later via client()."""
        super().__init__('parse')
        # Characters that must be stripped from attachment file names.
        self.filename_filter_token = r'[/\\:*?\"<>!]'
        # Concrete mail client (DirectConClient / HttpClient / MailClient); set by client().
        self.client_instance = None

    def client(self, username: str, password: str, server: str, port: int, protocol: str, timeout: int):
        """
        Build the mail client instance for the given protocol.

        :param username: mailbox account name
        :param password: mailbox password
        :param server: mail server host
        :param port: mail server port
        :param protocol: one of POP3 / IMAP / EWS / COREMAIL / EML
        :param timeout: connection timeout in seconds (set process-wide via socket)
        :return: the created client instance, or None when creation failed
        """
        try:
            # Process-wide socket timeout for the mailbox connection.
            socket.setdefaulttimeout(timeout)
            if protocol in ("POP3", "IMAP"):
                self.client_instance = DirectConClient(username, password, server, port, protocol)
            elif protocol in ("EWS", "COREMAIL"):
                self.client_instance = HttpClient(username, password, server, port, protocol)
            elif protocol == "EML":
                self.client_instance = MailClient(username, password, server, port, protocol)
                # Validate the configured eml directory and pre-load the uid list.
                eml_path = Path(RECV_CONF['receive-setting'].get('recv_eml_path'))
                if eml_path.exists() and eml_path.is_dir():
                    self.client_instance.uid_list = ['eml'+file.stem for file in eml_path.iterdir() if file.is_file()]
                else:
                    logging.error(f"recv_eml_path（{eml_path}）配置路径错误，请检查")
                    # BUGFIX: a bare `raise` with no active exception raises
                    # RuntimeError('No active exception to re-raise'); raise an
                    # explicit error so the handler below logs something useful.
                    raise ValueError(f"invalid recv_eml_path: {eml_path}")
        except Exception as err:
            logging.error(f"邮箱服务器连接异常. Error: {err}")
        return self.client_instance

    def analysis_email(self, index, rawmaillog_id, customer_id, parse_type='server', ews_mail=None):
        """
        Receive one mail and parse it into a message object.

        :param index: server-side mail index, or an 'eml'-prefixed file stem when parse_type == 'eml'
        :param rawmaillog_id: raw-mail log id; also used to name downloaded attachments
        :param customer_id: id of the customer owning the mailbox
        :param parse_type: 'server' parses from the mail server, 'eml' parses a local
                           {rawmail_id}.eml file, anything else parses from the local
                           backup directory
        :param ews_mail: unused here; kept for caller compatibility
        :return: the parsed message object on success, None on any failure
        """
        try:
            if parse_type == 'server':
                msg = self.client_instance.get_message(index, rawmaillog_id)
            elif parse_type == 'eml':
                # the file to parse is named {rawmail_id}.eml; index carries an 'eml' prefix
                msg = self.client_instance.get_message_from_eml(index[3:], rawmaillog_id, eml_dir=Path(RECV_CONF['receive-setting'].get('recv_eml_path')))
            else:
                msg = self.client_instance.get_message_from_eml(rawmaillog_id, rawmaillog_id, eml_dir=RECV_CONF['receive-setting'].get('rawmail_path'))
            logging.info("开始处理邮件消息体")
            # Server side only: parse the html body so the extracted content can be
            # written to the summary column of the database table.
            if (not RECV_CONF['receive-setting'].get('client_mode')) and (BODY_SOURCE_REGION.get('analyze')):
                logging.info("Server Process: 配置项AnalyzeBodySource为true")
                mail_from = ""
                if BODY_SOURCE_REGION.get('sender_list') and len(msg.attachments) > 0:
                    # parse recvinfo.xml to obtain the sender (seller) id
                    logging.info("Getting mailfrom from recvinfo.xml")
                    for attach in msg.attachments:
                        if attach['attachName'] == 'recvinfo.xml':
                            path_file = attach['path']
                            if path_file:
                                ri = read_xml(path_file)
                                mail_from = ri.get('mailfrom')
                                logging.info(f"Mail From:{mail_from}")
                                break
                if BODY_SOURCE_REGION.get('customer_list') and BODY_SOURCE_REGION.get('sender_list') \
                        and BODY_SOURCE_REGION.get('title_list') and len(msg.attachments) > 0:
                    bodysource_customer_list = BODY_SOURCE_REGION.get('customer_list')
                    bodysource_sender_list = BODY_SOURCE_REGION.get('sender_list')
                    bodysource_title_list = BODY_SOURCE_REGION.get('title_list')
                    if (str(customer_id) in bodysource_customer_list) and (mail_from in bodysource_sender_list):
                        title_trigger = False
                        for i in bodysource_title_list:
                            if i in msg.subject:
                                title_trigger = True
                                logging.info("开始解析 xn_bodySource.html")
                                for attach in msg.attachments:
                                    if attach['attachName'] == 'xn_bodySource.html':
                                        # read the html content from the attachment file
                                        path_file = attach.get('path')
                                        if path_file:
                                            # NOTE(review): relies on the platform default
                                            # encoding; presumably the html is utf-8 — confirm.
                                            with open(path_file, 'r') as f:
                                                msg.body_html = f.read()
                                            logging.info(f"Get body html from {path_file} successfully!")
                                            break
                                        else:
                                            logging.warning(f"Get body html failed. Attach path:{path_file}")
                        if not title_trigger:
                            logging.info("No title trigger.")
                    else:
                        logging.info("Current message not suitable in body source white lists.")
                else:
                    logging.info("Attachment count is 0 or body source white lists is empty.")
            hidden_attachment_list = []
            download_attachurllist = RECV_CONF['receive-setting'].get('attachment_download_urls')
            if download_attachurllist:
                logging.info(f"已配置附件url（域名）的下载链接：{download_attachurllist}")
            if msg.body_html:
                logging.info(f"Use Html Body! body length:{len(msg.body_html)}.")
                msg.content = self.strip_html(msg.body_html)
                if download_attachurllist:
                    hidden_attachment_list = self.get_attachurls_from_bodyhtml(msg.body_html, download_attachurllist)
                logging.info(f"hiddenAttachUrlList(html-format) ==> {hidden_attachment_list}.")
            elif msg.body_text:
                logging.info(f"Use text body! body length:{len(msg.body_text)}.")
                msg.content = msg.body_text
                # remove line breaks inserted inside download urls of RTF-format mails
                msg.content = re.sub(r'(<http[s]?://[^>]+)[\r]?[\n]?(P)[\r]?[\n]?(D)[\r]?[\n]?(F)[\r]?[\n]?([^>]+>)',
                                     r'\1\2\3\4\5', msg.content, flags=re.IGNORECASE)
                # drop PNG image download links in RTF-format mails
                msg.content = re.sub(
                    r'(<)(http[s]?://[^>]+)[\r]?[\n]?(P)[\r]?[\n]?(D)[\r]?[\n]?(F)[\r]?[\n]?([^>]*).png(>)', '',
                    msg.content, flags=re.IGNORECASE)
                # drop JPG image download links in RTF-format mails
                msg.content = re.sub(
                    r'(<)(http[s]?://[^>]+)[\r]?[\n]?(P)[\r]?[\n]?(D)[\r]?[\n]?(F)[\r]?[\n]?([^>]*).jpg(>)', '',
                    msg.content, flags=re.IGNORECASE)

                if download_attachurllist:
                    hidden_attachment_list = self.get_attachurls_from_bodyhtml(msg.content, download_attachurllist,
                                                                               rtf=True)
                # prefix PDF download links in RTF-format mails with "下载地址:"
                msg.content = re.sub(
                    r'(<)(http[s]?://[^>]+)[\r]?[\n]?(P)[\r]?[\n]?(D)[\r]?[\n]?(F)[\r]?[\n]?([^>]+)(>)',
                    r'下载地址:\2\3\4\5\6', msg.content, flags=re.IGNORECASE)
                logging.info(f"hiddenAttachUrlList(text-format) ==>{hidden_attachment_list}")
            else:
                logging.warning("邮件摘要为空！")

            # whether the mail already carries a real attachment (links are only
            # downloaded when needed)
            have_valid_attachment = False
            xn_bodysource_index = -1
            download_fail = False

            # if the summary contains "click to download" links, fetch the files
            if len(hidden_attachment_list) > 0:
                import ssl
                for attach_url in hidden_attachment_list:
                    try:
                        # BUGFIX: the previous `or` of two negated startswith checks was
                        # True for every plain http:// link ('http' does not start with
                        # 'https'), silently skipping all non-https downloads. A single
                        # startswith('http') covers both http and https.
                        if not attach_url.startswith('http'):
                            logging.info("非http开头的链接，Continue...")
                            continue
                        if attach_url.find('rdp.cicc.com.cn/pdf-reader/read') >= 0:
                            # disable ssl certificate verification for this request only
                            context = ssl._create_unverified_context()
                            response_data = urlreq.urlopen(attach_url, context=context).read().decode('utf-8')
                            if response_data.find("window.PDFViewerApplicationOptions.set('defaultUrl', '") >= 0:
                                s1 = response_data[
                                     response_data.index(
                                         "window.PDFViewerApplicationOptions.set('defaultUrl', '") + 54:]
                                tartget_url = s1[:s1.index("');")]
                                attach_url = "https://rdp.cicc.com.cn/pdf-reader/" + tartget_url
                        # JPMorgan pdf: the link points at an html page; extract the
                        # real pdf url from the forcePdf anchor
                        if "jpmorgan.com" in attach_url:
                            logging.info("jpmorgan.com in attachurl")
                            context = ssl._create_unverified_context()
                            response_data = urlreq.urlopen(attach_url, context=context).read().decode('utf-8')
                            reg = re.compile(r'<a\s+[^>]*href=\"/research/open/url/[\S]+?forcePdf=1"',
                                             flags=re.IGNORECASE)
                            matches = reg.findall(response_data)
                            logging.info(f"正则匹配到url为{matches}")
                            if matches:
                                tartget_url = matches[0].split("href=")[1].strip('"')
                                if "jpmsc.portal.jpmorgan.com" in attach_url:
                                    attach_url = "https://jpmsc.portal.jpmorgan.com" + tartget_url
                                elif "markets.jpmorgan.com" in attach_url:
                                    attach_url = "https://markets.jpmorgan.com" + tartget_url

                        attach_url = attach_url.rstrip('/')
                        attach_name = attach_url[attach_url.rindex('/') + 1:]
                        attach_name = 'XNDL' + attach_name.replace('\r', '').replace('\n', '').replace('\t', '').strip()
                        # file names may not contain any of: / \ : * ? " < > !
                        attach_name = re.sub(self.filename_filter_token, '', attach_name, flags=re.IGNORECASE)
                        if not attach_name:
                            attach_name = f'{rawmaillog_id}_{len(msg.attachments)+1}.pdf'
                        elif attach_name.endswith('p df'):
                            attach_name = attach_name.replace("p df", "pdf")
                        if attach_name.find('.') < 0:
                            logging.info("url附件名无后缀, 默认添加'.pdf'后缀")
                            attach_name += ".pdf"

                        # resolve the Chinese file name hidden behind CICC report links
                        if re.match(r"http(s)?://rdp.cicc.com.cn/pdf-reader/getFile[\S]+", attach_url):
                            context = ssl._create_unverified_context()
                            headers = urlreq.urlopen(attach_url, context=context).info()
                            for key in headers.keys():
                                if key.lower() == 'content-disposition'.lower():
                                    str_file_name = headers[key]
                                    if str_file_name is not None:
                                        fstr = "filename="
                                        pos1 = str_file_name.find(fstr)
                                        if pos1 >= 0:
                                            fn = str_file_name[pos1 + len(fstr):]
                                            if fn.find(";") >= 0:
                                                fn = fn[:fn.index(";")]
                                                fn = fn.strip('"')
                                            if fn != "" and fn.endswith(".pdf"):
                                                logging.info("start analyze Implicit filename--cicc")
                                                attach_name = "XNDL" + urlreq.url2pathname(fn)
                        # resolve the Chinese file name hidden behind GTJA report links
                        elif re.match(r"http(s)?://cdn-idp.gtjaqh.com/report[\S]+", attach_url):
                            fn = attach_url.split("/")[-1]
                            if fn != "" and fn.endswith(".pdf"):
                                logging.info("start analize Implicit filename--gtja")
                                attach_name = "XNDL" + urlreq.url2pathname(fn)
                        # resolve the Chinese file name hidden behind New Times Securities links
                        elif re.match(r"http(s)?://(.*)FileDownloadPDF[\S]+", attach_url):
                            context = ssl._create_unverified_context()
                            headers = urlreq.urlopen(attach_url, context=context).info()
                            for key in headers.keys():
                                if key.lower() == 'content-disposition'.lower():
                                    str_file_name = headers[key]
                                    # header arrives latin-1 encoded; real name is gb18030
                                    attach_name = "XNDL" + str_file_name.encode('ISO-8859-1').decode('gb18030').split('filename=')[1]
                        # resolve the Chinese file name hidden behind Huaxing Securities links
                        elif re.match(r"http(s)?://researchdl.huaxingsec.com/CR_TD_TRACK_PROD_SH/external[\S]+",
                                      attach_url):
                            context = ssl._create_unverified_context()
                            headers = urlreq.urlopen(attach_url, context=context).info()
                            for key in headers.keys():
                                if key.lower() == 'content-disposition'.lower():
                                    str_file_name = headers[key]
                                    if str_file_name is not None:
                                        fstr = "filename="
                                        pos1 = str_file_name.find(fstr)
                                        if pos1 >= 0:
                                            fn = str_file_name[pos1 + len(fstr) + 1:-1]
                                            # BUGFIX: str.replace returns a new string;
                                            # the previous bare call discarded the result.
                                            fn = fn.replace('"', '')
                                            if fn.find(";") >= 0:
                                                fn = fn[:fn.index(";")]
                                                fn = fn.strip('"')
                                            if fn != "" and fn.endswith(".pdf"):
                                                logging.info("开始解析隐藏文件名--huaxing")
                                                attach_name = "XNDL" + urlreq.url2pathname(fn)
                        # resolve the Chinese file name hidden behind JPMorgan report links
                        elif re.match(r"http(s)?://(jpmsc.portal|markets).jpmorgan.com/research[\S]+", attach_url):
                            context = ssl._create_unverified_context()
                            headers = urlreq.urlopen(attach_url, context=context).info()
                            str_file_name = headers.get("content-disposition")
                            if str_file_name:
                                fstr = "filename="
                                pos1 = str_file_name.find(fstr)
                                if pos1 >= 0:
                                    fn = str_file_name[pos1 + len(fstr):]
                                    if fn.find(";") >= 0:
                                        fn = fn[:fn.index(";")]
                                        fn = fn.strip('"')
                                    if fn != "" and fn.endswith(".pdf"):
                                        logging.info("开始解析隐藏文件名--jpmorgan")
                                        attach_name = "XNDL" + urlreq.url2pathname(fn)

                        # COMPAT: keep inner and outer f-string quotes distinct so this
                        # parses on Python < 3.12 (quote reuse needs PEP 701 / 3.12).
                        filename = f'{rawmaillog_id}_{len(msg.attachments)+1}{attach_name[attach_name.rindex("."):]}'
                        try:
                            urlreq.urlretrieve(attach_url, RECV_CONF['receive-setting'].get('rawattach_path').joinpath(filename))
                            logging.info(f"附件url:{attach_url}已下载")
                        except Exception as err:
                            download_fail = True
                            logging.error(f"附件url解析下载失败:{attach_url}\tError Message:{err}")

                        # compute the md5 of the downloaded file and skip it if it
                        # duplicates an existing attachment
                        rawattach_file = Path(RECV_CONF['receive-setting'].get('rawattach_path')).joinpath(filename)
                        if not rawattach_file.exists():
                            continue
                        else:
                            current_file_md5 = get_md5(rawattach_file)
                            if compare_digest_files(current_file_md5, msg.attachments):
                                continue
                        msg.add_attachment(attach_name, RECV_CONF['receive-setting'].get('rawattach_path'))
                        # remove successfully downloaded url links from the summary
                        # NOTE(review): download_fail is sticky across urls, so one
                        # failure stops removal for all later links — confirm intended.
                        if not download_fail and RECV_CONF['receive-setting'].get('remove_download_urls'):
                            logging.info("删除摘要中的url下载链接")
                            msg.content = msg.content.replace(attach_url, ' ')
                    except Exception as err:
                        logging.info(traceback.format_exc())
                        logging.error(f"附件url解析流程发生异常. Error: {err}")

            if len(msg.attachments) > 0:
                # running attachment count (1-based before each append)
                attach_count = 1
                attachments = copy.deepcopy(msg.attachments)
                msg.attachments.clear()
                # attach_count must account for xn_bodySource.html and recvinfo.xml
                for attachment in attachments:
                    attachment['attachName'] = attachment['attachName'].replace('\r', '').replace('\n', '').replace(
                        '\t', '').strip()
                    attachment['attachName'] = re.sub(self.filename_filter_token, '', attachment['attachName'],
                                                      flags=re.IGNORECASE)
                    if attachment['attachName'].endswith('p df'):
                        attachment['attachName'] = attachment['attachName'].replace('p df', 'pdf')
                    if attachment['attachName'].find('.') < 0:
                        logging.error("attachName no suffix, add default suffix:'.pdf'")
                        attachment['attachName'] += ".pdf"

                    if RECV_CONF['receive-setting'].get('save_body_image') and \
                            attachment['Content-ID'] and attachment['attachName']:
                        msg.content = msg.content.replace(attachment['Content-ID'], attachment['attachName'])

                    # COMPAT: distinct quote types keep this valid on Python < 3.12
                    logging.info(f"attachName:{attachment['attachName']}\tpath:{attachment['path']}\t"
                                 f"attachSize:{attachment['attachSize']}\tContent-ID:{attachment['Content-ID']}")
                    msg.attachments.append(attachment)
                    attach_count += 1
                    if attachment['attachName'] == "xn_bodySource.html":
                        # index of xn_bodySource.html within msg.attachments
                        xn_bodysource_index = attach_count - 2
                    if (attachment['attachName'] != "recvinfo.xml") and (
                            attachment['attachName'] != "xn_bodySource.html"):
                        have_valid_attachment = True

            # server side only: drop the html helper attachment when it is redundant
            if not RECV_CONF['receive-setting'].get('client_mode'):
                if (BODY_SOURCE_REGION.get('delete_when_vaild') and not download_fail
                        and have_valid_attachment and xn_bodysource_index >= 0):
                    logging.info("服务器端解析HTML时正文下载链接未异常，并且带有效附件，删除HTML")
                    # remove the corresponding entry from msg.attachments
                    msg.attachments.pop(xn_bodysource_index)

            logging.info(f"共解析到{len(msg.attachments)}篇附件")
            msg.analyse_success = True
            msg.customer_id = customer_id
            return msg
        except Exception as err:
            logging.info(traceback.format_exc())
            logging.error(f"邮件解析流程发生异常. Error:{err}")
            return None

    @staticmethod
    def strip_html(source):
        """
        Convert an html mail body into plain text.

        Strips tags, scripts, styles and the head section, maps structural tags
        (<td> -> tab, <br>/<li> -> line break, <p>/<div>/<tr> -> paragraph break),
        rewrites known research-report download links so the url survives in the
        text, decodes a few html entities and collapses redundant whitespace.

        :param source: raw html string
        :return: the extracted plain text, or the original source on any error
        """
        try:
            result = source.replace('\r', ' ')
            result = result.replace('\n', ' ')

            # Remove the span
            result = re.sub(r'<( )*span([^>])*>', '', result, flags=re.IGNORECASE)
            result = re.sub(r'(<( )*(/)( )*span( )*>)', '', result, flags=re.IGNORECASE)

            # BUGFIX: the anchor-rewriting patterns below used r'<a\\s+',
            # r'[\\s\\S]' and r'\\.pdf' — a doubled backslash inside a raw string is a
            # literal backslash in the regex, so none of them could ever match real
            # html (compare the correctly escaped patterns compiled elsewhere in this
            # file). Rewritten with single backslashes; replacements unchanged.

            # Keep "click to view original" links: anchor text starting with 点
            # tips: [^>]* also matches e.g. <a target="_blank" href="http...
            result = re.sub(r'<a\s+[^>]*href=\"([^\"]*)\"[^>]*>([\u70b9][^<]*)', r'\2\1', result, flags=re.IGNORECASE)

            # rewrite attachment url links inside the content
            result = re.sub(r'<a\s+[^>]*href=\"(http(s)?://[^\"]+PDF[^\"]+)\"[^>]*>([^<]*)</a>', r'\3 下载地址:\1',
                            result,
                            flags=re.IGNORECASE)
            result = re.sub(
                r'<a\s+[^>]*href=\"(http(s)?://[^\"]+cmschina[^\"]+ResearchReport[^\"]+)\"[^>]*>([\s\S]*?)</a>',
                r'\3 下载地址:\1',
                result, flags=re.IGNORECASE)
            result = re.sub(r'<a\s+[^>]*href=\"(http(s)?://[^\"]+report-view-direct[^\"]+)\"[^>]*>([\s\S]*?)</a>',
                            r'\3 下载地址:\1',
                            result, flags=re.IGNORECASE)
            # NOTE(review): the next two patterns end the href group directly with
            # </a> (no closing quote / '>' / text group) — looks truncated; kept
            # as-is pending confirmation against real gtjaqh/huaxingsec mails.
            result = re.sub(
                r'<a\s+[^>]*href=\"(http(s)?://cdn-idp.gtjaqh.com/report+[^\"]+(\d{4}[-|.]?\d{2}[-|.]?\d{2}).pdf)</a>',
                r'\3 下载地址:\1',
                result, flags=re.IGNORECASE)
            result = re.sub(
                r'<a\s+[^>]*href=\"(http(s)?://researchdl.huaxingsec.com/CR_TD_TRACK_PROD_SH/external/download(\?[0-9a-zA-Z=&%_\-]*)?(\/[-a-zA-Z0-9_:@&?=+,.!/~*%$]*)?)</a>',
                r'\3 下载地址:\1',
                result, flags=re.IGNORECASE)
            result = re.sub(
                r'<a\s+[^>]*href=\"(http(s)?://markets.jpmorgan.com/research/email/[a-zA-Z0-9]*/[0-9a-zA-Z=&%/_\-]*)\"[^>]*>([\s\S]*?)</a>',
                r'\3 下载地址:\1',
                result, flags=re.IGNORECASE)
            result = re.sub(
                r'<a\s+[^>]*href=\"(http(s)?://jpmsc.portal.jpmorgan.com/research/publicUnauthenticated/email/[a-zA-Z0-9]*/[0-9a-zA-Z=&%/_\-]*)\"[^>]*>([\s\S]*?)</a>',
                r'\3 下载地址:\1',
                result, flags=re.IGNORECASE)
            result = re.sub(r'<a\s+href=\"([^\"]*\.pdf)\"[^>]*>', r'\1 ', result, flags=re.IGNORECASE)
            result = re.sub(r'<a\s+href=\"([^\"]*)\"[^>]*>([^>]*\.pdf)</a>', r'\1\2', result, flags=re.IGNORECASE)

            # Remove step-formatting
            result = result.replace('\t', '')
            # Remove repeating spaces because browsers ignore them
            result = re.sub(r'( )+', ' ', result)

            # Remove the header (prepare first by clearing attributes)
            result = re.sub(r'<( )*head([^>])*>', '<head>', result, flags=re.IGNORECASE)
            result = re.sub(r'(<( )*(/)( )*head( )*>)', '</head>', result, flags=re.IGNORECASE)
            result = re.sub('(<head>).*(</head>)', '', result, flags=re.IGNORECASE)

            # remove all scripts (prepare first by clearing attributes)
            result = re.sub(r'<( )*script([^>])*>', '<script>', result, flags=re.IGNORECASE)
            result = re.sub(r'(<( )*(/)( )*script( )*>)', '</script>', result, flags=re.IGNORECASE)
            result = re.sub(r'(<script>).*(</script>)', '', result, flags=re.IGNORECASE)

            # remove all styles (prepare first by clearing attributes)
            result = re.sub(r'<( )*style([^>])*>', '<style>', result, flags=re.IGNORECASE)
            result = re.sub(r'(<( )*(/)( )*style( )*>)', '</style>', result, flags=re.IGNORECASE)
            result = re.sub('(<style>).*(</style>)', '', result, flags=re.IGNORECASE)

            # insert tabs in spaces of <td> tags
            result = re.sub(r'<( )*td([^>])*>', '\t', result, flags=re.IGNORECASE)

            # insert line breaks in places of <BR> and <LI> tags
            result = re.sub(r'<( )*br( )*>', '\r', result, flags=re.IGNORECASE)
            result = re.sub(r'<( )*li( )*>', '\r', result, flags=re.IGNORECASE)

            # insert line paragraphs (double line breaks) in place
            # of <P>, <DIV> and <TR> tags
            result = re.sub(r'<( )*div([^>])*>', '\r\r', result, flags=re.IGNORECASE)
            result = re.sub(r'<( )*tr([^>])*>', '\r\r', result, flags=re.IGNORECASE)
            result = re.sub(r'<( )*p([^>])*>', '\r\r', result, flags=re.IGNORECASE)

            # shield <img> tags from the generic tag-stripping below; they are
            # restored at the end when body images must be kept
            if RECV_CONF['receive-setting'].get('save_body_image'):
                result = re.sub(r'<( )*img([^>]*)>', r'#{img\2}#', result, flags=re.IGNORECASE)

            # Remove remaining tags like <a>, links, images,
            # comments etc - anything that's enclosed inside < >
            result = re.sub(r'<[^>]*>', '', result, flags=re.IGNORECASE)

            # replace special characters:
            result = re.sub(r'&nbsp;', ' ', result, flags=re.IGNORECASE)
            result = re.sub(r'&bull;', ' * ', result, flags=re.IGNORECASE)
            result = re.sub(r'&lsaquo;', '<', result, flags=re.IGNORECASE)
            result = re.sub(r'&rsaquo;', '>', result, flags=re.IGNORECASE)
            result = re.sub(r'&trade;', '(tm)', result, flags=re.IGNORECASE)
            result = re.sub(r'&frasl;', '/', result, flags=re.IGNORECASE)
            result = re.sub(r'&copy;', '(c)', result, flags=re.IGNORECASE)
            result = re.sub(r'&reg;', '(r)', result, flags=re.IGNORECASE)
            result = re.sub(r'&(.{2,6});', '', result, flags=re.IGNORECASE)

            # make line breaking consistent
            result = result.replace('\n', '\r')

            # Remove extra line breaks and tabs:
            # replace over 2 breaks with 2 and over 4 tabs with 4.
            # Prepare first to remove any whitespaces in between
            # the escaped characters and remove redundant tabs in between linebreaks
            result = re.sub(r'(\r)( )+(\r)', '\r\r', result, flags=re.IGNORECASE)
            result = re.sub(r'(\t)( )+(\t)', '\t\t', result, flags=re.IGNORECASE)
            result = re.sub(r'(\t)( )+(\r)', '\t\r', result, flags=re.IGNORECASE)
            result = re.sub(r'(\r)( )+(\t)', '\r\t', result, flags=re.IGNORECASE)

            # Remove redundant tabs
            result = re.sub(r'(\r)(\t)+(\r)', '\r\r', result, flags=re.IGNORECASE)

            # Replace multiple tabs following a linebreak with just one tab
            result = re.sub(r'(\r)(\t)+', '\r\t', result, flags=re.IGNORECASE)

            # Initial replacement target string for linebreaks
            breaks = r"\r\r\r"
            # Initial replacement target string for tabs
            tabs = r"\t\t\t\t\t"
            index = 0
            while index < len(result):
                result = result.replace(breaks, '\r\r')
                result = result.replace(tabs, '\t\t\t\t')
                breaks = breaks + "\r"
                tabs = tabs + "\t"
                index += 1

            # restore the <img> tags shielded above
            if RECV_CONF['receive-setting'].get('save_body_image'):
                result = result.replace('#{', '<')
                result = result.replace('}#', '>')

            return result
        except Exception as err:
            logging.info(traceback.format_exc())
            logging.error(f"Convert HTML to Text Error:{err}")
            return source

    @staticmethod
    def get_attachurls_from_bodyhtml(bodyhtml, urls, rtf=False):
        """
        从html body中查找要下载的附件url链接
        :param bodyhtml: html source内容对象
        :param urls: 域名列表，指定要下载的附件url链接
        :param rtf: boolean
        :return:
        """
        url_list = []
        result = bodyhtml.replace('\r', ' ')
        result = result.replace('\n', ' ')

        result = re.sub(r'<( )*span([^>])*>', '', result, flags=re.IGNORECASE)
        result = re.sub(r'(<( )*(/)( )*span( )*>)', '', result, flags=re.IGNORECASE)

        # reg1 = re.compile('<a\\s+[^>]*href=\"([^\"]*\\.pdf)\"[^>]*>')
        # matches = reg1.finditer(result)
        # for match in matches:
        #     if match.group(1) and match.group(1).strip():
        #         url_list.append(match.group(1).strip())
        # reg2 = re.compile('<a\\s+[^>]*href=\"([^\"]*)\"[^>]*>([^>]*\\.pdf)</a>')
        # matches = reg2.finditer(result)
        # for match in matches:
        #     if match.group(1) and match.group(1).strip():
        #         url_list.append(match.group(1).strip())
        # # 匹配点击下载
        # reg3 = re.compile('<a\\s+[^>]*href=\"([^\"]*)\"[^>]*>([\u70b9][^<]*)')
        # matches = reg3.finditer(result)
        # for match in matches:
        #     if match.group(1) and match.group(1).strip():
        #         logging.info("点击下载步骤。。。")
        #         url_list.append(match.group(1).strip())
        # # 对结尾带有.pdf的url进行匹配
        # reg4 = re.compile("http(s)?://([\w-]+\.)+[\w-]+([0-9a-zA-Z-./?%&=]+?(?=\.pdf))\.pdf", flags=re.IGNORECASE)
        # matches = reg4.finditer(result)
        # for match in matches:
        #     if match.group(1) and match.group(1).strip():
        #         url_list.append(match.group(1).strip())

        # 是否是RTF格式邮件
        if rtf:
            reg_rtf = re.compile(r'<(http[s]?://[^>]+PDF[^>]+)>', flags=re.IGNORECASE)
            matches = reg_rtf.finditer(result)
            for match in matches:
                if match.group(1):
                    url = match.group(1).strip().replace('&amp;', '&')
                    if url and not url.endswith(".png") and not url.endswith(".jpg"):
                        url_list.append(url)

        for url in urls:
            if "xsdzq" in url:
                # 匹配新时代证券的非.pdf结尾下载链接
                reg5 = re.compile(r'<a\\s+[^>]*href=\"(http(s)?://[^\"]+PDF[^\"]+)\"', flags=re.IGNORECASE)
                matches = reg5.finditer(result)
                for match in matches:
                    if match.group(1):
                        url = match.group(1).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "rdp.cicc" in url:
                # 匹配中金的pdf下载链接
                reg6 = re.compile(r'<a\\s+[^>]*href=\"(http(s)?://[^\"]+rdp.cicc[^\"]+pdf-reader/getFile[\S]+)\"',
                                  flags=re.IGNORECASE)
                matches = reg6.finditer(result)
                for match in matches:
                    if match.group(1):
                        url = match.group(1).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "cmschina" in url:
                # 匹配招商证券国际的下载链接 http://www.cmschina.com.hk/cmshk/ResearchReport?id=286431
                reg7 = re.compile(r'<a\\s+[^>]*href=\"(http(s)?://[^\"]+cmschina[^\"]+ResearchReport[^\"]+)\"',
                                  flags=re.IGNORECASE)
                matches = reg7.finditer(result)
                for match in matches:
                    if match.group(1):
                        url = match.group(1).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "htisec" in url:
                # 匹配海通国际的下载链接 https://equities.htisec.com/app/public-research/report-view-direct?language=en&_S=oAyijAapwXHGiLPFSKa
                reg8 = re.compile(
                    r'<a\\s+[^>]*href=\"(http(s)?://[^\"]+report-view-direct[^\"]+)\"[^>]*>([\\s\\S]*?)</a>',
                    flags=re.IGNORECASE)
                matches = reg8.finditer(result)
                for match in matches:
                    if match.group(1):
                        url = match.group(1).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "gtjaqh" in url:
                # 匹配国泰君安的下载链接 https://cdn-idp.gtjaqh.com/report/0ea0e308-3d51-11ee-9b5e-0242ac190002/00-%E5%9B%BD%E6%B3%B0%E5%90%9B%E5%AE%89%E6%9C%9F%E8%B4%A7_%5B%E6%A3%95%E6%A6%88%E6%B2%B9%E3%80%81%E8%B1%86%E6%B2%B9%5D_R3_%E6%99%A8%E6%8A%A5_20230818.pdf
                reg9 = re.compile(r'http(s)?://cdn-idp.gtjaqh.com/report+[^\"]+(\d{4}[-|.]?\d{2}[-|.]?\d{2}).pdf',
                                  flags=re.IGNORECASE)
                matches = reg9.finditer(result)
                for match in matches:
                    if match.group(0):
                        url = match.group(0).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "huaxingsec" in url:
                # 匹配华兴证券的下载链接https://researchdl.huaxingsec.com/CR_TD_TRACK_PROD_SH/external/download?q=24e693ee2d4f7ecc3d78e17ccc66784c2fe33b237fIYfhbeBunLCW-LtTeDl-rbpW6rSx5JjkUAIOs-loer6FR4Qbw_a031rtVIe31hj7twvz57VckMIcqtBWnBibmp05iDPqJnb5VUDwZP-awBdAn8bpp4xml4OcTz9Sm7eHFEpolVNzyTBPsKAlUkpj9AEPCo3XLExjnLcMw6ed9FLJWUwDRnnnlTKdxvULSoe
                reg10 = re.compile(
                    r"http(s)?://researchdl.huaxingsec.com/CR_TD_TRACK_PROD_SH/external/download(\?[0-9a-zA-Z=&%_\-]*)?(\/[-a-zA-Z0-9_:@&?=+,.!/~*'%$]*)?",
                    flags=re.IGNORECASE)
                matches = reg10.finditer(result)
                for match in matches:
                    if match.group(0):
                        url = match.group(0).strip().replace('&amp;', '&')
                        if url:
                            url_list.append(url)
            elif "jpmorgan" in url:
                # 摩根大通英文pdf. Click here for the full Note and disclaimers
                reg11 = re.compile(
                    r'<a\\s+[^>]*href=\"http(s)?://markets.jpmorgan.com/research/email/[a-zA-Z0-9]*/[0-9a-zA-Z=&%/_\-]*\"[^>]*>(Click[^<]*)',
                    flags=re.IGNORECASE)
                matches = reg11.finditer(result)
                for match in matches:
                    if match.group(0):
                        url = match.group(0).strip().replace('&amp;', '&')
                        if url:
                            url = re.findall(
                                r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', url)
                            url_list.append(url[0])
                # 摩根大通中文pdf  点击这里以阅览报告全文及免责声明
                reg12 = re.compile(
                    r'<a\\s+[^>]*href=\"http(s)?://jpmsc.portal.jpmorgan.com/research/publicUnauthenticated/email/[a-zA-Z0-9]*/[0-9a-zA-Z=&%/_\-]*\"[^>]*>([\u70b9\u51fb][^<]*)',
                    flags=re.IGNORECASE)
                matches = reg12.finditer(result)
                for match in matches:
                    if match.group(0):
                        url = match.group(0).strip().replace('&amp;', '&')
                        if url:
                            url = re.findall(
                                r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', url)
                            url_list.append(url[0])
                # 过滤非摩根大通的url链接
                url_list = [strUrl for strUrl in url_list if "jpmorgan.com" in strUrl]

        url_set = list(set(url_list))
        url_set.sort(key=url_list.index)
        return url_set


class TitleAnalyzerBase:
    """Base class for mail-title analyzers.

    Instances are cached per class: ``__new__`` stores the first instance in
    the ``_instance`` class attribute and hands it back on every later call.
    Note that ``__init__`` still executes on each instantiation, so the
    default fields below are reset every time the class is "constructed".
    """
    # Lazily-created cached instance (assigned on the concrete class).
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Create the cached instance on first use, then always reuse it.
        if cls._instance is None:
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self):
        # Default analysis results; analyze_title() implementations
        # overwrite these on the message object.
        self.analysis_title = ''
        self.type_id = 24        # fallback document-type id
        self.broker_id = 1000    # fallback broker id
        self.stk_code = ''
        self.researcher = ''
        self.investrank_origin = ''
        self.industryrank_origin = ''

    @abstractmethod
    def analyze_title(self, msg, filter_title):
        # NOTE(review): without an ABCMeta metaclass this decorator is
        # advisory only -- the base class remains instantiable.
        """Populate analysis fields on *msg* from *filter_title* (no-op here)."""


class TitleAnalyzer(TitleAnalyzerBase):
    """Regex/DB based title analyzer (the non-NLP path)."""

    def __init__(self):
        super().__init__()

    def analyze_title(self, msg, filter_title):
        """Populate the analysis fields on *msg*.

        The analysis title is the filtered subject unchanged; the broker id
        is resolved from the sender address via km_brokermail -> broker, and
        a 6-digit number in the subject is validated as a stock code against
        the stock table. All fields are initialised up front so *msg* is left
        consistent even when a lookup finds nothing.
        """
        msg.analysis_title = filter_title
        msg.researcher = ''
        msg.type_id = 24        # default document type
        broker_id = 1000        # default broker id
        if not RECV_CONF['receive-setting'].get('title-extract').get('not_ext_broker_id'):
            # NOTE(review): SQL is built by string interpolation from the mail
            # sender -- prefer parameterized queries if RECV_DB supports them.
            if db1_type in (1, 4, 5):
                mechanism_sql = "select brokername from " + table_km_brokermail + " where instr('{}', email) > 0".format(
                    msg.from_)
            elif db1_type == 2:
                mechanism_sql = "select brokername from " + table_km_brokermail + " where email like '%{}'".format(
                    msg.from_.split('@')[-1])
            else:
                mechanism_sql = ""
            mechanism = ''
            if mechanism_sql:
                mechanism_result = RECV_DB.search(mechanism_sql)
                # Guard: an unknown sender yields an empty result set (the
                # original code raised IndexError on mechanism_result[0]).
                if mechanism_result:
                    mechanism = mechanism_result[0][0]
            logging.info("查询km_brokermail表，BrokerName为 '{}'.".format(mechanism))
            if mechanism:
                mechanism_sql = "select objid from {broker} where name = '{mechanism}'".format(broker=table_broker,
                                                                                               mechanism=mechanism)
                broker_result = RECV_DB.search(mechanism_sql)
                if broker_result:
                    broker_id = broker_result[0][0]
                    logging.info("查询broker表，匹配到BrokerId为 {}.".format(broker_id))
        msg.broker_id = broker_id
        msg.invest_rank_origin = ''
        msg.industry_rank_origin = ''
        msg.stk_code = ''
        # 8-digit numbers (typically dates) are not stock codes -- skip lookup.
        reg = re.search(r'([0-9]{8})', msg.subject)
        if reg:
            logging.info("正则匹配到8位数字:{}，不做股票查询匹配".format(reg.group()))
        else:
            stkcode = re.search(r'([0-9]{6})', msg.subject)
            if stkcode:
                stkcode_sql = f"select stktype, stkcode from {table_stock} where stkcode = '{stkcode.group()}'"
                stk_result = RECV_DB.search(stkcode_sql)
                if stk_result:
                    # Only stock types 2/3/4 count as usable codes.
                    stk_code = [r[1] for r in stk_result if r[0] in (2, 3, 4)]
                    logging.info("查询stock表. StockId为 {}.".format(stk_code))
                    # Guard: the type filter may leave nothing (the original
                    # code raised IndexError on stk_code[0]).
                    if stk_code:
                        msg.stk_code = stk_code[0]


class TitleAnalyzerProxy(TitleAnalyzerBase):
    """
    Proxy pattern: the NLP title extractor is slow to load, so the configured
    recognition mode decides at call time which analyzer implementation runs.
    """
    def __init__(self, mode='regex'):
        super().__init__()
        # 'nlp' routes to OriginTitleExtractor; any other value falls back to
        # the regex/DB based TitleAnalyzer.
        self.recognition_mode = mode

    # Entry point for both modes (NLP here, regex/DB via delegation below).
    def analyze_title(self, msg, filter_title):
        """Analyze *filter_title* and populate the analysis fields on *msg*.

        Sets msg.analysis_title / researcher / stk_code / type_id / broker_id /
        invest_rank_origin / industry_rank_origin. On any failure the fields
        fall back to this proxy's defaults and the error is logged.

        Returns the (mutated) *msg* object.
        """
        try:
            self.analysis_title = filter_title
            # NLP recognition mode
            if self.recognition_mode == 'nlp':
                logging.info("正在使用nlp进行标题识别")
                nlp_starttime = time.time()
                # NOTE(review): the model is re-loaded on every call (load time
                # is logged below); confirm whether the extractor can be cached.
                o_model = OriginTitleExtractor(model_path=BASE_PATH.joinpath('src/model'))
                logging.info(f"nlp模块加载耗时：{time.time() - nlp_starttime}")
                result = o_model.extraction(self.analysis_title)
                # Titles with the "XNCF_" prefix bypass NLP title extraction.
                msg.analysis_title = self.analysis_title if self.analysis_title.startswith("XNCF_") else result.get(
                    "标题", "")
                msg.researcher = result.get("研究员", "")
                msg.stk_code = result.get("股票代码", "")
                typename = result.get("报告大类")
                type_id = 24        # default document type
                broker_id = 1000    # default broker id
                if typename:
                    # Look up the matching id in the doctype table
                    type_sql = f"select objid from {table_doctype} where name='{typename}'"
                    type_result = RECV_DB.search(type_sql)
                    if type_result:
                        type_id = type_result[0][0]
                msg.type_id = type_id
                broker = result.get("机构")
                if broker:
                    broker_sql = f"select objid from {table_broker} where name = '{broker}'"
                    broker_result = RECV_DB.search(broker_sql)
                    if broker_result:
                        broker_id = broker_result[0][0]
                msg.broker_id = broker_id
                msg.invest_rank_origin = ''
                msg.industry_rank_origin = ''
                # A rating together with a stock code is a stock rating;
                # without a code it is treated as an industry rating.
                if result.get("投资评级") and result.get("股票代码"):
                    msg.invest_rank_origin = result.get("投资评级")
                elif result.get("投资评级") and not result.get("股票代码"):
                    msg.industry_rank_origin = result.get("投资评级")
            else:
                # Non-NLP mode: delegate to the regex/DB analyzer.
                TitleAnalyzer().analyze_title(msg, filter_title)
        except Exception as err:
            # Fall back to this proxy's default field values so *msg* is
            # always left in a consistent state.
            msg.analysis_title = self.analysis_title
            msg.researcher = self.researcher
            msg.type_id = self.type_id
            msg.broker_id = self.broker_id
            msg.stk_code = self.stk_code
            msg.invest_rank_origin = self.investrank_origin
            msg.industry_rank_origin = self.industryrank_origin
            logging.error(f"标题解析识别失败（func:analyze_title）, Error:{err}")

        logging.info(
            '标题解析完成，original_title:{original_title},analysis_title:{analysis_title},researcher:{researcher},type_id:{type_id},broker_id:{broker_id},stkcode:{stkcode}'.format(
                original_title=msg.subject, analysis_title=msg.analysis_title, researcher=msg.researcher,
                type_id=msg.type_id, broker_id=msg.broker_id,
                stkcode=msg.stk_code))

        if not msg.analysis_title:
            logging.warning("标题解析异常，解析后的标题为空。将影响后续附件的判重")

        return msg

