import email
import email.parser
import email.policy
import os
import re
from collections import Counter
from html import unescape

import nltk
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.pipeline import Pipeline
from urlextract import URLExtract

# Locations of the SpamAssassin corpus on disk.
SPAM_PATH = os.path.join('datasets', 'spam')
HAM_DIR = os.path.join(SPAM_PATH, 'easy_ham')
SPAM_DIR = os.path.join(SPAM_PATH, 'spam')
# Real message files have long hash-like names; short entries (e.g. 'cmds') are skipped.
ham_filenames = [fname for fname in sorted(os.listdir(HAM_DIR)) if len(fname) > 20]
spam_filenames = [fname for fname in sorted(os.listdir(SPAM_DIR)) if len(fname) > 20]

#我们可以使用email模块解析这些电子邮件（它处理邮件头、编码等）
def load_email(is_spam, filename, spam_path=SPAM_PATH):
    """Parse one email file from the corpus.

    Parameters
    ----------
    is_spam : bool
        Selects the 'spam' subdirectory when True, 'easy_ham' otherwise.
    filename : str
        Name of the message file inside that subdirectory.
    spam_path : str
        Root directory of the corpus (defaults to SPAM_PATH).

    Returns
    -------
    email.message.EmailMessage
        Parsed message (email.policy.default produces EmailMessage objects).
    """
    directory = 'spam' if is_spam else 'easy_ham'
    with open(os.path.join(spam_path, directory, filename), 'rb') as f:
        # NOTE: email.parser must be imported explicitly -- plain
        # `import email` does not expose the .parser submodule, so the
        # original code only worked via transitive imports.
        return email.parser.BytesParser(policy=email.policy.default).parse(f)

# Parse every message in each corpus so we can inspect the data.
ham_emails = [load_email(is_spam=False, filename=fname) for fname in ham_filenames]
spam_emails = [load_email(is_spam=True, filename=fname) for fname in spam_filenames]
# print(ham_emails[1].get_content().strip())
# print(spam_emails[6].get_content().strip())
#电子邮件实际上有很多部分，带有图像和附件（他们可以有自己的附件），查看邮件的各种类型的结构：
def get_email_structure(email):
    """Return a string describing the MIME structure of *email*.

    A plain string comes back unchanged; a multipart message is rendered
    recursively as 'multipart(part,part,...)'; any other message yields
    its content type (e.g. 'text/plain').
    """
    if isinstance(email, str):
        return email
    payload = email.get_payload()
    if not isinstance(payload, list):
        return email.get_content_type()
    inner = ','.join(get_email_structure(part) for part in payload)
    return 'multipart({})'.format(inner)

def structures_counter(emails):
    """Count how many messages in *emails* share each MIME structure."""
    return Counter(get_email_structure(msg) for msg in emails)
#正常邮件更多的是纯文本，垃圾邮件更多的是HTML
# print(structures_counter(ham_emails))
# print(structures_counter(spam_emails))
# for header,value in spam_emails[0].items():
#     print(header,':',value)
#里面可能有很多有用的信息，比如发件人的电子邮件地址（12a1mailbot1@web.de）看起来很可爱
#查看‘主题’标题
# print(spam_emails[0]['subject'])
# Build the full dataset (0 = ham, 1 = spam) and hold out 20% as a test set.
x = np.array(ham_emails + spam_emails)
labels = [0] * len(ham_emails) + [1] * len(spam_emails)
y = np.array(labels)
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42)
#首先需要一个函数将HTML转换为纯文本，使用【Beautifulsoup】库，下面的函数首先删除<head>部分，然后将所有<a>标签转换为单词hyperlink，
#然后去掉所有html标记，只留下纯文本，为了可读性，他还用一个换行符替换多个换行符，最后它取消了html实体
def html_to_plain_text(html):
    """Convert an HTML document to rough plain text.

    Drops the <head> section, replaces each opening <a ...> tag with the
    word HYPERLINK, strips all remaining tags, collapses runs of blank
    lines into a single newline, and unescapes HTML entities.
    """
    # All patterns are raw strings: '<a\s.*?>' in a plain literal is an
    # invalid escape sequence (SyntaxWarning on Python 3.12+, error later).
    text = re.sub(r'<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)
    text = re.sub(r'<a\s.*?>', 'HYPERLINK', text, flags=re.M | re.S | re.I)
    text = re.sub(r'<.*?>', '', text, flags=re.M | re.S)
    text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S)
    return unescape(text)

# Pull the HTML-formatted spam messages out of the training set.
html_spam_emails = [msg for msg in x_train[y_train == 1]
                    if get_email_structure(msg) == 'text/html']
sample_html_spam = html_spam_emails[7]
# print(sample_html_spam.get_content().strip()[:1000],"...")
# print(html_to_plain_text(sample_html_spam.get_content())[:1000],"...")
#编写一个函数，它以电子邮件为输入，并以纯文本形式返回其内容，无论其格式是什么
def email_to_text(email):
    """Return the body of *email* as plain text, whatever its format.

    Walks the MIME parts and returns the first text/plain part found.
    Failing that, converts the last text/html part seen via
    html_to_plain_text(). Returns None when neither kind is present.
    """
    html = None
    for part in email.walk():
        ctype = part.get_content_type()
        if ctype not in ('text/plain', 'text/html'):
            continue
        try:
            content = part.get_content()
        except Exception:
            # Decoding problems (bad charset etc.): fall back to the raw
            # payload. A bare `except:` here would also trap
            # KeyboardInterrupt/SystemExit, which we must not swallow.
            content = str(part.get_payload())
        if ctype == 'text/plain':
            return content
        html = content
    if html:
        return html_to_plain_text(html)
    return None
# print(email_to_text(sample_html_spam)[:100],'...')
# 若将所有处理整合到一个转换器中，我们将使用它将电子邮件转换为文字计数器。注意，我们使用python的‘split（）’方法将句子拆分为单词
# 该方法使用空格作为单词边界。但例如，汉语和日语脚本通常不在单词之间使用空格在这个练习中没关系，因为数据集（主要）是英文的，中文可以使用结巴分词来进行拆分
# try:
#     stemmer = nltk.PorterStemmer()
#     for word in ('Computations','Computation','Computing','Computed','Compute','Complusive'):
#         print(word,'=>',stemmer.stem(word))
# except ImportError:
#     print('Error: stemming requires the NLTK module.')
#     stemmer = None
# Porter stemmer shared by EmailWordCounterTransformer; reduces inflected
# words to a common stem (e.g. 'computations' -> 'comput').
stemmer = nltk.PorterStemmer()
class EmailWordCounterTransformer(BaseEstimator, TransformerMixin):
    """Turn email messages into Counters mapping each word to its count.

    Processing options (all enabled by default):
      - lower_case: lowercase the text before tokenizing
      - replace_urls: replace every URL with the token 'URL'
      - replace_numbers: replace every number with the token 'NUMBER'
      - remove_punctuation: collapse runs of non-word characters to spaces
      - stemming: reduce words to stems with the module-level `stemmer`
      - strip_headers: kept for API compatibility; email_to_text() already
        extracts only the body parts, so it has no effect here.
    """
    def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,
                 replace_urls=True, replace_numbers=True, stemming=True):
        self.strip_headers = strip_headers
        self.lower_case = lower_case
        self.remove_punctuation = remove_punctuation
        self.replace_urls = replace_urls
        self.replace_numbers = replace_numbers
        self.stemming = stemming

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, x, y=None):
        """Return an object array of Counter, one per input email."""
        # Create the URL extractor once per transform() call rather than
        # once per email inside the loop -- it is loop-invariant and
        # repeatedly constructing it is wasted work.
        url_extractor = URLExtract() if self.replace_urls else None
        x_transformed = []
        for email in x:
            text = email_to_text(email) or ""
            if self.lower_case:
                text = text.lower()
            if url_extractor is not None:
                urls = list(set(url_extractor.find_urls(text)))
                # Replace longer URLs first so a URL that is a substring
                # of another is not partially replaced.
                urls.sort(key=len, reverse=True)
                for url in urls:
                    text = text.replace(url, 'URL')
            if self.replace_numbers:
                text = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', 'NUMBER', text)
            if self.remove_punctuation:
                text = re.sub(r'\W+', ' ', text, flags=re.M)
            word_counts = Counter(text.split())
            if self.stemming and stemmer is not None:
                stemmed_word_counts = Counter()
                for word, count in word_counts.items():
                    stemmed_word_counts[stemmer.stem(word)] += count
                word_counts = stemmed_word_counts
            x_transformed.append(word_counts)
        return np.array(x_transformed)
# Try the word-count transformer on a handful of training emails.
x_few = x_train[:3]
few_counter = EmailWordCounterTransformer()
x_few_wordcounts = few_counter.fit_transform(x_few)
# print(x_few_wordcounts)
#有了单词计数，我们需要把它们转换成向量。为此，我们将构建另一个转换器，其“fit()”方法将构建词汇表（最常用单词的有序列表），其“transform()”方法将使用词汇表将单词计数转换为向量——稀疏矩阵
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
    """Convert word-count Counters into a sparse document-term matrix.

    fit() learns a vocabulary of the `vocabulary_size` most frequent
    words across all documents; transform() maps each Counter to a row
    where column j (j >= 1) holds the count of vocabulary word j and
    column 0 accumulates the counts of all out-of-vocabulary words.
    """
    def __init__(self, vocabulary_size=1000):
        # Maximum number of distinct words kept in the vocabulary.
        self.vocabulary_size = vocabulary_size

    def fit(self, x, y=None):
        total_count = Counter()
        for word_count in x:
            for word, count in word_count.items():
                # Cap each document's contribution so one very long email
                # cannot dominate the vocabulary.
                total_count[word] += min(count, 10)
        # most_common(n) selects the top n directly instead of sorting the
        # whole vocabulary and slicing (most_common()[:n]).
        most_common = total_count.most_common(self.vocabulary_size)
        self.most_common = most_common
        # Index 0 is reserved for out-of-vocabulary words.
        self.vocabulary_ = {word: index + 1
                            for index, (word, count) in enumerate(most_common)}
        return self

    def transform(self, x, y=None):
        rows = []
        cols = []
        data = []
        for row, word_count in enumerate(x):
            for word, count in word_count.items():
                rows.append(row)
                # Unknown words all map to column 0.
                cols.append(self.vocabulary_.get(word, 0))
                data.append(count)
        # csr_matrix sums duplicate (row, col) entries, which is exactly
        # what we want for the shared out-of-vocabulary column.
        return csr_matrix((data, (rows, cols)),
                          shape=(len(x), self.vocabulary_size + 1))

# Fit a tiny-vocabulary vectorizer on the sample word counts.
vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=10)
x_few_vectors = vocab_transformer.fit_transform(x_few_wordcounts)
# print(x_few_vectors.toarray())
# Learned vocabulary:
# print(vocab_transformer.vocabulary_)
# End-to-end preprocessing: raw email -> word counts -> sparse vectors.
pipeline_steps = [
    ('email_to_wordcount', EmailWordCounterTransformer()),
    ('wordcount_to_vector', WordCounterToVectorTransformer()),
]
preprocess_pipeline = Pipeline(pipeline_steps)

# Vectorize the training set and sanity-check a logistic-regression
# classifier with 3-fold cross-validation.
x_train_transformed = preprocess_pipeline.fit_transform(x_train)
log_clf = LogisticRegression(solver='liblinear',random_state=42)
score = cross_val_score(log_clf,x_train_transformed,y_train,cv=3,verbose=3)
# print(score.mean())

# Fit on the full training set, then score the held-out test set
# (transformed with the already-fitted pipeline, NOT refitted).
x_test_transformed = preprocess_pipeline.transform(x_test)
log_clf.fit(x_train_transformed,y_train)
y_pred = log_clf.predict(x_test_transformed)

# print('精度：{:.2f}%'.format(100 * precision_score(y_test,y_pred)))
# print('召回：{:.2f}%'.format(100 * recall_score(y_test,y_pred)))

#总结
#加载数据并纵观数据大局
#获取邮件的组成结构
#对数据结构类型进行分析，发现垃圾邮件大多有HTML结构
#数据清洗，定义email对象中的HTML转换纯文本的方法
#对数据集拆分成训练集和测试集
#数据处理转换，对文本的内容进行分词处理，通过nltk进行词干提取，汇总邮件中频繁词汇的技术统计
#将单词计数转化成向量矩阵
#把数据处理和数据清洗封装成两个转换器
#通过流水线来自动化处理数据
#通过逻辑回归线性分类器进行模型训练
#使用交叉验证进行微调
#在测试集上得到精度/召回率
