from torchtext.data import Field
from torchtext import data
from tqdm import tqdm
import yaml
import spacy
import re

"""
conda 安装apacy
conda install -c conda-forge spacy

conda 安装语言模型
conda install -c conda-forge spacy-model-en_core_web_lg
conda install -c conda-forge spacy-model-en_core_web_sm
词向量下载
https://www.kaggle.com/datasets/anindya2906/glove6b
"""
nlp = spacy.load('en_core_web_sm')  # 英语的管道


def clean_text(text):
    """Normalize raw English text before tokenization.

    Steps: strip URLs, expand common contractions, collapse runs of
    digits into the placeholder ``NUM``, drop HTML ``<br />`` tags, and
    finally delete every character outside the kept ranges (spaces,
    apostrophes, digits, ASCII letters and some mid-range punctuation).

    :param text: raw input string
    :return: cleaned string
    """
    # Remove URLs.
    text = re.sub(r"https?://\S+", "", text)
    # Expand contractions; the specific forms must run before the
    # generic suffix patterns below ("what's" before "\'s", etc.).
    text = re.sub(r"what's", "what is", text)  # optional
    text = re.sub(r"Won't", "will not", text)  # required
    text = re.sub(r"can't", "can not", text)  # required
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"n't", " not", text)
    text = re.sub(r"i'm", "i am", text)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"e - mail", "email", text)
    # BUG FIX: raw string (the old "\d+ " emits a DeprecationWarning on
    # modern Python) and keep the trailing space so the word following a
    # number is not fused with the placeholder ("3 cats" used to become
    # "NUMcats"; it is now "NUM cats").
    text = re.sub(r"\d+ ", "NUM ", text)
    text = re.sub(r"<br />", '', text)
    # BUG FIX: the last range previously started at \u007a, which is the
    # letter 'z' itself, so 'z' was deleted from every word.  Start at
    # \u007b ('{') so all ASCII letters survive, matching the intent of
    # keeping spaces, apostrophes and English letters.
    text = re.sub(r'[\u0000-\u0019\u0021-\u0026\u0028\u0040\u007b-\uffff]', '', text)

    return text


def tokenize(s):
    """Run spaCy over *s*, lemmatize every token and drop stop words."""
    return [token.lemma_ for token in nlp(s) if not token.is_stop]


# Pipeline: 1) clean the text 2) strip punctuation 3) tokenize 4) lemmatize
def tokenize_en(text):
    """
    Tokenizes English text from a string into a list of strings (tokens)
    """
    return tokenize(clean_text(text))

# Custom torchtext Dataset built from in-memory (text, label) tuples.
class MyDataset(data.Dataset):
    """Dataset wrapping an iterable of (text, label) pairs.

    Each pair, e.g. ('this movie is great', 1), is turned into a
    torchtext ``Example`` with a "text" and a "label" column.
    """

    def __init__(self, datatuple, text_field, label_field, test=False):
        # Column layout shared by every Example.
        fields = [("text", text_field), ("label", label_field)]
        # One loop replaces the previously duplicated train/test
        # branches: for a test set the label column is simply None.
        examples = [
            data.Example.fromlist([content, None if test else label], fields)
            for content, label in tqdm(datatuple)
        ]
        # Hand the prepared examples to the torchtext Dataset base class.
        super().__init__(examples, fields)
if __name__ == "__main__":
    with open("config.yaml", 'r', encoding='utf-8') as ymlfile:
        config = yaml.load(ymlfile, Loader=yaml.SafeLoader)
        print(config)

    SRC = Field(tokenize=tokenize_en, init_token=config["init_token"], eos_token=config["eos_token"],
                lower=True, fix_length=config["fix_length"])

    LABEL = Field(sequential=False, use_vocab=False)
