import  os
import re
import jieba
import pandas as pd
# Read a file and return its contents
from nltk import WordNetLemmatizer


def load_file(filename):
    """Read a UTF-8 text file and return its full contents.

    Args:
        filename: path to the file to read.

    Returns:
        str: the entire file contents.
    """
    # The `with` statement closes the file automatically; the original
    # explicit file.close() inside the with-block was redundant.
    with open(filename, 'r', encoding='utf-8') as file:
        return file.read()


def load_dir_list(path):
    """Return the names of the entries (files and sub-directories) under *path*."""
    return os.listdir(path)


# Stop-word loading: each file holds one stop word per line.
def _read_stopword_file(path):
    """Read a newline-delimited stop-word file into a list of stripped words."""
    with open(path, "r", encoding="utf8") as f:
        return [line.strip() for line in f]


english_stopwords = _read_stopword_file("../db/停用词表/stopwords-master/english.txt")
chinese_stopwords = _read_stopword_file("../db/停用词表/stopwords-master/cn_stopwords.txt")

def chinenes_processing(text):
    """Clean and tokenize Chinese text; override to suit your own needs.

    Pipeline: strip markup noise, segment with jieba, fuse the negation
    word "不" with the token that follows it, join tokens with spaces.

    Args:
        text: raw Chinese text (e.g. a Weibo post).

    Returns:
        str: space-separated alphabetic tokens.
    """
    # Cleaning — raw strings fix the invalid-escape DeprecationWarnings
    # that non-raw "\{" / "\}" patterns raise on modern Python.
    text = re.sub(r"\{%.+?%\}", " ", text)   # drop {%xxx%} (geotags, Weibo topics, ...)
    text = re.sub(r"@.+?( |$)", " ", text)   # drop @username mentions
    text = re.sub(r"【.+?】", " ", text)      # drop 【xx】 (usually not user-authored)
    text = re.sub("\u200b", " ", text)       # zero-width space: a bad case in this dataset
    # Segment; keep alphabetic tokens only (drops digits and punctuation).
    words = [w for w in jieba.lcut(text) if w.isalpha()]
    # Special-case the negation word "不": merge it with the next token,
    # e.g. ["不", "好"] -> ["不好"].
    while "不" in words:
        index = words.index("不")
        if index == len(words) - 1:
            break  # trailing "不" has nothing to fuse with
        words[index: index+2] = ["".join(words[index: index+2])]  # slice-assign to merge in place
    # Join tokens into a single space-separated string.
    result = " ".join(words)
    return result


def english_processing(text):
    """Clean and tokenize English text; override to suit your own needs.

    NOTE(review): this uses jieba even for English, and the module-level
    stop-word lists are never applied here — confirm that is intended.

    Args:
        text: raw English text (e.g. an IMDB review).

    Returns:
        str: space-separated alphabetic tokens.
    """
    # Cleaning — raw strings fix the invalid-escape DeprecationWarnings
    # that non-raw "\{" / "\}" patterns raise on modern Python.
    text = re.sub(r"\{%.+?%\}", " ", text)  # drop {%xxx%} (geotags, topics, ...)
    text = re.sub(r"@.+?( |$)", " ", text)  # drop @username mentions
    text = re.sub(r"【.+?】", " ", text)  # drop 【xx】 (usually not user-authored)
    text = re.sub("\u200b", " ", text)  # zero-width space: a bad case in this dataset
    # Segment; keep alphabetic tokens only (drops digits and punctuation).
    words = [w for w in jieba.lcut(text) if w.isalpha()]
    # Join tokens into a single space-separated string.
    result = " ".join(words)
    return result

## Read the THUCNews corpus and return (processed_text, label) pairs
def load_thucnews():
    """Load the THUCNews dataset from ../db/text_classification_db/thucnews/.

    Each sub-directory is a category; every file inside it is one document.
    (The original comment claimed a DataFrame is returned — it is a list.)

    Returns:
        list[tuple[str, str]]: (chinenes_processing output, category name).
    """
    thucnews_path = '../db/text_classification_db/thucnews/'
    thucnews_dir_paths = load_dir_list(thucnews_path)
    data = []
    for dir_path in thucnews_dir_paths:
        # os.path.join is safer than string concatenation for paths.
        file_path = os.path.join(thucnews_path, dir_path)
        for file_name in load_dir_list(file_path):
            filename = os.path.join(file_path, file_name)
            text = load_file(filename)
            text_split = chinenes_processing(text)
            data.append((text_split, dir_path))
    return data

## Read the IMDB dataset
def load_IMDBDatas():
    """Load the IMDB reviews CSV and pre-process every review.

    Returns:
        pd.DataFrame: the IMDB data with the 'review' column replaced by
        its english_processing output; other columns (e.g. 'sentiment')
        are untouched.
    """
    IMDB_data = pd.read_csv('../db/text_classification_db/IMDB/IMDB Dataset.csv')
    # Vectorized apply replaces the original index loop, which assigned via
    # chained indexing (IMDB_data['review'][i] = ...) — that pattern raises
    # SettingWithCopyWarning and silently fails under pandas copy-on-write.
    IMDB_data['review'] = IMDB_data['review'].apply(english_processing)
    return IMDB_data