import pandas as pd
import jieba
import numpy as np


# 数据导入
def load_data(path_neg, path_pos):
    data_neg = pd.read_csv(path_neg, encoding="utf-8")
    data_pos = pd.read_csv(path_pos, encoding="utf-8")
    data_neg_list = list(data_neg["remark"])
    data_pos_list = list(data_pos["remark"])
    label = np.array([1] * len(data_pos_list) + [0] * len(data_neg_list))
    return data_neg_list, data_pos_list, label


# Load Chinese stopwords
def chinese_stopwords(path):
    """Read a stopword file (one word per line) and return the words as a list.

    Uses utf-8-sig so a leading BOM does not end up glued to the first word.
    Blank lines are skipped — the original kept them as '' entries, which
    silently polluted the stopword list.
    """
    with open(path, mode='r', encoding='utf-8-sig') as f:
        return [word.strip() for word in f if word.strip()]


# Tokenize a comment and remove stopwords
def fenci(comment):
    """Tokenize *comment* with jieba and drop stopwords, 1-char tokens,
    and pure digits.

    Returns a list of kept tokens (stripped of surrounding whitespace).
    """
    # Cache the stopword set on the function object: the original re-read
    # and re-parsed the stopword file on EVERY call (vector() calls this
    # once per comment) and then did O(n) list-membership per token.
    if not hasattr(fenci, "_stopwords"):
        fenci._stopwords = set(
            chinese_stopwords("D:/data/sentiment-analysis/stopwords.txt"))
    stopwords = fenci._stopwords
    comment = comment.replace(' ', '')  # remove embedded spaces
    word_cut = jieba.cut(comment, cut_all=False)  # precise mode; yields a generator
    return [word.strip() for word in word_cut
            if word not in stopwords and len(word) > 1 and not word.isdigit()]


# Data cleaning: tokenize every comment and re-join tokens with spaces
def vector(array):
    """Return a list with one space-separated token string per comment in *array*."""
    return [" ".join(fenci(comment)) for comment in array]


def main():
    """Load the raw pos/neg review CSVs, clean them, and write svmNlp.csv."""
    # Input file paths
    path_positive = "D:/data/sentiment-analysis/pos.csv"
    path_negative = "D:/data/sentiment-analysis/neg.csv"
    neg, pos, label = load_data(path_negative, path_positive)
    # Clean both sides and concatenate positives first, so rows stay aligned
    # with `label` ([1]*len(pos) followed by [0]*len(neg)).
    data_all = vector(pos) + vector(neg)
    # Persist to CSV (utf_8_sig keeps Excel happy with the BOM)
    dataframe = pd.DataFrame({'id': label, 'remark': data_all})
    dataframe.to_csv("svmNlp.csv", index=False, sep=',', encoding='utf_8_sig')


# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
