#调入分词的库
library("rJava")
library("Rwordseg")
#调入绘制词云的库
library("RColorBrewer")
library("wordcloud")
#读取xlsx文件的库
library("xlsx")

# File locations used by getEmotionWords().
# NOTE(review): origin_file is an absolute Windows path — adjust per machine.
emotion_words_dic <- "./resources/emotion_words_dic_100.xlsx"
origin_file <- "D:/Voice/emchat/Files/SONY.txt"
# Stop-word list; not referenced in this chunk — presumably used elsewhere, verify.
stop_words_dic <- "./resources/stop_words_dic.txt"

#' Extract emotion words from a text file.
#'
#' Reads an emotion-word dictionary (xlsx), segments the input text with
#' Rwordseg::segmentCN, strips URLs/whitespace noise, and returns the unique
#' segmented words that appear in the dictionary's `word` column.
#'
#' @param dic_path Path to the emotion-word dictionary workbook
#'   (defaults to the script-level `emotion_words_dic`).
#' @param text_path Path to the raw text file to analyze
#'   (defaults to the script-level `origin_file`).
#' @return Character vector of emotion words found in the text
#'   (deduplicated, in order of first appearance via intersect()).
getEmotionWords <- function(dic_path = emotion_words_dic,
                            text_path = origin_file) {
    # Emotion dictionary: first sheet, columns 1, 5, 6, 7, rows 1-100.
    # Assumes column 1 is named "word" — TODO confirm against the workbook.
    mydataframe <- read.xlsx(dic_path, 1, encoding = "UTF-8",
                             colIndex = c(1, 5, 6, 7),
                             startRow = 1, endRow = 100)
    # read.csv also handles plain .txt input; each row becomes one record.
    myfile <- read.csv(text_path, header = FALSE)
    # Drop blank entries so segmentCN receives segmentable character data.
    myfile.res <- myfile[myfile != " "]

    # NOTE(review): the original code also called
    # segmentCN(origin_file, returnType = "tm") here and discarded the
    # result; that only wrote a stray segmented copy of the file to disk
    # and duplicated the per-line segmentation below, so it was removed.

    # Segment each line into words and flatten into one character vector.
    myfile.words <- unlist(lapply(X = myfile.res, FUN = segmentCN))

    # Strip URLs, newlines, and full-width spaces. Add further gsub()
    # calls here for any other unwanted tokens.
    myfile.words <- gsub(pattern = "http:[a-zA-Z\\/\\.0-9]+", "", myfile.words)
    myfile.words <- gsub("\n", "", myfile.words)
    myfile.words <- gsub("　", "", myfile.words)

    # Diagnostic: how many tokens survived cleaning.
    print(length(myfile.words))

    # Keep only the segmented words that appear in the dictionary.
    intersect(myfile.words, mydataframe$word)
}

# Driver: run the extraction and keep the matched emotion words.
newdata <- getEmotionWords();

