#调入分词的库
library("rJava")
library("Rwordseg")
#调入绘制词云的库
library("RColorBrewer")
library("wordcloud")
#读取xlsx文件的库
library("xlsx")
library("tm")
library("SnowballC")
#source("GetEmotionWords.r")

# File locations ----
# Sentiment-word dictionary, source comment file, and stop-word list.
emotion_words_dic <- "./resources/emotion_words_dic_100.xlsx"
origin_file <- "D:/Voice/emchat/Files/SONY_segment.xlsx"
stop_words_dic <- "./resources/stop_words_dic_1.xlsx"

# Read the comments from the Excel source file (text is already word-segmented).
mydataframe <- read.xlsx(origin_file,  1, encoding="UTF-8", stringsAsFactors=FALSE);
# Build a corpus from the whole data frame.
# NOTE: DataframeSource is used instead of VectorSource(mydataframe$text) to avoid
# the tm error 'not all inherits(doc, "TextDocument") are TRUE' seen downstream.
myCorpus <- Corpus(DataframeSource(mydataframe));

#************* Data cleaning ******************
# Remove punctuation from every document.
myCorpus <- tm_map(myCorpus, removePunctuation);
# Remove numbers.
myCorpus <- tm_map(myCorpus, removeNumbers)
# Remove URLs (disabled).
#removeURL <- function(x) gsub("http[[:alnum:]]*", "", x);
#myCorpus <- tm_map(myCorpus, removeURL);
# Applying a plain function makes the documents non-PlainTextDocument and
# triggers: 'not all inherits(doc, "TextDocument") are TRUE'.
# Wrap it in content_transformer() if this step is re-enabled.
# Collapse runs of whitespace.
myCorpus <- tm_map(myCorpus, stripWhitespace);

# Remove stop words listed in the stop-word dictionary.
mystopwordframe <- read.xlsx(stop_words_dic,  1, encoding="UTF-8", colClasses="character", stringsAsFactors=FALSE);
# Drop NA entries: read.xlsx pads ragged columns with NA, and passing NA
# to removeWords() raises an error.
mystopword <- mystopwordframe$text[!is.na(mystopwordframe$text)];
myCorpus <- tm_map(myCorpus, removeWords, mystopword);

#************ Extract stems (disabled) ********************
# Keep a copy of the corpus to use as a dictionary for stem completion.
#myCorpusCopy <- myCorpus;
# Stem words.
#myCorpus <- tm_map(myCorpus, stemDocument);

#************ Term-document matrix ***************
# The corpus must consist of PlainTextDocument objects at this point;
# tm_map(myCorpus, PlainTextDocument) was previously needed to ensure that.
# wordLengths=c(1, Inf) keeps single-character terms (important for Chinese,
# where tm's default minimum length of 3 would discard most words).
myTdm <- TermDocumentMatrix(myCorpus, control=list(wordLengths=c(1, Inf)));


#************ Frequency bar chart ********************
# Total occurrences of each term across all documents.
term_freq_all <- rowSums(as.matrix(myTdm))
# Keep only the terms appearing at least 10 times.
termFrequency <- term_freq_all[term_freq_all >= 10]
barplot(termFrequency, las = 2)

# Terms associated with '索尼' (correlation >= 0.25).
findAssocs(myTdm, '索尼', 0.25)

#************** Word cloud *********************
m <- as.matrix(myTdm);
# Per-term frequency, sorted in descending order.
wordFreq <- sort(rowSums(m), decreasing=TRUE);
# Fix the seed so the cloud layout is reproducible.
set.seed(375)
# Grayscale palette scaled by frequency.
# NOTE(review): currently unused — the wordcloud() call below uses the
# Dark2 palette instead. Kept in case colors=grayLevels is wanted.
grayLevels <- gray((wordFreq + 10) / (max(wordFreq) + 10));

# Color scale actually used by the cloud.
mycolors <- brewer.pal(8, "Dark2")
# Register a Chinese font for Windows graphics devices.
windowsFonts(myFont=windowsFont("华文彩云"))

# Draw the cloud: terms with frequency >= 3, most frequent placed centrally.
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq=3, random.order=FALSE, colors=mycolors)

#*************** Hierarchical term clustering *************
# Drop terms absent from 95%+ of documents to shrink the matrix.
myTdm2 <- removeSparseTerms(myTdm, sparse = 0.95);
m2 <- as.matrix(myTdm2);
# Cluster terms on the scaled term-document matrix (Ward linkage).
distMatrix <- dist(scale(m2));
fit <- hclust(distMatrix, method = "ward.D");
plot(fit);
# Number of clusters — shared by the rectangles and the tree cut so the
# two always agree (was hard-coded as 10 in both calls).
n_term_clusters <- 10;
rect.hclust(fit, k = n_term_clusters);
(groups <- cutree(fit, k = n_term_clusters));


#*********** k-means document clustering *************
# Transpose so rows are documents (we cluster documents, not terms).
m3 <- t(m2);
# Fixed seed for reproducible cluster assignment.
set.seed(122);
# k-means clustering into 8 document clusters.
k <- 8;
kmeansResult <- kmeans(m3, k);
# Cluster centers, rounded for readability.
round(kmeansResult$centers, digits=3);

# Print the top-5 terms characterizing each cluster.
# seq_len(k) is safe even if k were 0 (1:k would yield c(1, 0)).
for (i in seq_len(k)) {
    cat(paste("cluster", i, ":", sep=""));
    s <- sort(kmeansResult$centers[i,], decreasing = TRUE);
    cat(names(s)[1:5], "\n");
    # TODO: print sample documents for each cluster.
}

#************* k-medoids (PAM) ****************
library(fpc)
# Partitioning around medoids; krange=8 fixes the cluster count at 8.
pamResult <- pamk(m3, krange=8, metric="manhattan");
# Number of clusters identified.
(k <- pamResult$nc)
pamResult <- pamResult$pamobject;
# Print, per cluster, the terms whose frequency in the medoid document is 1.
# NOTE(review): == 1 only reports terms occurring exactly once in the medoid;
# if "present at all" is intended, this should be > 0 — confirm.
for (i in seq_len(k)) {
    cat(paste("cluster", i, ": "));
    cat(colnames(pamResult$medoids)[which(pamResult$medoids[i,]==1)], "\n");
}
# Plot the clustering result: two stacked panels on one page.
layout(matrix(c(1,2),2,1))
plot(pamResult, color=FALSE, labels=4, lines=0, cex=.8, col.clus=1, col.p=pamResult$clustering);
layout(matrix(1)) # back to one graph per page


