library(dplyr)
library(ggplot2)
library(bibliometrix) 
library(forcats)
library(reshape2)
library(wordcloud2) 
library(treemap)
library(ggpp)
library(Cairo)
library(webshot)
library(htmlwidgets)
library(openxlsx)
library(FactoMineR)

# Select the working directory: the local Windows dev checkout when it
# exists, otherwise the Linux server deployment path.
ismy <- file.exists("F:/rproject/wosexporttest/")
work_dir <- if (ismy) "F:/rproject/wosexporttest" else "/home/pubmedr/gcr/20231116"
setwd(work_dir)
getwd()
source("utils.R")

# This script no longer renders images; it only extracts the data and writes
# Excel files -- chart drawing happens downstream in Java.

# https://www.bibliometrix.org/vignettes/Introduction_to_bibliometrix.html
# bibliometrix
# 2023-11-28: testing export/plots from a WoS export with cited references.
# Requirement: one input data source in, all summary outputs out (as for PubMed).
# Alternative inputs kept for reference:
# file <- c('savedrecsresceshighcitedback.txt')
# file <- "https://www.bibliometrix.org/datasets/pubmed_txt.txt"
file <- "savedrecsre1.txt"
getwd()


# Load the WoS plaintext export into a bibliometrix data frame and run the
# core descriptive analysis. `topS` caps every "top N" table at 10, or at the
# number of records when the corpus is smaller than 10.
M <- convert2df(file = file, dbsource = "wos", format = "plaintext")
M$TC <- as.numeric(M$TC)  # times-cited arrives as character
# M <- M[which(!is.na(M$UT)), ]
# min() replaces the original redundant if/else that re-assigned 10 to itself.
topS <- min(10, nrow(M))
k <- 10
print(topS)
results <- biblioAnalysis(M, sep = ";")
options(width = 100)
S <- summary(object = results, k = topS, pause = FALSE)


# Reference: https://blog.csdn.net/qq_37364789/article/details/115395649
# http://www.idata8.com/rpackage/bibliometrix/00Index.html (API index)
# plot(x = results, k = 10, pause = T) errors on this data, so charts are
# built from the extracted tables instead.

# Pull the summary tables into plain data frames and normalise their types.
MostRelSources   <- as.data.frame(S$MostRelSources)
AnnualProduction <- as.data.frame(S$AnnualProduction)

colnames(AnnualProduction) <- c("Year", "Articles")
AnnualProduction$Year <- as.numeric(as.character(AnnualProduction$Year))

MostRelSources$Articles <- as.numeric(as.character(MostRelSources$Articles))
colnames(MostRelSources) <- c("SO", "Articles")
# Keep an (untrimmed) character copy of the journal name BEFORE cleaning SO,
# matching the original column layout: SO (trimmed), Articles, Sources.
MostRelSources$Sources  <- as.character(MostRelSources$SO)
MostRelSources$Articles <- as.numeric(as.character(MostRelSources$Articles))
MostRelSources$SO       <- trimws(MostRelSources$SO)
# 自己获取

M$TC <- as.numeric(M$TC)

# One row per journal (SO) with the comma-joined article IDs (PM) it holds.
MostRelSourcesTAB <- M %>%
  group_by(SO) %>%
  summarise(PMIDS = paste(PM, collapse = ","), .groups = "drop") %>%
  as.data.frame()

# Attach the PMID list to the top-sources table; drop column 3 (the extra
# `Sources` copy) before exporting. Row order follows MostRelSources.
MostRelSourcesDATA1 <- dplyr::left_join(MostRelSources, MostRelSourcesTAB, by = "SO")
MostRelSourcesDATA1 <- MostRelSourcesDATA1[, -3]
write.xlsx(MostRelSourcesDATA1, file = "MostRelSourcesDATA.xlsx")
# Most-published-sources table is now stored locally.

# Journals: the top-10-by-article-count chart itself is computed later in Java.
# [Most Relevant Sources -- N. of Documents per journal]
# most journal
# Cairo::CairoJPEG(file="MostRelSources.jpeg", width=12, height=12,units="in",dpi=130)
# ggplot(MostRelSources,  aes(x =  fct_reorder(Sources, (Articles)), y = Articles))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   # 修改别名
#   labs(title = "MostRelSources",y = 'Articles',x = 'Journal' )+
#   # xy 反转
#   coord_flip()
# dev.off()

# [Most Cited Sources] -- the export carries no per-source citation counts,
# so rank individual papers by total citations (TC) instead.
TCperYear <- NULL
PY <- NULL
TC <- NULL
Tags <- names(M)
if ("PY" %in% Tags) {
  PY <- as.numeric(M$PY)
}
if ("TC" %in% Tags) {
  TC <- as.numeric(M$TC)
  CurrentYear <- as.numeric(format(Sys.Date(), "%Y"))
  TCperYear <- TC / (CurrentYear - PY + 1)  # citations per year since publication
  if (!("DI" %in% names(M)))
    M$DI <- ""
  # NTC = citations normalised by the mean TC of the same publication year.
  MostCitedPapers <- data.frame(M$SR, M$UT, M$DI, TC, TCperYear, PY) %>%
    group_by(.data$PY) %>%
    mutate(NTC = .data$TC / mean(.data$TC)) %>%
    ungroup() %>%
    select(-.data$PY) %>%
    arrange(desc(.data$TC)) %>%
    as.data.frame()
  names(MostCitedPapers) <- c("Paper", "PMID", "DOI", "TC", "TCperYear", "NTC")
}

# Keep the top `topS` papers; head() is safe when fewer rows exist, whereas
# the original `[c(1:topS), ]` would have padded the table with NA rows.
MostCitedPapers <- head(MostCitedPapers, topS)
write.xlsx(MostCitedPapers, file = 'MostCitedPapersDATA.xlsx')

# MG <- M
# rownames(MG) <- NULL
# write.xlsx(MG,'MG.xlsx')


# 
# [Source Growth] cumulative per-year document counts for the top 10 sources.
# (Per-journal H-index "Source Impact" is unavailable for this export.)
# Candidate chart: https://echarts.apache.org/examples/zh/editor.html?c=line-stack
topSo <- sourceGrowth(M, top = 10, cdf = TRUE)
# Long format: one row per Year x source; `value` is the cumulative count.
DFtopSo <- melt(topSo, id = "Year")
write.xlsx(DFtopSo, "Source Growth.xlsx")

# Cairo::CairoJPEG(file="SourceGrowth.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(DFtopSo,aes(Year,value, group=variable, color=variable))+geom_line(stat = "identity",size=1.1)+
#   theme_classic()+
#   theme(
#     # 设置图例的文字大小为10号
#     legend.text=element_text(size=15),
#     # 设置图标题位置和文字大小
#     plot.caption = element_text(hjust=0.5, size=16),
#     axis.text=element_text(size=15),
#     # 设置轴标题文字大小和文字加粗
#     axis.title=element_text(size=15,face="bold")
#   ) +
#   # 下坐标刻度
#   scale_x_continuous(breaks=seq(min(DFtopSo$Year), max(DFtopSo$Year), 20))+
#   labs(title = "SourceGrowth")
# dev.off()
# Authors

# [Most Relevant Authors] authors ranked by article count, with the
# fractionalized count as a secondary column. The merge with full records
# happens later in Java.
MostProdAuthors <- as.data.frame(S$MostProdAuthors)
MostProdAuthors <- MostProdAuthors[, -3]  # drop the 3rd column (unused here)
colnames(MostProdAuthors) <- c("Authors", "Articles", "ArticlesFractionalized")
MostProdAuthors$Authors  <- as.character(MostProdAuthors$Authors)
MostProdAuthors$Articles <- as.numeric(as.character(MostProdAuthors$Articles))
MostProdAuthors$ArticlesFractionalized <-
  as.numeric(as.character(MostProdAuthors$ArticlesFractionalized))
MostProdAuthors <- arrange(MostProdAuthors, desc(Articles))
# 测试tab 进行获取数据
# listAU <- (strsplit(M$AU, ";"))
# listAU[1][1]
# nAU <- lengths(listAU)
# fracAU <- rep(1/nAU,nAU)
# TAB <- tibble(Author=unlist(listAU), fracAU=fracAU) %>% 
#   group_by(.data$Author) %>% 
#   summarize(
#     Articles = n(),
#     AuthorFrac = sum(.data$fracAU)
#   ) %>% 
#   arrange(desc(.data$Articles)) %>% as.data.frame()
# names(TAB)=c("Authors","Articles","Articles Fractionalized")
# print(S$MostProdAuthors)

# df <- tibble(x = 16, y = 0, autb = list(MostProdAuthors))
# CairoJPEG(file="Most Relevant Authors.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(MostProdAuthors,aes(x= fct_reorder(Authors, Articles),y=Articles))+
#   geom_bar(stat = "identity", fill = "lightblue", colour = "red") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   labs(title = "Most Relevant Authors" )+
#   # x y 反转
#   coord_flip()+
#   geom_table(data = df, aes(x = x, y = y, label=autb))
# dev.off()

# 【Most Local Cited Authors 被同行引用最多的】 无
#【Author Productivity through Lotka’s Law】 无

# 【Author Impact H-index】 无
# Hindex(M, field = "AU", elements, sep = ";", years = 10)
# Authorsres<-as.data.frame(results$Authors)

#作者发文时间图 自己做的
# Cairo::CairoJPEG(file="authorProdOverTime.jpeg", width=16, height=16,units="in",dpi=130)
# topAU <- authorProdOverTime(M, k = 10, graph = TRUE)
# dev.off()
# ##############################
# Manual rebuild of the author-production-over-time table (the plotting
# variant above is commented out); exports the table for Java to chart.
k <- 10  # number of top authors to keep
if (!("DI" %in% names(M))) {
  M$DI = "NA"
}
M$TC <- as.numeric(M$TC)
M$PY <- as.numeric(M$PY)
M <- M[!is.na(M$PY), ]  # drop records without a publication year
Y <- as.numeric(substr(Sys.time(), 1, 4))  # current calendar year
# Split the ";"-separated author string, one list element per article.
listAU <- (strsplit(M$AU, ";"))
# Number of authors on each article (used to repeat the article IDs).
nAU <- lengths(listAU)
df <- data.frame(AU = trimws(unlist(listAU)), UT = rep(M$UT, nAU))
# Count articles per author, keep the k most productive.
AU <- df %>% group_by(.data$AU) %>% count() %>% arrange(desc(.data$n)) %>% ungroup()
k <- min(k, nrow(AU))
AU <- AU %>% slice_head(n = k)
# For the top-k authors: join back the article metadata, compute citations
# per year (TCpY), and order by author productivity, then by year.
df <- df %>% right_join(AU, by = "AU") %>% left_join(M, 
                                                     by = "UT") %>% select(.data$AU.x, .data$PY, .data$TI, 
                                                                           .data$UT, .data$DI, .data$TC) %>% mutate(TCpY = .data$TC/(Y - 
                                                                                                                                       .data$PY + 1)) %>% group_by(.data$AU.x) %>% mutate(n = length(.data$AU.x)) %>% 
  ungroup() %>% rename(Author = .data$AU.x, year = .data$PY, 
                       DOI = .data$DI) %>% arrange(desc(.data$n), desc(.data$year)) %>% 
  select(-.data$n)
write.xlsx(df,'authorProdOverTime.xlsx')
# 

# ##############################





# [Most Relevant Affiliations] institutions ranked by article count.
# NOTE(review, 2023-12-12): this over-counts -- when several authors of one
# paper share an affiliation it is counted repeatedly; verify downstream.
Affiliations <- as.data.frame(results$Affiliations)
colnames(Affiliations) <- c("Affiliations", "Articles")
Affiliations$Affiliations <- as.character(Affiliations$Affiliations)
Affiliations$Articles <- as.numeric(as.character(Affiliations$Articles))
# head() keeps at most 10 rows and is safe when fewer affiliations exist;
# the original `[c(1:10), ]` would have padded the table with NA rows.
Affiliations <- head(Affiliations, 10)
# 合并数量展示pmid 2023年12月12日10:36:36
# 
# if (!("AU_UN" %in% Tags)) {
#   M = metaTagExtraction(M, Field = "AU_UN")
# }
# AFF = M$AU_UN
# listAU <- (strsplit(M$AU_UN, ";"))
# 每个集合的数量
# nAU <- lengths(listAU)
# df <- data.frame(AU = trimws(unlist(listAU)), PM = rep(M$UT, nAU))
# AU <- df %>% group_by(.data$AU) %>% count() %>% arrange(desc(.data$n)) %>% ungroup()
# k <- min(k, nrow(AU))
# AU <- AU %>% slice_head(n = k)
# df <- df %>% right_join(AU, by = "AU") %>% left_join(M, 
#                                                      by = "PM") %>% select(.data$AU.x, .data$PY, .data$TI, 
#                                                                            .data$PM, .data$DI, .data$TC) %>% mutate(TCpY = .data$TC/(Y - 
#                                                                                                                                        .data$PY + 1)) %>% group_by(.data$AU.x) %>% mutate(n = length(.data$AU.x)) %>% 
#   ungroup() %>% rename(Author = .data$AU.x, year = .data$PY, 
#                        DOI = .data$DI) %>% arrange(desc(.data$n), desc(.data$year)) %>% 
#   select(-.data$n)
# asadad <- df %>% group_by(.data$Author) %>% count()  %>% arrange(desc(.data$n))
# df[which(df$AU=='UNIV CALIF SAN DIEGO'),]
# 
# Affiliation = sort(table(df), decreasing = TRUE)

################################


# dfAffiliations <- tibble(x = 16, y = 0, autb = list(Affiliations))
# Cairo::CairoJPEG(file="Most Relevant Affiliations.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(Affiliations,aes(x= fct_reorder(Affiliations,Articles),y=Articles))+
#   geom_bar(stat = "identity", fill = "yellow", colour = "red") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   labs(title = "Most Relevant Affiliations" )+
#   # x y 反转
#   coord_flip()+
#   geom_table(data = dfAffiliations, aes(x = x, y = y, label=autb))
# dev.off()

# Yearly article trend: for each publication year, the comma-joined PMID
# list and the article count, newest year first.
yeararticletrendTAB <- M %>%
  group_by(PY) %>%
  summarise(
    PMIDS = paste(PM, collapse = ","),
    nsize = n(),
    .groups = "drop"
  ) %>%
  arrange(desc(PY)) %>%
  as.data.frame()
write.xlsx(yeararticletrendTAB, "yeararticletrendTAB.xlsx")

# 组合数据
# Cairo::CairoJPEG(file="year article trend.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(AnnualProduction,  aes(x =  Year, y = Articles))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   # 修改别名
#   labs(title = "year article trend" )+ scale_x_continuous(breaks=seq(min(AnnualProduction$Year), max(AnnualProduction$Year), 1))
# dev.off()
# Most productive countries, derived from the author-affiliation fields.
# (An alternative based on the RP field is sketched below, unused.)
# MRP <- M[,c('RP','PM')]
# split_list <- sapply(MRP$RP,function(x) tail(strsplit(x, ",")[[1]], 1))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # 
sep <- ";"
if (!("AU_CO" %in% names(M))) {
  print("AU_CO 不存在M")
  M <- metaTagExtraction(M, Field = "AU_CO", sep)
}
if (!("AU1_CO" %in% names(M))) {
  print("AU1_CO 不存在M")
  M <- metaTagExtraction(M, Field = "AU1_CO", sep)
}

# nCO: 1 when a paper's authors span more than one country (MCP), else 0.
M$nCO <- vapply(
  strsplit(M$AU_CO, ";"),
  function(countries) as.numeric(length(unique(countries)) > 1),
  numeric(1)
)

# Normalise first-author country names; substitutions are applied in the
# same order as before (UNITED STATES first, then the UK constituents).
M$AU1_CO <- trim(gsub("[[:digit:]]", "", M$AU1_CO))
country_fixes <- c(
  "UNITED STATES"      = "USA",
  "RUSSIAN FEDERATION" = "RUSSIA",
  "TAIWAN"             = "CHINA",
  "ENGLAND"            = "UNITED KINGDOM",
  "SCOTLAND"           = "UNITED KINGDOM",
  "WALES"              = "UNITED KINGDOM",
  "NORTH IRELAND"      = "UNITED KINGDOM"
)
for (pattern in names(country_fixes)) {
  M$AU1_CO <- gsub(pattern, country_fixes[[pattern]], M$AU1_CO)
}

# Per-country production: total articles, single-country (SCP) vs
# multi-country (MCP) publications, plus the article IDs per country.
MostProdCountriesdf <- M %>%
  group_by(AU1_CO) %>%
  summarize(
    Articles = n(),
    SCP = sum(nCO == 0),
    MCP = sum(nCO == 1),
    pminfo = paste(UT, collapse = ",")
  ) %>%
  rename(Country = AU1_CO) %>%
  arrange(desc(Articles))
if (nrow(MostProdCountriesdf) >= topS) {
  MostProdCountriesdf <- MostProdCountriesdf[seq_len(topS), ]
}

write.xlsx(MostProdCountriesdf, "MostProdCountriesdf.xlsx")
# 2023-12-12 16:45: progress checkpoint.
# Country data aggregation is complete at this point.

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # 
# Country production table from the bibliometrix summary (plot data only).
MostProdCountries <- as.data.frame(S$MostProdCountries)
MostProdCountries$Country  <- as.character(MostProdCountries$Country)
MostProdCountries$Articles <- as.character(MostProdCountries$Articles)
# Keep just the two chart columns, with Articles back as numeric.
MostProdCountriesUse <- MostProdCountries[, c("Country", "Articles")]
MostProdCountriesUse$Articles <- as.numeric(MostProdCountriesUse$Articles)
# Cairo::CairoJPEG(file="Country Scientific Production.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(MostProdCountriesUse,  aes(x =  fct_reorder(Country, (Articles)), y = Articles))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 修改别名
#   labs(title = "Country Scientific Production",y = 'Articles',x = 'Country' )+
#   # 字体
#   theme(text = element_text(size = 15))+
#   # x y 反转
#   coord_flip()
# dev.off()

# [Most Cited Countries] total citations per first-author country.
TCperCountries <- as.data.frame(S$TCperCountries)
colnames(TCperCountries) <- c("Country", "TotalCitations", "AverageArticleCitations")

# Recompute per-country totals from M so the PMIDs can be attached.
TCperCountriesData <- M %>%
  group_by(AU1_CO) %>%
  summarize(
    totalc   = sum(TC),
    articles = n(),
    pminfo   = paste(PM, collapse = ",")
  ) %>%
  rename(Country = AU1_CO) %>%
  arrange(desc(totalc))
TCperCountriesData <- TCperCountriesData[!is.na(TCperCountriesData$Country), ]
write.xlsx(TCperCountriesData, "TCperCountriesData.xlsx")

# Coerce the summary-table columns back to their natural types.
TCperCountries$Country <- as.character(TCperCountries$Country)
TCperCountries$TotalCitations <- as.numeric(as.character(TCperCountries$TotalCitations))
TCperCountries$AverageArticleCitations <- as.numeric(as.character(TCperCountries$AverageArticleCitations))

# Cairo::CairoJPEG(file="TCperCountries.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(TCperCountries,  aes(x =  fct_reorder(Country, (TotalCitations)), y = TotalCitations))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   # 修改别名
#   labs(title = "TCperCountries",y = 'TotalCitations',x = 'Country' )+
#   # x y 反转
#   coord_flip()
# dev.off()


# Most cited papers from the bibliometrix summary. NOTE: this overwrites the
# MostCitedPapers table built earlier; it is used for plotting only.
MostCitedPapers <- as.data.frame(S$MostCitedPapers)
colnames(MostCitedPapers) <- c("Paper", "DOI", "TC", "TCperYear", "NTC")
MostCitedPapers$Paper <- as.character(MostCitedPapers$Paper)
for (num_col in c("TC", "TCperYear", "NTC")) {
  MostCitedPapers[[num_col]] <- as.numeric(as.character(MostCitedPapers[[num_col]]))
}

# Cairo::CairoJPEG(file="MostCitedPapers.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(MostCitedPapers,  aes(x =  fct_reorder(Paper, (TC)), y = TC))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 字体
#   theme(text = element_text(size = 15))+
#   # 修改别名
#   labs(title = "MostCitedPapers",y = 'TC',x = 'Paper' )+
#   # x y 反转
#   coord_flip()
# dev.off()

# plotAll<-plot(x=results, k=10, pause=F)
# plot(plotAll$MostProdAuthors)

# Words
# [Most Frequent Words] the summary table holds both keyword families side
# by side; split it into Keywords-Plus (cols 3:4) and Author-Keywords
# (cols 1:2) halves. Both halves share an `Articles` column name.
MostRelKeywords <- as.data.frame(S$MostRelKeywords)
colnames(MostRelKeywords) <- c("AuthorKeywords", "Articles", "KeywordsPlus", "Articles")
MostRelKeywordsPlus   <- MostRelKeywords[, 3:4]
MostRelKeywordsAuthor <- MostRelKeywords[, 1:2]
MostRelKeywordsPlus$KeywordsPlus     <- as.character(MostRelKeywordsPlus$KeywordsPlus)
MostRelKeywordsPlus$Articles         <- as.character(MostRelKeywordsPlus$Articles)
MostRelKeywordsAuthor$AuthorKeywords <- as.character(MostRelKeywordsAuthor$AuthorKeywords)
MostRelKeywordsAuthor$Articles       <- as.character(MostRelKeywordsAuthor$Articles)
#############################################################
# Tag <-'ID'
# i <- which(names(M) == Tag)
# Tab <- unlist(strsplit(as.character(M %>% dplyr::pull(i)), 
#                        sep))
# Tab <- trimws(gsub("\\s+|\\.|\\,", " ", Tab))
# Tab <- Tab[nchar(Tab) > 0]
# Tab <- sort(table(Tab), decreasing = TRUE)
# remove.terms = NULL
# if ((Tag %in% c("DE", "ID")) & (!is.null(remove.terms))) {
#   term <- setdiff(names(Tab), toupper(remove.terms))
#   Tab <- Tab[term]
# }
# ID <- tableTag(M, "ID", sep)
#############################################



# verbose <- TRUE
# if (isTRUE(verbose)){cat("\nMost Relevant Keywords\n\n")}
#   k=min(c(length(M$DE),length(M$ID)))
# AAA=data.frame(cbind(M$DE[1:k]))
# AAA$MPA=row.names(AAA);
# AAA=AAA[,c(2,1)]
# names(AAA)=c("DE Keywords", "Articles")
# A2=data.frame(cbind(M$ID[1:k]))
# A2$MPA=row.names(A2);
# A2=A2[,c(2,1)]
# AAA[,c(3,4)]=A2
# names(AAA)=c("Author Keywords (DE)", "Articles","Keywords-Plus (ID)", "Articles" )
# AAA=format(AAA,justify="left",digits=3)
# row.names(AAA)=1:k
# if (isTRUE(verbose)){
#   print(AAA,row.names=TRUE);
#   cat("\n")
#   }else {
#     (AAA=NULL)
#   }
#############################################
# Keywords-Plus (ID field): explode the ";"-separated keyword strings and,
# per keyword, count articles and collect their IDs.
listiD <- strsplit(M$ID, ";")
niD <- lengths(listiD)  # keywords per article, to repeat the article IDs
dfiD <- data.frame(iDS = trimws(unlist(listiD)), PM = rep(M$UT, niD))
KeywordsPlusID <- dfiD %>%
  group_by(.data$iDS) %>%
  mutate(n = n()) %>%
  mutate(pmids = paste(.data$PM, collapse = ",")) %>%
  select(iDS, pmids, n) %>%
  as.data.frame() %>%
  distinct() %>%
  arrange(desc(.data$n))
KeywordsPlusID <- KeywordsPlusID[!is.na(KeywordsPlusID$iDS), ]
# head() avoids NA padding when fewer than topS distinct keywords remain;
# the original `[c(1:topS), ]` produced NA rows in that case.
KeywordsPlusID <- head(KeywordsPlusID, topS)
write.xlsx(KeywordsPlusID, 'KeywordsPlusID.xlsx')
###################################################

# Author Keywords (DE field): same explode-and-count as Keywords-Plus above.
listDE <- strsplit(M$DE, ";")
nDE <- lengths(listDE)  # keywords per article, to repeat the article IDs
dfDE <- data.frame(DES = trimws(unlist(listDE)), PM = rep(M$UT, nDE))
AuthorKeywordsDE <- dfDE %>%
  group_by(.data$DES) %>%
  mutate(n = n()) %>%
  mutate(pmids = paste(.data$PM, collapse = ",")) %>%
  select(DES, pmids, n) %>%
  as.data.frame() %>%
  distinct() %>%
  arrange(desc(.data$n))
AuthorKeywordsDE <- AuthorKeywordsDE[!is.na(AuthorKeywordsDE$DES), ]
# head() avoids NA padding when fewer than topS distinct keywords remain;
# the original `[c(1:topS), ]` produced NA rows in that case.
AuthorKeywordsDE <- head(AuthorKeywordsDE, topS)
write.xlsx(AuthorKeywordsDE, 'AuthorKeywordsDE.xlsx')


# Convert the article counts (coerced to character above) back to numeric
# so downstream sorting/plotting works on them.
MostRelKeywordsPlus$Articles <- as.numeric(MostRelKeywordsPlus$Articles)
MostRelKeywordsAuthor$Articles <- as.numeric(MostRelKeywordsAuthor$Articles)
# Cairo::CairoJPEG(file="MostRelKeywordsPlus.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(MostRelKeywordsPlus,  aes(x =  fct_reorder(KeywordsPlus, (Articles)), y = Articles))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 修改别名
#   labs(title = "Keywords Plus",y = 'Articles',x = 'key' )+
#   # 字体
#   theme(text = element_text(size = 15))+
#   # x y 反转
#   coord_flip()
# dev.off()
# Cairo::CairoJPEG(file="Authors keywords.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(MostRelKeywordsAuthor,  aes(x =  fct_reorder(AuthorKeywords, (Articles)), y = Articles))  +
#   # 条形图函数：fill设置条形图填充色，colour设置条形图边界颜色
#   geom_bar(stat = "identity", fill = "lightblue", colour = "black") +
#   # 修改别名
#   labs(title = "Authors keywords",y = 'Articles',x = 'key' )+
#   # 字体
#   theme(text = element_text(size = 15))+
#   # x y 反转
#   coord_flip()
# 
# dev.off()
# 2023-12-13 17:37: progress checkpoint.

# 词云 https://segmentfault.com/a/1190000023022232?utm_source=tag-newest
# 采用上面生成的key 和aude 词进行使用

# Wordcloud by Keywords Plus 服务器版本无法使用 暂时放一下
# MostRelKeywordsPlusTemp<- wordcloud2(MostRelKeywordsPlus,
#                                      size=0.5,#字体大小
#                                      fontFamily = 'Segoe UI',#字体
#                                      fontWeight = 'bold',#字体粗细
#                                      color='random-light',#字体颜色设置
#                                      backgroundColor = 'grey')
# 
# htmltools::save_html(MostRelKeywordsPlusTemp,file = "MostRelKeywordsPlusTemp.html")
# # 下面这个在linux 无法生成 换成了上面这个
# #saveWidget(MostRelKeywordsPlusTemp, file = "MostRelKeywordsPlusTemp.html")
# webshot("MostRelKeywordsPlusTemp.html", "MostRelKeywordsPluswordclouds.jpeg")
# MostRelKeywordsAuthorwordcloudsTemp<-wordcloud2(MostRelKeywordsAuthor,
#                                                 size=0.5,#字体大小
#                                                 fontFamily = 'Segoe UI',#字体
#                                                 fontWeight = 'bold',#字体粗细
#                                                 color='random-light',#字体颜色设置
#                                                 backgroundColor = 'grey')
# 
# htmltools::save_html(MostRelKeywordsAuthorwordcloudsTemp,file = "MostRelKeywordsAuthorwordcloudsTemp.html")
# #saveWidget(MostRelKeywordsAuthorwordcloudsTemp, file = "MostRelKeywordsAuthorwordcloudsTemp.html")
# webshot("MostRelKeywordsAuthorwordcloudsTemp.html", "MostRelKeywordsAuthorwordclouds.jpeg")


# treemap
# Cairo::CairoJPEG(file="Tree Map by Keywords Plus.jpeg", width=16, height=16,units="in",dpi=130)
# treemap(MostRelKeywordsPlus, index=c("KeywordsPlus","Articles"), vSize="Articles",
#         vColor="Articles", type="color",
#         
#         title='Tree Map by Keywords Plus',palette='RdBu')
# dev.off()

# Keyword growth over time (top 10 Keywords-Plus, cumulative counts).
# Assign to a distinct name so the bibliometrix function `KeywordGrowth`
# is not shadowed by its own result (the original assigned to `KeywordGrowth`).
kwGrowthTab <- KeywordGrowth(M, Tag = "ID", sep = ";", top = 10, cdf = TRUE)
DFKeywordGrowth <- melt(kwGrowthTab, id = 'Year')
# Export BEFORE renaming, so the xlsx keeps melt's default column names
# (Year, variable, value) exactly as before.
write.xlsx(DFKeywordGrowth, 'DFKeywordGrowth.xlsx')
colnames(DFKeywordGrowth) <- c('Year', 'keyword', 'value')
# Cairo::CairoJPEG(file="KeywordGrowth.jpeg", width=16, height=16,units="in",dpi=130)
# ggplot(DFKeywordGrowth,aes(Year,value, group=keyword, color=keyword))+geom_line()+theme(text = element_text(size = 15))+labs(title = "KeywordGrowth" )
# dev.off()
# 2023年12月14日11:22:43 下面类似有很多做过的

# Section 2: The Intellectual Structure of the field - Co-citation Analysis
# Article (References) co-citation analysis 数量太多就狠卡
# try({
#   referencesNetMatrix <- biblioNetwork(M, analysis = "coupling", network = "references", sep = ";")
#   referencesNetworknet=networkPlot(referencesNetMatrix, n = 10, Title = "Article (References) co-citation analysis", type = "auto", size.cex=TRUE, size=5, remove.multiple=FALSE, labelsize=1,edgesize = 10, edges.min=5)
#   Cairo::CairoJPEG(file="Article (References) co-citation analysis.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(referencesNetworknet$graph)
#   dev.off()
# })


########################################################################

# 2023年12月14日16:17:20 测试源码 
# crossprod <- Matrix::crossprod
# analysis = "coupling"
# NetMatrix <- NA
# network = "references"
# n = 10
# sep = ";"
# shortlabel = TRUE
# 
# labelShort <- function (NET, db = "isi") 
# {
#   LABEL <- colnames(NET)
#   YEAR <- suppressWarnings(as.numeric(sub(".*(\\d{4}).*", 
#                                           "\\1", LABEL)))
#   YEAR[is.na(YEAR)] <- ""
#   switch(db, isi = {
#     AU <- strsplit(LABEL, " ")
#     AU <- unlist(lapply(AU, function(l) {
#       paste(l[1], " ", l[2], sep = "")
#     }))
#     LABEL <- paste0(AU, " ", YEAR, sep = "")
#   }, scopus = {
#     AU <- strsplit(LABEL, "\\. ")
#     AU <- unlist(lapply(AU, function(l) {
#       l[1]
#     }))
#     LABEL <- paste0(AU, ". ", YEAR, sep = "")
#   })
#   return(LABEL)
# }
# 
# removeDuplicatedlabels<-function (LABEL) 
# {
#   tab <- sort(table(LABEL), decreasing = T)
#   dup <- names(tab[tab > 1])
#   for (i in 1:length(dup)) {
#     ind <- which(LABEL %in% dup[i])
#     if (length(ind) > 0) {
#       LABEL[ind] <- paste0(LABEL[ind], "-", as.character(1:length(ind)), 
#                            sep = "")
#     }
#   }
#   return(LABEL)
# }
# 
# WCR <- Matrix::t(cocMatrix(M, Field = "CR", type = "sparse", n, sep, short = short))
# NetMatrix <- crossprod(WCR, WCR)
# NetMatrix <- NetMatrix[nchar(colnames(NetMatrix)) != 0, 
#                        nchar(colnames(NetMatrix)) != 0]
# if (network == "references" & isTRUE(shortlabel)) {
#   LABEL <- labelShort(NetMatrix, db = tolower(M$DB[1]))
#   LABEL <- removeDuplicatedlabels(LABEL)
#   colnames(NetMatrix) <- rownames(NetMatrix) <- LABEL
# }
# 
# summary(NetMatrix)
###############################################################################



#Journal (Source) co-citation analysis
# try({
#   cocitationJournalM=metaTagExtraction(M,"CR_SO",sep=";")
#   NetMatrixJournal <- biblioNetwork(cocitationJournalM, analysis = "co-citation", network = "sources", sep = ";")
#   Journalcocitationnet=networkPlot(NetMatrixJournal, n = 10, Title = "Journal Co-Citation Network", type = "auto", size.cex=TRUE, size=10, remove.multiple=FALSE, labelsize=1,edgesize = 10, edges.min=5)
#   Cairo::CairoJPEG(file="Journal (Source) co-citation analysis.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(Journalcocitationnet$graph)
#   dev.off()
# })


# Section 3: Historiograph - Direct citation linkages
# try({
#   histResults <- histNetwork(M, sep = ";")
#   options(width = 130)
#   Cairo::CairoJPEG(file="Historiograph.jpeg", width=16, height=16,units="in",dpi=130)
#   histPlot(histResults, n=20, size = 5, labelsize = 4)
#   dev.off()
# })

# 2023-12-15: the author-collaboration network itself is built in Java; here
# we only export the author string (AU) and article ID (PM) per record.
MAUCOM <- subset(M, select = c(AU, PM))
write.xlsx(MAUCOM, 'MAUCOM.xlsx')



# Section 4: The conceptual structure - Co-Word Analysis
# Conceptual-structure map of the field: correspondence analysis (CA) on terms extracted from the keyword, title, or abstract fields.


#关键词共线网络

# NetKeyWordMatrix <- biblioNetwork(M, analysis = "co-occurrences", network = "keywords", sep = ";")
# # Plot the network
# # 	
# # is a character object. It indicates the network map layout:
#   
#   # type="auto"		Automatic layout selection
# # type="circle"		Circle layout
# # type="sphere"		Sphere layout
# # type="mds"		Multidimensional Scaling layout
# # type="fruchterman"		Fruchterman-Reingold layout
# # type="kamada"		Kamada-Kawai layout
# netKeyWord=networkPlot(NetKeyWordMatrix, normalize="association", weighted=T, n = 10,
#                        Title = "Keyword Co-occurrences", type = "kamada", size=T,edgesize = 4,labelsize=0.8)
# 
# 
# # 
# Cairo::CairoJPEG(file="Keyword Co-occurrences.jpeg", width=16, height=16,units="in",dpi=130)
# plot(netKeyWord$graph)
# dev.off()

# try({
#   CS <- conceptualStructure(M,field="ID", method="CA", minDegree=4, clust=5, stemming=FALSE, labelsize=10, documents=10)
# })
# 
# # Results of CA, MCA or MDS method  CA、MCA或MDS方法的结果
# # plot(CS$res)
# # Results of cluster analysis  聚类分析结果
# #plot(CS$km.res)
# # Conceptual structure map 概念结构图
# try({
#   Cairo::CairoJPEG(file="Conceptual structure map.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(CS$graph_terms)
#   dev.off()
# })
# try({
#   #  Results of cluster analysis  聚类分析结果
#   Cairo::CairoJPEG(file="Results of cluster analysis.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(CS$graph_dendogram)
#   dev.off()
#   
# })
# try({
#   # Factorial map of the documents with the highest contributes 贡献值最高的文档的阶乘映射
#   Cairo::CairoJPEG(file="Factorial map of the documents with the highest contributes.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(CS$graph_documents_Contrib)
#   dev.off()
#   
# })
# try({
#   #Section 5: Thematic Map
#   thematicMapres = thematicMap(M, field = "ID", n = 250, minfreq = 5, size = 0.7, repel = TRUE)
#   Cairo::CairoJPEG(file="Thematic Map.jpeg", width=16, height=16,units="in",dpi=130)
#   plot(thematicMapres$map)
#   dev.off()
#   
# })
# 
# try({
#   # Section 6: The social structure - Collaboration Analysis
#   
#   # Author collaboration network
#   AuthorNetMatrix <- biblioNetwork(M, analysis = "collaboration",  network = "authors", sep = ";")
#   aunet=networkPlot(AuthorNetMatrix,  n = 20, Title = "Author collaboration",type = "circle", size=10,size.cex=T,edgesize = 3,labelsize=1)
#   Cairo::CairoJPEG(file="Author collaboration network.jpeg", width=16, height=16,units="in",dpi=150)
#   plot(aunet$graph)
#   dev.off()
#   
# })
# try({
#   # Edu协作网络
#   NetMatrix <- biblioNetwork(M, analysis = "collaboration",  network = "universities", sep = ";")
#   Edunet=networkPlot(NetMatrix,  n = 20, Title = "Edu collaboration",type = "circle", size=4,size.cex=F,edgesize = 3,labelsize=1)
#   Cairo::CairoJPEG(file="Edu collaboration network.jpeg", width=16, height=16,units="in",dpi=150)
#   plot(Edunet$graph)
#   dev.off()
#   
# })
# try({
#   # 合作国家
#   McON<- metaTagExtraction(M, Field = "AU_CO", sep = ";")
#   NetMatrixcON <- biblioNetwork(McON, analysis = "collaboration", network = "countries", sep = ";")
#   netcollaboration=networkPlot(NetMatrixcON, n = dim(NetMatrixcON)[1], Title = "Country Collaboration", type = "circle", size=TRUE, remove.multiple=FALSE,labelsize=1.2,cluster="none")
#   Cairo::CairoJPEG(file="Country Collaboration.jpeg", width=16, height=16,units="in",dpi=150)
#   plot(netcollaboration$graph)
#   dev.off()
#   
#   
# })

tryCatch({
  # Conceptual-structure (co-word) analysis: correspondence analysis on the
  # Keywords-Plus occurrence matrix, hierarchical clustering of the term
  # coordinates, then export of points + clusters + per-article membership
  # for chart rendering in Java.
  quali = NULL
  quanti = NULL
  quali.supp = NULL
  quanti.supp = NULL
  synonyms = NULL
  remove.terms = NULL
  labelsize=10
  # Minimum number of occurrences for a term to enter the analysis.
  minDegree = 3
  # Factorial method: correspondence analysis.
  method = "CA"
  clust = 5
  # Maximum number of clusters to keep (default 5).
  k.max = 5
  # Maximum number of documents shown per partition.
  documents = 2
  # Uses a custom `factorial()` (not base R's) -- presumably defined in
  # utils.R, sourced at the top of the script; TODO confirm.
  
  # cbPalette <- colorlist()
  SUPP = data.frame(M[, quanti.supp])
  names(SUPP) = names(M)[quanti.supp]
  row.names(SUPP) = tolower(row.names(M))
  
  binary = FALSE
  if (method == "MCA") {
    binary = TRUE
  }
  
  # Term-occurrence matrix: rows = documents, columns = Keywords-Plus terms.
  CW <- cocMatrix(M, Field = "ID", type = "matrix", sep = ";", 
                  binary = binary, remove.terms = remove.terms, synonyms = synonyms)
  
  # Tag each row label with its article ID so points can be traced back.
  rownames(CW) <- paste(rownames(CW),M$UT,sep = ' PMID:')
  
  # Filter: keep frequent terms, drop the literal "NA" column and empty rows.
  CW = CW[, colSums(CW) >= minDegree]
  CW = CW[, !(colnames(CW) %in% "NA")]
  CW = CW[rowSums(CW) > 0, ]
  cwdframFilter <- as.data.frame(CW)
  
  # write.csv(M[,c('UT','TI','ID')],file = '20231206.csv')
  
  colnames(CW) = tolower(colnames(CW))
  rownames(CW) = tolower(rownames(CW))
  p = dim(CW)[2]
  # res.mca <- CA(CW, quanti.sup=quanti, quali.sup=quali, ncp=2, graph=FALSE)
  results <- factorial(CW, method = method, quanti = quanti, quali = quali)
  res.mca <- results$res.mca
  df <- results$df
  docCoord <- results$docCoord
  df_quali <- results$df_quali
  df_quanti <- results$df_quanti
  
  ### Total citations of all documents.
  if ("TC" %in% names(M) & method!="MDS"){docCoord$TC=as.numeric(M[toupper(rownames(docCoord)),"TC"])}
  
  # Selection of optimal number of clusters (gap statistics).
  #a=fviz_nbclust((df), kmeans, method = "gap_stat",k.max=k.max)['data']$data$y
  km.res=hclust(dist(df),method="average")
  if (clust=="auto"){
    clust=min((length(km.res$height)-which.max(diff(km.res$height))+1),k.max)
  }else{clust=max(1,min(as.numeric(clust),k.max))}
  
  # Cut the dendrogram into `clust` groups and compute per-cluster centroids.
  km.res$data=df
  km.res$cluster=cutree(km.res,k=clust)
  km.res$data.clust=cbind(km.res$data,km.res$cluster)
  names(km.res$data.clust)[3]="clust"
  centers<- km.res$data.clust %>% group_by(.data$clust) %>% 
    summarise("Dim.1"=mean(.data$Dim.1),"Dim.2"=mean(.data$Dim.2)) %>% 
    as.data.frame()
  
  km.res$centers=centers[,c(2,3,1)]
  # Loads a `logo` dataset (presumably bibliometrix's plot watermark);
  # unused in this data-export path.
  data("logo",envir=environment())
  logo <- grid::rasterGrob(logo,interpolate = TRUE)
  
  # Term points (shape "1") plus cluster centers (shape "0"), colored per cluster.
  df_clust <- km.res$data.clust %>% 
    mutate(shape = "1",
           label = row.names(.)) %>% 
    bind_rows(km.res$centers %>% mutate(shape = "0", label="")) %>% 
    mutate(color = colorlist()[.data$clust])
  
  # Convex hull of each cluster (for drawing cluster outlines).
  hull_data <- 
    df_clust %>%
    group_by(.data$clust) %>% 
    slice(chull(.data$Dim.1, .data$Dim.2))
  
  hull_data <- hull_data %>%
    bind_rows(
      hull_data %>% group_by(clust) %>% slice_head(n=1)
    ) %>%
    mutate(id = row_number()) %>%
    arrange(.data$clust,.data$id)
  
  size <- labelsize
  # Attach, for every term point, the set of articles it occurs in (gcr, 2023-12-08).
  df_clust <- dplyr::mutate(df_clust,keys = row.names(df_clust))
  CWframe<- as.data.frame(CW)
  CWframe <- dplyr::mutate(CWframe,keys = row.names(CWframe))
  # indexKey <-(colnames(CWframe))
  # indexKey<-indexKey[1:(length(indexKey)-1)]
  rows<- as.data.frame(rownames(CWframe))
  colnames(rows) <- 'arti'
  rownames(CWframe) <- NULL
  colnames(CWframe)
  # data.frame(CWframe,row.names=1)
  # Transpose so terms become rows; columns are labelled with the article
  # keys taken from the original rows.
  CWframecandr<-t(CWframe)
  colnames(CWframecandr) <- CWframe[,'keys']
  
  # Drop the surplus last row (the transposed `keys` column itself).
  CWframecandrdel <- CWframecandr[-dim(CWframecandr)[1],]
  CWframecandrdel<-as.data.frame(CWframecandrdel)
  CWframecandrdel <- dplyr::mutate(CWframecandrdel,keys = row.names(CWframecandrdel))
  
  rownames(CWframecandrdel) <- NULL
  # Join the per-term article membership onto the cluster/coordinate table.
  alldata1 <- dplyr::left_join(df_clust,CWframecandrdel,by = 'keys')
  
  # 2023-12-09: attempt to fetch all fields in one pass; abandoned as too
  # cumbersome -- the local export below is consumed by Java instead.
  # CWframecandrdelTest<- CWframecandrdel[CWframecandrdel$keys=='fibroblasts',1]
  # CWframecandrdelt<- t(CWframecandrdel)
  # CWframecandrdelt<- as.data.frame(CWframecandrdelt)
  # colnames(CWframecandrdelt) <- CWframecandrdelt[dim(CWframecandrdelt)[1],]
  # CWframecandrdelt<- CWframecandrdelt[-dim(CWframecandrdelt)[1],]
  # write.ex(alldata1,file = 'alldata1.csv')
  # x = data to write, file = output name, row.names = FALSE suppresses row
  # names, sheetName = "Sheet1" names the worksheet.
  write.xlsx(alldata1, file = "alldata1.xlsx", row.names = FALSE, sheetName = "Sheet1")

},error = function(e){
  # NOTE(review): errors from the whole analysis above are swallowed
  # silently here; consider at least message(conditionMessage(e)) so
  # failures are visible in logs.
})

tryCatch({
  # Factorial map of the most contributing documents: keep the top `documents`
  # docs per cluster, join them to their cluster centers, and export the
  # combined table for the downstream (Java) chart generator.

  # Cap the requested number of documents at what is actually available.
  if (documents > dim(docCoord)[1]) {
    documents <- dim(docCoord)[1]
  }

  # Cluster centers in factorial space, one palette color per cluster.
  centers <- data.frame(dim1 = km.res$centers[, 1], dim2 = km.res$centers[, 2])
  cbPalette <- colorlist()
  centers$color <- cbPalette[1:dim(centers)[1]]
  row.names(centers) <- paste("cluster", as.character(1:dim(centers)[1]), sep = "")

  # Assign each document to its nearest center (euclDist stores the cluster
  # index in $color), then map that index to the actual palette color.
  A <- euclDist(docCoord[, 1:2], centers)
  docCoord$Cluster <- A$color
  A$color <- cbPalette[A$color]
  A$contrib <- docCoord$contrib

  # Keep only the `documents` highest-contributing docs within each cluster.
  A <- A %>% mutate(names = row.names(A)) %>% group_by(.data$color) %>%
    top_n(n = documents, wt = .data$contrib) %>% select(!"contrib") %>%
    as.data.frame()

  # Re-export the joined keyword/cluster table.
  # FIX: openxlsx::write.xlsx takes `rowNames`, not base-R `row.names`
  # (the old spelling is deprecated in openxlsx and only produced a warning).
  write.xlsx(alldata1, file = "alldata1.xlsx", rowNames = FALSE, sheetName = "Sheet1")

  # Stack the selected documents on top of the centers so both appear in one
  # table, then label every row with its name in the `nomi` column.
  row.names(A) <- A$names
  A <- A[, -4]                      # drop the helper `names` column
  names(centers) <- names(A)
  allSize <- dim(A)[1]
  A <- rbind(A, centers)
  x <- A$dim1
  y <- A$dim2
  A[, 4] <- row.names(A)
  names(A)[4] <- "nomi"

  # Join each document row to its cluster center, matched on the shared color
  # (paste's default sep = " " yields column names like "center color").
  centersMy <- dplyr::mutate(centers, cluster = row.names(centers))
  colnames(centersMy) <- paste('center', colnames(centersMy))
  paramA <- A                       # NOTE(review): snapshot kept for parity; unused here
  colnames(A)[3] <- c('center color')
  aceters <- dplyr::left_join(A, centersMy, by = 'center color')
  # aceters <- aceters[c(1:allSize),]

  # Persist the combined document + center table.
  write.xlsx(aceters, file = 'Factorial map of the documents.xlsx')

}, error = function(e) {
  # Best-effort export: report the failure instead of swallowing it silently.
  message("Factorial map export failed: ", conditionMessage(e))
})





# 循环数据搞不定 才用行转列 进行拼接吧
# for (variable in indexKey) {
#   print(variable)
#   aa<-as.data.frame(CWframe[,variable])
#   colnames(aa) <- variable
#   dim(aa[aa$variable==1,])
#   aa$flag < as.numeric(aa$flag )
#   aa2<-cbind(rows,aa)
#   
# }
# for (variable in asss) {
#   print(variable)
#   aa<-as.data.frame(CWframe[,variable])
#   dim(aa)
#   colnames(aa) <- 'flag'
#   aa$flag < as.numeric(aa$flag )
#   aa2<- dplyr::cross_join(rows,aa)
#    print(dim(aa2))
#    print(("+++++++++++++++++++++++++++++"))
# }
# print(("+++++++++++++++++++++++++++++"))

# 画图给
# b <- ggplot(df_clust, aes(x=.data$Dim.1, y=.data$Dim.2, shape=.data$shape, color=.data$color)) +
#   geom_point() + 
#   geom_polygon(data = hull_data,
#                aes(fill = .data$color,
#                    colour = .data$color),
#                alpha = 0.3,
#                show.legend = FALSE) +
#   ggrepel::geom_text_repel(aes(label=.data$label)) +
#   theme_minimal()+
#   labs(title= paste("Conceptual Structure Map - method: ",method,collapse="",sep="")) +
#   geom_hline(yintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
#   geom_vline(xintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
#   theme(
#     text = element_text(size=size),
#     axis.title=element_text(size=size,face="bold"),
#     plot.title=element_text(size=size+1,face="bold"),
#     panel.background = element_rect(fill = "white", colour = "white"),
#     axis.line.x = element_line(color="black",linewidth=0.5),
#     axis.line.y = element_line(color="black",linewidth=0.5),
#     panel.grid.major = element_blank(),
#     panel.grid.minor = element_blank())
# 
# b 
# 这个是关键词聚类结束
# 至此 这里就实现图一显示 需要合并下数据

# Factorial map of the documents with the highest contributes
# 贡献值最高的文档的阶乘映射
## Factorial map of most contributing documents



# Signal on the console that the whole script has finished.
finished_msg <- '全部结束'
print(finished_msg)


# 




