library(readxl)
library(dplyr)
library(tm)
library(tidyr)
library(tidytext)
library(ggplot2)
library(wordcloud2)
library(widyr)
library(ggraph)
library(igraph)
library(networkD3)
library(visNetwork)
library(lubridate)
library(tidyverse)
library(wesanderson)
library(Cairo)
library(showtext)
library(viridis)
# Read the US patent family representative list (one row per patent family).
rawdata<-read_excel("专利族列表.xlsx",sheet = "Sheet5-调光")

#======================== Text-based relatedness computation =======================================

#======================== Abstract preprocessing ==============
# Re-encode the English family abstract to latin1 so downstream tm processing
# behaves consistently, then keep only selected publication numbers.
rawdatalatin<-rawdata%>%
  mutate(Abstract=iconv(英文同族摘要,to='latin1'))%>%
  #mutate(Abstract=iconv(技术功效语段,to='latin1'))%>%
  #filter(第一轮聚类结果 %in% c('1','2','3','7','13','10','24','25','30','32','39','61','65'))
  filter(公开号 %in% subDataPN) # Second-round filter: publication numbers from the connected
                                # subgraph found in round 1.
                                # NOTE(review): subDataPN is only assigned near the END of this
                                # script (decompose step), so this line presumes a prior run —
                                # confirm the intended two-pass workflow.

# Build a tm corpus from the abstracts and normalise the text:
# lowercase, strip punctuation, strip digits.
# NOTE(review): modern tm usually wants tm_map(corpus, content_transformer(tolower));
# passing tolower directly works for SimpleCorpus but may warn — verify tm version.
testCorpus<-Corpus(VectorSource(rawdatalatin$Abstract))
testCorpus<-testCorpus%>%
  tm_map(tolower)%>%
  tm_map(removePunctuation)%>%
  tm_map(removeNumbers)
testCorpusCopy<-testCorpus
# Stem each document; the unstemmed copy above is kept for optional stem completion.
testCorpus<-testCorpus%>%tm_map(stemDocument)
# Stem completion (disabled):
#补笔testCorpus<-testCorpus%>%tm_map(stemCompletion,dictionary=testCorpusCopy)

# Assemble the analysis data frame: one row per patent with a 0-based id
# (matching networkD3's zero-based node indexing), publication number (PN),
# applicant, tech classification, stemmed abstract text, and earliest
# priority date/year.
PatentList<-unnest(data_frame(id=as.numeric(rownames(rawdatalatin))-1,PN=rawdatalatin$公开号,Applicant=rawdatalatin$Applicant,TechCl=rawdatalatin$TechCode,Abstract=lapply(testCorpus,as.character),minPRDate=rawdatalatin$`[A]最早优先权日`,minPRYear=year(rawdatalatin$`[A]最早优先权日`),Imp=rawdatalatin$重要度))
# Add 5-year and 10-year period labels derived from the digits of the priority
# year (string manipulation; an NA year yields the label "NA0~NA9", which the
# colour scale near the end of the script accounts for).
PatentList<-PatentList%>%
  mutate(minPR5PeriodYear=ifelse(substr(minPRYear,4,4)<=5,paste(substr(minPRYear,1,3),"0~",substr(minPRYear,1,3),"5",sep=""),paste(substr(minPRYear,1,3),"6~",substr(minPRYear,1,3),"9",sep="")))%>%
  mutate(minPR10PeriodYear=paste(substr(minPRYear,1,3),"0~",substr(minPRYear,1,3),"9",sep=""))

# Tokenise abstracts into one-word-per-row (tidytext) form.
'摘要分词'
tidyPatAbstract<-PatentList%>%
  unnest_tokens(word,Abstract)
# Drop standard English stop words (tidytext::stop_words, joined on "word").
'去除停止词'
tidyPatAbstract<-tidyPatAbstract%>%
  anti_join(stop_words)

# Preview word frequencies (console print + bar chart) to decide which extra
# domain stop words to add below.
'preview word count, determine stop word'
tidyPatAbstract%>%count(word,sort=TRUE)%>%
  filter(n>10)%>%
  mutate(word=reorder(word,n))%>%
  print(n=100)%>%
  ggplot(aes(word,n))+
  geom_col()+
  coord_flip()



'remove myStopword from data'
# Curated stop-word list. Entries are STEMMED forms so they match the stemmed
# abstract tokens; group order mirrors how the list grew over review passes.
myStopword<-data_frame(word=c(
  # Generic patent-drafting vocabulary (first review pass).
  'form','embodi','fig','system','unit','accord','includ','control','exampl','invent','claim','provid','shown','configur','generat','describ','process','compris','method','modul','bte','combin','apparatus','comput','data','determin','receiv','devic','correspond','element','set','na','oper','predetermin',
  # Additions from a later pass.
  'respect','effect','layerxd','defin',
  # Generic patent-drafting vocabulary (second pass; duplicates with the first
  # pass are harmless because the list is only used via anti_join).
  'claim', 'compris', 'accord', 'character', 'element', 'form', 'method', 'unit', 'assembl', 'includ', 'especi', 'provid', 'consist','plural', 'reduc', 'substanti', 'desir', 'constitut', 'possibl', 'prefer', 'refer', 'essenti','devic', 'system', 'type', 'andxd', 'xd', 'invent', 'structur', 'chosen', 'obtain', 'follow', 'andor','whereinxd','comprisingxd',
  # 3D-display domain stop words.
  'imag','display','stereoscop','threedimension','imagexd',
  # Liquid-crystal dimming domain stop words.
  'liquid','crystal',
  # Effect-wording stop words.
  'improv','anoth'
))

# Remove the curated stop words from the tokenised abstracts (joins on "word").
tidyPatAbstract<- tidyPatAbstract%>%anti_join(myStopword)

# The frequency preview above can be re-run repeatedly to refine myStopword.
'可以反复测试preview word count, determine stop word'

#================= Text correlation analysis ============================================================
# Pairwise correlation between documents over their word-count vectors
# (widyr::pairwise_cor on id/word/n); keep each unordered pair only once
# by retaining the item1 > item2 direction.
'计算相关度'
patdoc_cors<-tidyPatAbstract%>%count(id,word,sort=TRUE)%>%
  ungroup()%>%
  pairwise_cor(id, word, n, sort = TRUE)%>%
  filter(item1>item2)
#patdoc_cors2<-tidyPatAbstract%>%count(PN,word,sort=TRUE)%>%pairwise_cor(PN,word,n,sort=TRUE)
# Overview of the correlation distribution.
'总览相关度数据'
summary(patdoc_cors)


##=====================================基于分类标识的相关性分析============================
'类别相关度计算函数'
'1-字符串近似度计算函数matchDegreeBetweenTwoStrings，返回两个字符串从左侧起匹配的字符数'
matchDegreeBetweenTwoStrings<-function(String1, String2){
  # Length of the common prefix of two strings: the number of characters that
  # match starting from the left. Returns 0 for empty input, and (new) 0 for
  # NA input instead of erroring.
  if (is.na(String1) || is.na(String2)) {
    return(0)
  }
  minLen <- min(nchar(String1), nchar(String2))
  if (minLen == 0) {
    return(0)
  }
  # Compare the two character vectors once, instead of re-comparing ever-longer
  # substr() prefixes in a while loop (the original was O(n^2) in prefix length).
  chars1 <- strsplit(substr(String1, 1, minLen), "")[[1]]
  chars2 <- strsplit(substr(String2, 1, minLen), "")[[1]]
  mismatch <- which(chars1 != chars2)
  if (length(mismatch) == 0) {
    # One string is a prefix of the other.
    return(minLen)
  }
  mismatch[1] - 1
}

'2-计算两个包含多分类号且用;隔开的分类号之间的近似度'
#适用于一记录多分类，且用分号隔开的情形，但执行速度慢
corOnCl<- function(item1Cl, item2Cl){
  # Similarity between two multi-valued classification fields (codes separated
  # by ";"). Vectorised over records: element m of the returned list is the
  # MAXIMUM normalised common-prefix ratio over all code pairs of record m
  # (a value in [0, 1]); 0 when either field is empty.
  item1Cl<-as.character(item1Cl)
  item2Cl<-as.character(item2Cl)
  tmpResult<-list()
  for (m in seq_along(item1Cl)){
    tmpResult[[m]]<-0
    if (item1Cl[[m]]==""||item2Cl[[m]]=="") next
    # Split the ";"-separated classification strings into individual codes.
    item1ClUnnest<-unlist(strsplit(item1Cl[[m]],";"))
    item2ClUnnest<-unlist(strsplit(item2Cl[[m]],";"))
    # Compare every code pair; keep the best normalised prefix-match ratio.
    for (code1 in item1ClUnnest){
      for (code2 in item2ClUnnest){
        matched<-matchDegreeBetweenTwoStrings(code1,code2)
        # Normalise by the longer code so identical codes score exactly 1.
        ratio<-matched/max(nchar(code1),nchar(code2))
        # BUGFIX: the original compared the raw match COUNT against the stored
        # RATIO (count > ratio is true for any match), so tmpResult ended up as
        # the LAST matching pair's ratio rather than the maximum. It also
        # recomputed the match twice per pair. Compare ratios, computed once.
        if (ratio>tmpResult[[m]]) tmpResult[[m]]<-ratio
      }
    }
  }
  return(tmpResult)
}
# Single-code variant: one classification per record, no ";" splitting
# (slightly faster than corOnCl).
corOnClSim<- function(item1Cl, item2Cl){
  # For each record pair, return the common-prefix length of the two
  # classification codes divided by the longer code's length (a value in
  # [0, 1]); 0 when either code is empty. Returns a list, one element per
  # record, matching corOnCl's return shape.
  item1Cl<-as.character(item1Cl)
  item2Cl<-as.character(item2Cl)
  tmpResult<-list()
  for (idx in seq_along(item1Cl)){
    code1<-item1Cl[[idx]]
    code2<-item2Cl[[idx]]
    if (code1==""||code2==""){
      tmpResult[[idx]]<-0
    } else {
      longerLen<-max(nchar(code1),nchar(code2))
      tmpResult[[idx]]<-matchDegreeBetweenTwoStrings(code1,code2)/longerLen
    }
  }
  return(tmpResult)
}

# Classification-based relatedness for every patent pair.
'按照技术分类，确定分类相关度'

# Cartesian product of all publication-number pairs (self-pairs removed),
# annotated with each side's id and tech classification via two joins.
# NOTE(review): rename(c("new"="old")) is plyr-style syntax; with current
# dplyr this would need rename(new = old) — confirm which rename is
# actually dispatched in this session.
DescartesPatentList<-as.data.frame(cbind(item1PN=rep(PatentList$PN, each=nrow(PatentList)),item2PN=rep(PatentList$PN,nrow(PatentList))))%>%filter(item1PN!=item2PN)%>%
  left_join(data_frame(PatentList$id,PatentList$PN,PatentList$TechCl,),by=c("item1PN"="PatentList$PN"))%>%
  rename(c("item1Cl"="PatentList$TechCl","item1id"="PatentList$id"))%>%
  left_join(data_frame(PatentList$id,PatentList$PN,PatentList$TechCl),by=c("item2PN"="PatentList$PN"))%>%
  rename(c("item2Cl"="PatentList$TechCl","item2id"="PatentList$id"))

# Replace NA classifications with "" (disabled; NA rows are filtered out below instead).
'替换分类为NA替换为""'
#DescartesPatentList[is.na(DescartesPatentList$item1Cl),]$item1Cl<-""
#DescartesPatentList[is.na(DescartesPatentList$item2Cl),]$item2Cl<-""

# Drop pairs where either side lacks a tech classification.
'过滤掉技术分类为na的记录'
DescartesPatentList<-DescartesPatentList%>%
  filter(!is.na(item1Cl),!is.na(item2Cl))

# Keep one direction per pair (item1id > item2id, mirroring the text-correlation
# filter) and score classification similarity with the fast single-code variant.
DescartesPatentListCor<-DescartesPatentList[DescartesPatentList$item1id>DescartesPatentList$item2id,]%>%
  mutate(item1Cl=as.character(item1Cl),item2Cl=as.character(item2Cl))%>%
  mutate(corCl=corOnClSim(item1Cl,item2Cl))

##===================================== Combine relatedness across dimensions ====================================================
# Weighted average of the text correlation (weight kText) and the
# classification similarity (weight kCl); missing values are treated as 0.
kText=1
kCl=0
DescartesPatentListCorJoin<-DescartesPatentListCor%>%
  full_join(patdoc_cors,by=c("item1id"="item1","item2id"="item2"))%>%
  mutate(correlation=ifelse(is.na(correlation),0,correlation))%>%
  mutate(corCl=ifelse(corCl=="NULL",0,corCl))%>% # treat "NULL" placeholders in corCl as 0
  mutate(SummaryCor=(kText*as.numeric(correlation)+kCl*as.numeric(corCl))/(kText+kCl))
# Min-max rescale the combined score to [0, 1].
# NOTE(review): divides by CorMax - CorMin — degenerate (NaN) if all scores
# are identical; confirm that is acceptable for this data.
CorMin<-min(DescartesPatentListCorJoin$SummaryCor)
CorMax<-max(DescartesPatentListCorJoin$SummaryCor)
DescartesPatentListCorJoin$SummaryCor<-(DescartesPatentListCorJoin$SummaryCor-CorMin)/(CorMax-CorMin)
#DescartesPatentListCorJoin$SummaryCor<-DescartesPatentListCorJoin$SummaryCor+(kText)/(kText+kCl)



# When only text similarity is available, kCl can be set to 0 (as above).

#================= Draw the relatedness network =============================
# Node table for plotting: one row per patent, with a multi-line description
# used for tooltips, plus a node size (ImpSize) and opacity (alpha) derived
# from the importance flag: Imp == "Core" -> large/opaque, NA -> small/half,
# anything else -> medium. The NA cases are patched in a second mutate because
# ifelse(Imp=="Core",...) yields NA when Imp is NA.
'制作节点数据'
NodesPat<-data_frame(id=PatentList$id,Applicant=PatentList$Applicant,PN=PatentList$PN,Cl=PatentList$TechCl,description=paste(Applicant=PatentList$Applicant,PatentList$PN,PatentList$TechCl,PatentList$Abstract,sep="\n"),minPR5PeriodYear=PatentList$minPR5PeriodYear,minPRDate=PatentList$minPRDate,Imp=PatentList$Imp)%>%
  mutate(minPR10PeriodYear=PatentList$minPR10PeriodYear)%>%
  mutate(ImpSize=ifelse(Imp=="Core",100,15))%>%
  mutate(ImpSize=ifelse(is.na(Imp),2,ImpSize))%>%
  mutate(alpha=ifelse(Imp=="Core",1,0))%>%
  mutate(alpha=ifelse(is.na(Imp),0.5,alpha))
#filter(!is.na(minPRDate))%>%
#mutate(id= reorder(id, desc(minPRDate)))


# Edge table: every scored pair joined with each side's applicant name.
'制作连接数据'
LinksPatdoc_cors<-DescartesPatentListCorJoin%>%
  left_join(data_frame(item1id=PatentList$id,Applicant1=PatentList$Applicant),by=c("item1id"="item1id"))%>%
  left_join(data_frame(item2id=PatentList$id,Applicant2=PatentList$Applicant),by=c("item2id"="item2id"))#%>%
  #LinksPatdoc_cors<-DescartesPatentListCor%>%
  #filter(SummaryCor>1.3)
  # filter(Applicant1!="KDX" & SummaryCor>0.38) 
  #filter(((Applicant1=="KDX" | Applicant2=="KDX") & SummaryCor>0.3) | (Applicant1!="KDX" & Applicant2!="KDX" & SummaryCor>0.4))

# Append manually curated extra links read from Excel, rebuilt to the same
# column layout as the computed links so the rbind() below lines up.
ManualLink<-read_excel("多公司专利族合并.xlsx",sheet = "人工增加关联关系")
ManualLink<-ManualLink%>%
  left_join(data_frame(item1id=PatentList$id,Applicant1=PatentList$Applicant,item1Cl=PatentList$TechCl,item1PN=PatentList$PN),by=c("item1PN"="item1PN"))%>%
  left_join(data_frame(item2id=PatentList$id,Applicant2=PatentList$Applicant,item2Cl=PatentList$TechCl,item2PN=PatentList$PN),by=c("item2PN"="item2PN"))
ManualLink<-data_frame(item1PN=ManualLink$item1PN,item2PN=ManualLink$item2PN,item1id=ManualLink$item1id,item1Cl=ManualLink$item1Cl,item2id=ManualLink$item2id,item2Cl=ManualLink$item2Cl,corCl=ManualLink$corCl,correlation=ManualLink$correlation,SummaryCor=ManualLink$SummaryCor,Applicant1=ManualLink$Applicant1,Applicant2=ManualLink$Applicant2)

LinksPatdoc_cors<-rbind(LinksPatdoc_cors,ManualLink)

# Drop intermediate objects that are no longer needed and reclaim memory.
rm(list = c("testCorpus", "testCorpusCopy", "patdoc_cors",
            "DescartesPatentList", "DescartesPatentListCor",
            "DescartesPatentListCorJoin"))
gc()


#summary(LinksPatdoc_cors$SummaryCor)
# Static plots: force-directed (FR) layout plus contour density map (updated 2024-07-03).
# NOTE(review): rename(c('new'='old')) is plyr-style; current dplyr expects
# rename(new = old) — confirm which rename is dispatched.
connect<-LinksPatdoc_cors%>%
  rename(c('from'='item1PN','to'='item2PN','value'='SummaryCor'))%>%
  filter(value>0.4) # Filter gate 1: this edge set drives the later clustering


# vertices, option 1: build from the patent list (disabled)
#vertices<-NodesPat%>%
#  rename('name'='PN')
#vertices<-vertices[,c(3,1,2,seq(4,10))]

# vertices, option 2: derive nodes from the connect data.
# Count connections per node (occurrences of each PN over both edge endpoints).
c( as.character(connect$from), as.character(connect$to)) %>%
  as_tibble() %>%
  group_by(value) %>%
  summarize(n=n()) -> vertices
colnames(vertices) <- c("name", "n")

# Build the undirected igraph object and detect communities via random walks.
mygraph <- graph_from_data_frame( connect, vertices = vertices, directed = FALSE )
# Find communities (walktrap algorithm).
#com <- walktrap.community(mygraph)
com <- cluster_walktrap(mygraph)

# Renumber community ids so that group 1 is the largest community, group 2 the
# second largest, etc.: re-level the group factor by community size descending.
# NOTE(review): relies on sort(..., index.return=TRUE) over the summary counts;
# verify the ix indices line up with the factor levels as intended.
vertices <- vertices %>% 
  mutate(group =com$membership) %>%
  mutate(group=as.numeric(factor(group,
                                 levels=sort(summary (as.factor(group),maxsum = max(group)),index.return=TRUE,decreasing = T)$ix,
                                 order=TRUE)))
summary(as.factor(vertices$group),maxsum = max(vertices$group))
hist(vertices$group)

vertices <- vertices %>%
  filter(group<20) %>%  # Filter gate 2: node set used for plotting (keep the 19 largest groups)
  #filter(n>40) %>% # alternative: filter by connection count
  arrange(group,desc(n)) %>%
  mutate(name=factor(name, name))
# Attach node attributes (applicant, ImpSize, period labels, ...) from NodesPat.
vertices <- vertices %>% 
  #left_join(data_frame(NodesPat$ImpSize,NodesPat$PN,),by=c("name"="NodesPat$PN"))%>%
  left_join(NodesPat,by=c("name"="PN"))#%>%
  #rename("ImpSize"="NodesPat$ImpSize")

# Keep only edges whose endpoints survived the node filters, attach the
# "from" node's attributes, then apply the final edge-strength cut.
connect <- connect %>%
  filter(from %in% vertices$name) %>%
  filter(to %in% vertices$name)%>%
  left_join(vertices,by=c('from'='name'))%>%
  filter(value>0.6) # Filter gate 3: thins the number of edges actually drawn

# Preview the distinct group ids present and their size distribution.
unique(vertices$group)  
hist(vertices$group)

# Rebuild the igraph object from the filtered node/edge sets.
#mygraph <- graph_from_data_frame( connect, vertices = vertices, directed = FALSE )
mygraph <- graph_from_data_frame(connect,  vertices = vertices, directed = FALSE )

# Remove isolated vertices (degree 0) left over after edge filtering.
bad.vs<-V(mygraph)[degree(mygraph) == 0]
mygraph<-delete_vertices(mygraph, bad.vs)

# mycolor <- wes_palette("Darjeeling1", max(vertices$group), type = "continuous")
# mycolor <- sample(mycolor, length(mycolor))

# Use the squared correlation as edge weight: squaring widens the gradient
# between strong and weak edges without changing the overall distribution order.
E(mygraph)$weight<-E(mygraph)$value^2

# Re-cluster on the pruned graph, overwrite the group attribute, and renumber
# groups by community size (largest becomes group 1), as done earlier for
# vertices. Then build a shuffled colour palette, one colour per group.
com2 <- cluster_walktrap(mygraph)
V(mygraph)$group<-com2$membership
V(mygraph)$group<-as.numeric(factor(V(mygraph)$group,
                  levels=sort(summary(as.factor(V(mygraph)$group),maxsum=max(V(mygraph)$group)),index.return=TRUE,decreasing = T)$ix,
                  order=TRUE))
mycolor <- wes_palette("Darjeeling1", max(V(mygraph)$group), type = "continuous")
mycolor <- sample(mycolor, length(mycolor))

# Topic-word extraction ======================================================================
# Characteristic words per community (after isolated points were removed and
# the graph re-clustered): tf-idf over group/word counts, top-15 words per
# group, plotted as one facet per group and written to a PDF sized by the
# number of groups.
igraph_word_group<-data_frame(name=V(mygraph)$name,group=V(mygraph)$group)%>%
  left_join(tidyPatAbstract,by=c('name'='PN'))%>%
  select(group,word)%>%
  count(group,word,sort=TRUE)%>%
  bind_tf_idf(word,group,n)%>%
  group_by(group) %>%
  arrange(desc(tf_idf))%>%
  #mutate(word = factor(word, levels = rev(unique(word)))) %>%
  top_n(15,tf_idf) %>%
  ungroup%>%
  mutate(word = reorder(word,tf_idf))%>%
  ggplot(aes(word, tf_idf, fill = group)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~group, ncol = 10, scales = "free") +
  coord_flip()
CairoPDF("topic_igraph_word_group.pdf",width = 30, height =ceiling(max(V(mygraph)$group)/10)*3)
#showtext_begin()
igraph_word_group
#showtext_end()
dev.off() # Close the device, writing the PDF.
#

# Static force-directed plot. Compute the Fruchterman-Reingold layout once so
# the link plot and the density plot below share identical coordinates.
ivanfr<-layout_with_fr(mygraph)
#ivanfr<-layout_with_fr(mygraph,weights = rep(10,length(E(mygraph)$value))) # Constant weights distort the relatedness distribution (layout becomes uniformly loose or dense).
p_link<-ggraph(mygraph,layout=ivanfr) + 
  #ggraph(mygraph,layout='fr')  +
  geom_edge_link(edge_colour="black", edge_alpha=0.2, edge_width=0.3) +
  #geom_node_point(aes(size=ImpSize, fill=as.factor(group)), shape=21,color='black',alpha=0.9) +
  geom_node_point(aes(size=2, fill=as.factor(group)), shape=21,color='black',alpha=0.9) +
  scale_size_continuous(range=c(0.5,2)) +
  #scale_fill_manual(values=mycolor) +
  #scale_fill_viridis(option = 'magma',discrete=TRUE)+
  #geom_node_text(aes(label=ifelse(ImpSize>=15, as.character(name), "")), size=1, color="black") +
  expand_limits(x = c(-1.2, 1.2), y = c(-1.2, 1.2))+
  theme_minimal() +
  theme(
    legend.position="none",
    background = 'NA', # NOTE(review): 'background' is not a standard theme element — confirm intended (plot.background?)
    panel.grid = element_blank(),
    axis.line = element_blank(),
    axis.ticks =element_blank(),
    axis.text =element_blank(),
    axis.title = element_blank()
    #plot.margin=unit(c(0,0,0,0), "null"),
    #panel.spacing=unit(c(0,0,0,0), "null")
  )
# Link plot labelled with publication numbers (name); labels only for larger nodes.
CairoPDF("plot_plink_label_name.pdf",width = 10, height =10)
#showtext_begin()
p_link+
  geom_node_text(aes(label=ifelse(ImpSize>=15, as.character(name), "")), size=1, color="black")
#showtext_end()
dev.off() # Close the device, writing the PDF.
# Link plot labelled with cluster group ids.
CairoPDF("plot_plink_label_group.pdf",width = 10, height =10)
#showtext_begin()
p_link+
  geom_node_text(aes(label=ifelse(ImpSize>=15, as.character(group), "")), size=1, color="black")
#showtext_end()
dev.off() # Close the device, writing the PDF.
# Link plot labelled with applicant names (needs showtext + msyh font for CJK text).
CairoPDF("plot_plink_label_Applicant.pdf",width = 10, height =10,family = "msyh")
showtext_begin()
p_link+
  geom_node_text(aes(label=ifelse(ImpSize>=15, as.character(Applicant), "")), size=1, color="black")
showtext_end()
dev.off() # Close the device, writing the PDF.

# Unlabelled link plot.
CairoPDF("plot_plink_nonlabel.pdf",width = 10, height =10)
#showtext_begin()
p_link
#showtext_end()
dev.off() # Close the device, writing the PDF.

# Transparent-background PNG for later compositing with the density map.
ggsave(p_link, width = 10, height = 10, units = "in", filename = "plot_plink_nonlabel.png", bg = "transparent")

# Contour density map of node positions, drawn over the same FR layout
# coordinates (ivanfr) as the link plot so the two images align.
p_density<-ggplot(data_frame(x=ivanfr[,1],y=ivanfr[,2]),aes(x=x,y=y))+
  
  geom_density2d_filled(bins=30)+geom_point(size=1)+
  coord_fixed()+
  expand_limits(x = c(-1.2, 1.2), y = c(-1.2, 1.2))+
  theme_minimal() +
  theme(
    legend.position="none",
    panel.grid = element_blank(),
    axis.line = element_blank(),
    axis.ticks =element_blank(),
    axis.text =element_blank(),
    axis.title = element_blank()
    #plot.margin=unit(c(0,0,0,0), "null"),
    #panel.spacing=unit(c(0,0,0,0), "null")
  )
CairoPDF("plot_density.pdf",width = 10, height =10)
#showtext_begin()
p_density
#showtext_end()
dev.off()

# Cluster result output ====
# Clustering BEFORE isolated points were removed (vertices table).
write.table(data_frame(name=vertices$name,group=vertices$group),"cluster_vertices.txt",append=FALSE,sep = "\t",quote = FALSE,row.names = TRUE, col.names = TRUE,fileEncoding="UTF-8")
# Clustering AFTER isolated points were removed (pruned igraph object).
write.table(data_frame(name=V(mygraph)$name,group=V(mygraph)$group),"cluster_igraph.txt",append=FALSE,sep = "\t",quote = FALSE,row.names = TRUE, col.names = TRUE,fileEncoding="UTF-8")

# Prepare data for the second analysis round: publication numbers of the first
# connected component, consumed by the subDataPN filter near the top on re-run.
# NOTE(review): igraph's decompose() does not guarantee components are sorted
# by size — dg[[1]] is the first, not necessarily the LARGEST, component;
# consider selecting the component with maximal vcount() explicitly.
dg<-decompose(mygraph)
subDataPN<-V(dg[[1]])$name
rm(dg)
gc()


# JavaScript executed by networkD3 on node click: removes any existing tooltip
# and appends a fixed-position tooltip <div> showing the node's description,
# coloured like the clicked node.
# BUGFIX: the original string contained '#'-style comments (not valid
# JavaScript — JS comments are '//') and a stray 'uj' token after
# .style('bottom', '50px'), both of which made the injected script a syntax
# error in the browser so the click handler never ran.
clickJS <- "
d3.selectAll('.xtooltip').remove(); 
d3.select('body').append('div')
.attr('class', 'xtooltip')
.style('position', 'fixed')             // 描述出现位置
.style('border-radius', '0px')
.style('padding', '5px')
.style('opacity', '0.85')              
.style('background-color', '#161823')
.style('box-shadow', '2px 2px 6px #161823')
// 描述内容
.html(d.description) 
.style('right', '50px')
.style('bottom', '50px')
// 描述颜色
.style('color', d3.select(this).style('fill'))
;
"
# Interactive force network coloured by applicant (networkD3).
# ordinal: d3 colour scale mapping the known applicant names to fixed colours.
ordinal<-"
d3.scaleOrdinal()
.domain(['Z2D','KDX','PHILIPS','SONY','SHARP','LEIA','SuperD','宁波维真','易维视'])
.range(['#4e79a7','#f28e2c','#e15759','#76b7b2','#59a14f','#edc949','#af7aa1','#ff9da7','#9c755f'])
"
# Node ids (item1id/item2id) are the 0-based ids built in PatentList; node size
# comes from ImpSize and the click handler shows the description tooltip.
myforce<-forceNetwork(Links = LinksPatdoc_cors, Nodes = NodesPat, Source = "item1id",Target = "item2id", Value = "SummaryCor", NodeID = "PN",fontSize = 30,Nodesize = "ImpSize",colourScale =JS(ordinal),
                      Group = "Applicant", opacity = 0.8, legend = TRUE,zoom=T,charge=-1,bounded=F,clickAction= clickJS)
saveNetwork(myforce,file="test按申请人.html")

# Temporary view for analysing isolated points (same parameters as myforce).
'临时分析孤立点'
myforcetmp<-forceNetwork(Links = LinksPatdoc_cors, Nodes = NodesPat, Source = "item1id",Target = "item2id", Value = "SummaryCor", NodeID = "PN",fontSize = 30,Nodesize = "ImpSize",colourScale =JS(ordinal),
                         Group = "Applicant", opacity = 0.8, legend = TRUE,zoom=T,charge=-1,bounded=F,clickAction= clickJS)
saveNetwork(myforcetmp,file="test孤立点分析.html")

# Interactive network coloured by decade of earliest priority.

# d3 colour scale keyed by the minPR10PeriodYear labels; 'NA0~NA9' is the
# bucket produced for records whose priority year is NA.
ordinalYear<-"
d3.scaleOrdinal()
.domain(['NA0~NA9','1960~1969','1970~1979','1980~1989','1990~1999','2000~2009','2010~2019','2020~2029'])
.range(['#f7fcf0','#e0f3db','#ccebc5','#a8ddb5','#7bccc4','#4eb3d3','#2b8cbe','#08589e'])
"
myforce2<-forceNetwork(Links = LinksPatdoc_cors, Nodes = NodesPat, Source = "item1id",Target = "item2id", Value = "SummaryCor", NodeID = "PN",fontSize = 30,Nodesize = "ImpSize",colourScale =JS(ordinalYear),
                       Group = "minPR10PeriodYear", opacity = 1, legend = TRUE,zoom=T,charge=-200,bounded=F,clickAction= clickJS)
saveNetwork(myforce2,file="test按申请年代.html")

