/**
 * 
 * 2016.1.14.
 * 
 * 该类别专用于模块度信息的计算。
 */
package yyh.guanganmen.algorithm;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.math3.stat.inference.ChiSquareTest;
import org.apache.commons.math3.stat.inference.TTest;

import yyh.guanganmen.common.FileUtil;

/**
 * @author xuezhongzhou
 *
 */
public class CategoryModularity {

	/**
	 * Computes edge-density statistics for an ICD molecular-association network
	 * (e.g. a shared-gene network) whose nodes are partitioned by ICD chapter.
	 * For every pair of chapters it writes the observed and the degree-based
	 * expected (configuration-model) edge counts/densities together with a
	 * chi-square test to strChapRSFile, and per-ICD-to-chapter edge densities
	 * (including each ICD against its own chapter) to strNodeChapRSFile.
	 * History: 2016.1.14 semantic-similarity filtering added; 2016.1.2 extended
	 * to emit strNodeChapRSFile; 2015.12.30 copied from catMolModularity.
	 * @author xuezhongzhou
	 * @param strDisCatFile ICD-to-chapter mapping file.
	 * @param intICDPos column index of the ICD code in strDisCatFile.
	 * @param intICDChapterPos column index of the chapter label in strDisCatFile.
	 * @param strMolAssFile association edges, format: icd1\ticd2\tw\n.
	 * @param dblThresholdWeight weight threshold applied to column 2 of strMolAssFile.
	 * @param blGreater true: keep edges with weight &gt;= threshold; false: keep weight &lt; threshold.
	 * @param blFirstFldLine true if the first line of strMolAssFile is a header to skip.
	 * @param strICDSemSimFile semantic-similarity edges between ICD codes
	 *        (e.g. icd_linSim_filtered_20151230.txt).
	 * @param dblLowLimit inclusive lower bound on the semantic similarity.
	 * @param dblUplimit exclusive upper bound on the semantic similarity.
	 * @param strICDNodeDegFile node-degree file of the molecular network.
	 * @param intICDFldPos column index of the ICD code in strICDNodeDegFile.
	 * @param intDegFldPos column index of the degree in strICDNodeDegFile.
	 * @param intEdgeNum total edge count of the molecular network (random model denominator).
	 * @param strSplit field separator used by all input files.
	 * @param strChapRSFile output: chapter-to-chapter edge-density information (appended).
	 * @param strNodeChapRSFile output: ICD-to-chapter edge-density information (appended).
	 * @return matrix of pairwise chapter density differences (modularity-style); null on early I/O failure.
	 */
	public double[][] catMolSpecPairEdgeDensity(
			String strDisCatFile,
			int intICDPos,
			int intICDChapterPos,
			String strMolAssFile,
			double dblThresholdWeight,
			boolean blGreater,
			boolean blFirstFldLine,
			String strICDSemSimFile,
			double dblLowLimit,
			double dblUplimit,
			String strICDNodeDegFile,
			int intICDFldPos,
			int intDegFldPos,
			int intEdgeNum,
			String strSplit,
			String strChapRSFile,
			String strNodeChapRSFile){
		
		BufferedReader bf=null;
		
		BufferedReader bf2=null; // reader for strICDSemSimFile and strICDNodeDegFile.
		
		BufferedWriter bfw=null;
		String strLine=null;
		
		double dblResult=0.; // NOTE(review): never used in this method.
		
		String strTmp=null;
		
		int intTmp=0;
		
		double[][] dblMod=null;
		
		int[][] intENodCh=null; // edge counts between each node and each chapter.
		
	    String[] strArr=null;
	    
		System.out.println("Start processing at time:"+new Date());
		
		HashMap<String,String> hmICDCat=new HashMap<String,String>(); // ICD code -> chapter label.
		
		HashMap<String,Integer> hmCatIds=new HashMap<String,Integer>(); // chapter label -> dense chapter id.
		
		HashMap<Integer,String> hmCatIdNames=new HashMap<Integer,String>(); // dense chapter id -> chapter label.
		
		HashMap<Integer,Integer> hmCatICDNums=new HashMap<Integer,Integer>(); // chapter id -> number of ICD codes in it.
		
		HashMap<String,Integer> hmICDNames=new HashMap<String, Integer>(); // ICD code -> dense node id.
		
		HashMap<Integer,String> hmICDNameIDs=new HashMap<Integer,String>(); // dense node id -> ICD code.
		
		HashMap<String,Double> hmICDSemPairs=new HashMap<String, Double>(); // ICD similarity edges whose value lies in [dblLowLimit, dblUplimit).
		
		HashMap<String,Integer> hmICDDeg=new HashMap<String, Integer>(); // degree of each ICD code in the molecular network.
		
		
		HashMap<String,String> hmChapICDs=new HashMap<String, String>(); // chapter label -> tab-joined ICD codes of that chapter (original comment was a stale copy of the degree map's).
		
		try {
			
			
			bf2=new BufferedReader(new FileReader(strICDSemSimFile)); // read and store the semantic-similarity edges.
			
			while((strLine=bf2.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				if(Double.valueOf(strArr[2])>=dblLowLimit&&
						Double.valueOf(strArr[2])<dblUplimit){
					
					hmICDSemPairs.put(strArr[0]+"\t"+strArr[1], Double.valueOf(strArr[2]));
				}
			
			}
			bf2.close();
			
			/************ done! *********/
			
			bf2=new BufferedReader(new FileReader(strICDNodeDegFile)); // read and store the ICD node degrees.
			
			while((strLine=bf2.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				hmICDDeg.put(strArr[intICDFldPos], Integer.valueOf(strArr[intDegFldPos]));
			
			}
			
			bf2.close();
			
			/********** done! *********/
			
			bf=new BufferedReader(new FileReader(strDisCatFile));
			
			int intCatNo=0;
			int intICDNo=0;
			
			// Read the ICD-to-chapter file: assign dense ids to chapters and ICD
			// codes, count codes per chapter, and collect each chapter's code list.
			while((strLine=bf.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				hmICDCat.put(strArr[intICDPos], strArr[intICDChapterPos]);
				
				if(!hmCatIds.containsKey(strArr[intICDChapterPos])){
					
					hmCatIdNames.put(intCatNo, strArr[intICDChapterPos]);
					hmCatIds.put(strArr[intICDChapterPos], intCatNo++);
				
				}
				
				if(!hmCatICDNums.containsKey(hmCatIds.get(strArr[intICDChapterPos]))){
					
					hmCatICDNums.put(hmCatIds.get(strArr[intICDChapterPos]), 1);
					
					
				}else{
					
					intTmp=hmCatICDNums.get(hmCatIds.get(strArr[intICDChapterPos]));
					hmCatICDNums.remove(hmCatIds.get(strArr[intICDChapterPos]));
					hmCatICDNums.put(hmCatIds.get(strArr[intICDChapterPos]), intTmp+1);
					
				}
				
				if(!hmChapICDs.containsKey(strArr[intICDChapterPos])){
					
					hmChapICDs.put(strArr[intICDChapterPos], strArr[intICDPos]);
					
					
				}else{
					
					strTmp=hmChapICDs.get(strArr[intICDChapterPos]);
					hmChapICDs.remove(strArr[intICDChapterPos]);
					hmChapICDs.put(strArr[intICDChapterPos], strTmp+"\t"+strArr[intICDPos]);
					
				}
				
				if(!hmICDNames.containsKey(strArr[intICDPos])){
					
					hmICDNames.put(strArr[intICDPos], intICDNo);
					hmICDNameIDs.put(intICDNo,strArr[intICDPos]);
					intICDNo++;
					
				}
				
			}
			
			bf.close();
			
			int[] intEii=new int[hmCatIds.size()]; // intra-chapter edge counts.
			int[] intChapMinusEii=new int[hmCatIds.size()]; // intra-chapter edges to subtract (pairs failing the similarity filter). Added 2016.1.14.
			
			int[][] intAii=new int[hmCatIds.size()][hmCatIds.size()]; // inter-chapter edge counts.
			
			intENodCh=new int[hmCatIds.size()][hmICDNames.size()]; // node-to-chapter edge counts.
			
			dblMod=new double[hmCatIds.size()][hmCatIds.size()];
			
			double[][] dblChapRndEii=new double[hmCatIds.size()][hmCatIds.size()];
			
			int intM=0; //The edge number of strMolAssFile.
			
			String strTmp1=null,strTmp2=null;
			
			bf=new BufferedReader(new FileReader(strMolAssFile));
			
			bfw=new BufferedWriter(new FileWriter(strChapRSFile,true));
			
			if(blFirstFldLine) bf.readLine();
			
			// Stream the association edges that pass the weight threshold and tally
			// intra-/inter-chapter and node-to-chapter edge counts. Intra-chapter
			// edges must additionally pass the semantic-similarity filter.
			while((strLine=bf.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				if(blGreater){
					
					if(Double.valueOf(strArr[2])>=dblThresholdWeight){
						
						intM++;
						
						strTmp1=hmICDCat.get(strArr[0]);
						
						strTmp2=hmICDCat.get(strArr[1]);
						
						if(strTmp1==null||strTmp2==null) 
							continue;
						
						if(strTmp1.equalsIgnoreCase(strTmp2)){
							
							if(hmICDSemPairs.containsKey(strArr[0]+"\t"+strArr[1])||
									hmICDSemPairs.containsKey(strArr[1]+"\t"+strArr[0])){ // semantic-similarity filtering added 2016.1.14.
								
								intEii[hmCatIds.get(strTmp1)]++;
								
								intENodCh[hmCatIds.get(strTmp1)][hmICDNames.get(strArr[1])]++;
								intENodCh[hmCatIds.get(strTmp2)][hmICDNames.get(strArr[0])]++;
								
							}else{
								intChapMinusEii[hmCatIds.get(strTmp1)]++;
							}
							
							
						}else{
							
							intAii[hmCatIds.get(strTmp1)][hmCatIds.get(strTmp2)]++;
							intAii[hmCatIds.get(strTmp2)][hmCatIds.get(strTmp1)]++;
							
							intENodCh[hmCatIds.get(strTmp1)][hmICDNames.get(strArr[1])]++;
							intENodCh[hmCatIds.get(strTmp2)][hmICDNames.get(strArr[0])]++;
							
						}
						
						
						
					}
				}else{
					
					// Mirror of the branch above for weights strictly below the threshold.
					if(Double.valueOf(strArr[2])<dblThresholdWeight){
						
						intM++;
						
						strTmp1=hmICDCat.get(strArr[0]);
						
						strTmp2=hmICDCat.get(strArr[1]);
						
						if(strTmp1==null||strTmp2==null) 
							continue;
						
						if(strTmp1.equalsIgnoreCase(strTmp2)){
							
							if(hmICDSemPairs.containsKey(strArr[0]+"\t"+strArr[1])||
									hmICDSemPairs.containsKey(strArr[1]+"\t"+strArr[0])){ // semantic-similarity filtering added 2016.1.14.
								
								intEii[hmCatIds.get(strTmp1)]++;
								
								intENodCh[hmCatIds.get(strTmp1)][hmICDNames.get(strArr[1])]++;
								intENodCh[hmCatIds.get(strTmp2)][hmICDNames.get(strArr[0])]++;
								
							}else{
								
								intChapMinusEii[hmCatIds.get(strTmp1)]++;
							}
							
							
						}else{
							
							intAii[hmCatIds.get(strTmp1)][hmCatIds.get(strTmp2)]++;
							intAii[hmCatIds.get(strTmp2)][hmCatIds.get(strTmp1)]++;
							intENodCh[hmCatIds.get(strTmp1)][hmICDNames.get(strArr[1])]++;
							intENodCh[hmCatIds.get(strTmp2)][hmICDNames.get(strArr[0])]++;
						}
						
						
					}
				}
				
				
			}
			
			bf.close();
			
			intM=0; // NOTE(review): reset but never read again.
			
			int inti=0,intj=0;
			
			// Compute the expected (configuration-model) number of edges between
			// every chapter pair: sum over node pairs of deg(u)*deg(v)/(2*intEdgeNum).
			
			java.util.Iterator<Entry<String, String>> it=hmChapICDs.entrySet().iterator();
			java.util.Iterator<Entry<String, String>> it2=null;
			
			Map.Entry<String, String> me=null;
			Map.Entry<String, String> me2=null;
			
			String[] strTmpArr2=null;
			
			bfw.write("The random chapter matrix\n");
				
			while(it.hasNext()){
				
				me=(Map.Entry<String, String> )it.next();
				
				strArr=me.getValue().split("\t");
				
				it2=hmChapICDs.entrySet().iterator();
				
				while(it2.hasNext()){
					
					me2=it2.next();
					
					strTmpArr2=me2.getValue().split("\t");
					
					System.out.println(me2.getKey());
					
					
					if(!me.getKey().equalsIgnoreCase(me2.getKey())){
						
						// Distinct chapters: all cross pairs contribute.
						for(int i=0;i<strArr.length;i++){
							
							for(int j=0;j<strTmpArr2.length;j++){
								
//								System.out.println(strArr[i]+"\t"+strTmpArr2[j]);
								
								if(hmICDDeg.containsKey(strArr[i])&&hmICDDeg.containsKey(strTmpArr2[j])){
									dblChapRndEii[hmCatIds.get(me.getKey())][hmCatIds.get(me2.getKey())]+=hmICDDeg.get(strArr[i])*hmICDDeg.get(strTmpArr2[j])/(2.*intEdgeNum);
								}
//								System.out.println(hmICDDeg.get(strArr[i])*hmICDDeg.get(strTmpArr2[j])/(2.*intEdgeNum));
								
							
							}
						}
					}else{
						
						// Same chapter: only unordered pairs within the chapter (j > i).
						for(int i=0;i<strArr.length;i++){
							
							for(int j=i+1;j<strArr.length;j++){
								
//								System.out.println(strArr[i]+"\t"+strTmpArr2[j]);
								
								if(hmICDDeg.containsKey(strArr[i])&&hmICDDeg.containsKey(strArr[j])){
									dblChapRndEii[hmCatIds.get(me.getKey())][hmCatIds.get(me.getKey())]+=hmICDDeg.get(strArr[i])*hmICDDeg.get(strArr[j])/(2.*intEdgeNum);
									
								}
//								System.out.println(hmICDDeg.get(strArr[i])*hmICDDeg.get(strArr[j])/(2.*intEdgeNum));
								
							
							}
						}
						
					}
					
				}
				
			}

			bfw.write("Chapter1\tNumber of icdes\tChapter2\tNumber of icdes" +
					"\tReal edge number\tReal edge density"+
					"\tRandom edge number\tRandom edge density\t"+
					"chisq\tchis_p-value\n");
		
			ChiSquareTest cst=new ChiSquareTest();
			
			long[][] dblRatio=new long[2][2]; // 2x2 contingency table (named dblRatio but holds long counts).

			double dblEdge1,dblEdge2;
			
			// For each chapter pair (including i==j), compare observed vs. expected
			// edge counts against the maximum possible count with a chi-square test.
			for(int i=0;i<hmCatIds.size();i++){
				
				strTmp=hmCatIdNames.get(i);
				
				for(int j=i;j<hmCatIds.size();j++){
					
					strTmp2=hmCatIdNames.get(j);
					
					if(i==j){
						
						dblRatio[0][0]=intEii[i];
						
						// Max possible intra-chapter edges: n*(n-1)/2.
						// NOTE(review): int*int multiply could overflow for very large chapters.
						dblEdge1=(hmCatICDNums.get(i)*(hmCatICDNums.get(j)-1))/2.;
						
						dblRatio[1][0]=(long)dblEdge1-intEii[i];
						
						dblRatio[0][1]=(long)dblChapRndEii[i][j];
						
						dblRatio[1][1]=(long)(dblEdge1-dblChapRndEii[i][j]);
						
						bfw.write(strTmp+"\t"+hmCatICDNums.get(i)+"\t"+strTmp2+"\t"
								+hmCatICDNums.get(j)+"\t"+intEii[i]+"\t"+intEii[i]/dblEdge1+"\t"+dblChapRndEii[i][j]+"\t"
								+dblChapRndEii[i][j]/dblEdge1+"\t"+cst.chiSquare(dblRatio)+"\t"+cst.chiSquareTest(dblRatio)+"\n");
					
					}else{
						
						dblRatio[0][0]=intAii[i][j];
						
						// Max possible inter-chapter edges: n_i * n_j.
						dblEdge1=(hmCatICDNums.get(i)*hmCatICDNums.get(j));
						
						dblRatio[1][0]=(long)dblEdge1-intAii[i][j];
						
						dblRatio[0][1]=(long)dblChapRndEii[i][j];
						
						dblRatio[1][1]=(long)(dblEdge1-dblChapRndEii[i][j]);
						
						bfw.write(strTmp+"\t"+hmCatICDNums.get(i)+"\t"+strTmp2+"\t"
								+hmCatICDNums.get(j)+"\t"+intAii[i][j]+"\t"+intAii[i][j]/dblEdge1+"\t"+dblChapRndEii[i][j]+"\t"+
								dblChapRndEii[i][j]/dblEdge1+"\t"+cst.chiSquare(dblRatio)+"\t"+cst.chiSquareTest(dblRatio)+"\n");
						
					}
				
				}
			}

			
			// Write intra- and inter-chapter edge-density information.
			
			bfw.write("Chapter1\tNumber of Chpater1\t" +
					"Number of edges in Chpater1\tMinus number of edges in Chapter 1\tEdge density of Chapter1" +
					"\tChapter2\tNumber of Chapter2\t" +"Number of edges in Chpater2\tMinus number of edges in Chapter 2\tEdge density of Chpater2\t" +
							"Number of edges between two chapters\t"+"Edge density between two chapters\t" +
									"Minus value of density\n");
			
			
			double dblEdgeFullNum;
			
			for(int i=0;i<intEii.length;i++){// the following code was revised 2016.1.14.
				
				inti=hmCatICDNums.get(i);
				
				// Denominator excludes the filtered-out intra-chapter pairs.
				dblEdge1=(inti*(inti-1)-intChapMinusEii[i])/2.0;
				
				for(int j=i+1;j<intEii.length;j++){
					
					
					intj=hmCatICDNums.get(j);
					
					dblEdge2=(intj*(intj-1)-intChapMinusEii[j])/2.0;
					
					dblEdgeFullNum=(inti*intj);
					
					// Modularity-style score: sum of both intra-chapter densities
					// minus twice the inter-chapter density.
					dblMod[i][j]=intEii[i]/dblEdge1+intEii[j]/dblEdge2;
					
					dblMod[i][j]-=2.0*intAii[i][j]/dblEdgeFullNum;
					
					bfw.write(hmCatIdNames.get(i)+"\t"+inti+"\t"+intEii[i]+"\t"+intChapMinusEii[i]+"\t"+intEii[i]/dblEdge1+"\t"
							+hmCatIdNames.get(j)+"\t"+intj+"\t"+intEii[j]+"\t"+intChapMinusEii[j]+"\t"+intEii[j]/dblEdge2+"\t"
							+intAii[i][j]+"\t"+1.*intAii[i][j]/dblEdgeFullNum+"\t"+dblMod[i][j]+"\n");
					
				}
				
			}
			
			bfw.close();
			
			/******** end *******/
			
			bfw=new BufferedWriter(new FileWriter(strNodeChapRSFile,true));
			
			double dblDensity=0.;
			
			
			bfw.write("ChapterName\tICD\tChapterName of ICD\tNumber of edges\tNumber of Nodes in Chapter\t" +
					"Density to Chapter\tEdge numbers in Chapter\tDensity of Chapter\n");
			
			// Write, for every (chapter, ICD node) pair, the density of the node's
			// edges into that chapter.
			for(int i=0;i<hmCatIds.size();i++){
				
				inti=hmCatICDNums.get(i);
				
				dblEdge1=(inti*(inti-1)-intChapMinusEii[i])/2.0;
				
				for(int j=0;j<intENodCh[0].length;j++){
					
					
					if(hmCatIdNames.get(i).equalsIgnoreCase(hmICDCat.get(hmICDNameIDs.get(j)))){// the ICD belongs to this chapter itself.
						
						// Exclude the node itself from the denominator.
						dblDensity=1.*intENodCh[i][j]/(inti-1);
						
					}else{
						dblDensity=1.*intENodCh[i][j]/inti;
					}

					
					bfw.write(hmCatIdNames.get(i)+"\t"+hmICDNameIDs.get(j)+"\t"+hmICDCat.get(hmICDNameIDs.get(j))+"\t"+
							intENodCh[i][j]+"\t"+inti+"\t"+dblDensity+"\t"+intEii[i]+"\t"+intEii[i]/dblEdge1+"\n");
					
				}
			}
			
			hmCatIds.clear();
			hmICDCat.clear();
			hmCatICDNums.clear();
			hmCatIdNames.clear();
			hmICDNameIDs.clear();
			hmICDNames.clear();
			hmICDSemPairs.clear();
			
			bfw.close();
			
			
		} catch (FileNotFoundException e) {
			// NOTE(review): streams opened above may leak on this path (no finally).
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		

		return dblMod;
		
	}
	
	/**
	 * Flags cross-chapter disease codes in an edge-density file and computes
	 * their significance: a one-sample t-test of each code's density against the
	 * same-chapter densities, plus a chi-square test of observed vs. expected
	 * (configuration-model) edge counts. Results are appended to strRSFile with
	 * three extra columns. 2016.1.4.
	 * @author xuezhongzhou
	 * @param strEDensityFile input density file (header line is echoed to the output).
	 * @param intOrgChFldPos column index of the chapter whose density block this row belongs to.
	 * @param intICDFldPos column index of the ICD code.
	 * @param intICDChFldPos column index of the ICD code's own chapter.
	 * @param intEDensFldPos column index of the edge density.
	 * @param intICDNumPos column index of the chapter's node count.
	 * @param strICDDegFile node-degree file of the molecular network.
	 * @param intICDPos column index of the ICD code in strICDDegFile.
	 * @param intDegPos column index of the degree in strICDDegFile.
	 * @param strSplit field separator used by the input files.
	 * @param strRSFile output file (appended).
	 */
	
	public void genSigInterClsEdgeDensity(
			String strEDensityFile,
			int intOrgChFldPos,
			int intICDFldPos,
			int intICDChFldPos,
			int intEDensFldPos,
			int intICDNumPos,
			String strICDDegFile,
			int intICDPos,
			int intDegPos,
			String strSplit,
			String strRSFile){
		
		BufferedReader bf=null;
		BufferedWriter bfw=null;
		String strLine=null;
		String strTmp=null;
		
		TTest tt=new TTest();
		
		String[] strArr=null;
		
		HashMap<String,String> hmCodes=new HashMap<String,String>();// parent/superordinate results of ICD codes (declared but unused here).
		
		HashMap<String,String> hmChapEDenses=new HashMap<String,String>();// chapter -> tab-joined edge densities of its own ICD codes.
		
		HashMap<String,String> hmChapICDs=new HashMap<String,String>(); // chapter -> tab-joined ICD codes.
		
		HashMap<String,Integer> hmICDDegs=new HashMap<String,Integer>(); // ICD code -> degree in the molecular network.
		
		HashMap<String,Integer> hmChapICDNums=new HashMap<String,Integer>(); // chapter -> number of ICD codes.
		
		try {
			
			bf=new BufferedReader(new FileReader(strICDDegFile));
			
			long lngEdgeNum=0; // twice the edge count of the molecular network (sum of all degrees).
			
			//bf.readLine();
			
			// Read the node degrees and accumulate the total degree.
			while((strLine=bf.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				hmICDDegs.put(strArr[intICDPos],Integer.valueOf(strArr[intDegPos]));
				lngEdgeNum+=Integer.valueOf(strArr[intDegPos]);
				
			}
			
			bf.close();
			
			bf=new BufferedReader(new FileReader(strEDensityFile));
			
			bf.readLine();
			
			// First pass over the density file: collect, per chapter, the densities
			// and ICD codes of rows whose ICD belongs to that same chapter.
			while((strLine=bf.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				if(strArr[intOrgChFldPos].equalsIgnoreCase(strArr[intICDChFldPos])){ // store the densities of all codes inside the same chapter.
					
					if(!hmChapEDenses.containsKey(strArr[intOrgChFldPos])){
						
						hmChapEDenses.put(strArr[intOrgChFldPos],strArr[intEDensFldPos]);
						
					}else{
						
						strTmp=hmChapEDenses.get(strArr[intOrgChFldPos]);
						hmChapEDenses.remove(strArr[intOrgChFldPos]);
						hmChapEDenses.put(strArr[intOrgChFldPos], strTmp+"\t"+strArr[intEDensFldPos]);
					
					}
					if(!hmChapICDs.containsKey(strArr[intOrgChFldPos])){// store the chapter's ICD codes.
						
						hmChapICDs.put(strArr[intOrgChFldPos],strArr[intICDFldPos]);
						hmChapICDNums.put(strArr[intOrgChFldPos],Integer.valueOf(strArr[intICDNumPos]));
						
					}else{
						
						strTmp=hmChapICDs.get(strArr[intOrgChFldPos]);
						hmChapICDs.remove(strArr[intOrgChFldPos]);
						hmChapICDs.put(strArr[intOrgChFldPos], strTmp+"\t"+strArr[intICDFldPos]);
					
					}
				}
				
			}
			
			bf.close();
			
			bf=new BufferedReader(new FileReader(strEDensityFile));
			

			bfw=new BufferedWriter(new FileWriter(strRSFile,true));
			
			
			// Echo the header with the three new statistic columns appended.
			bfw.write(bf.readLine()+"\tttest_pvalue\tchisq_coef\tchisq_p-value"+"\n");
			
			String[] strTmpArr=null;
			
			double[] dblArr=null;
			
			double[] dblSameChArr=null;// densities of the OTHER same-chapter codes (current row excluded).
			
			long[][] lngEdgeCounts=new long[2][2]; // 2x2 contingency table: observed vs. expected edge counts.
			
			String strOldTmp="";
			
			boolean blRemoved=false;
			
			int intDeg=0;
			
			long lngTmp=0;
			
			ChiSquareTest cst=new ChiSquareTest();
			
			// Second pass: compute the statistics for every row.
			while((strLine=bf.readLine())!=null){
				
				strArr=strLine.split(strSplit);
				
				strTmp=strArr[intOrgChFldPos];
				
				System.out.println(strLine);
				
				if (!strOldTmp.equalsIgnoreCase(strTmp)){ // avoid re-splitting the chapter's density list for every ICD row.
					
					strTmpArr=hmChapEDenses.get(strTmp).split("\t");
					
					dblArr=new double[strTmpArr.length];
					
					for(int i=0;i<dblArr.length ;i++){
						
						dblArr[i]=Double.valueOf(strTmpArr[i]);
						
					}
					
					strOldTmp=strTmp;
					
				}
				
				if(!strArr[intOrgChFldPos].equalsIgnoreCase(strArr[intICDChFldPos])){
					
					// Cross-chapter row: observed edge count = chapter size * density.
					lngTmp=(long)(Integer.valueOf(strArr[intICDNumPos])*Double.valueOf(strArr[intEDensFldPos]));
					
					// observed edge count.
					
					lngEdgeCounts[0][0]=lngTmp;
					lngEdgeCounts[1][0]=Integer.valueOf(strArr[intICDNumPos])-lngTmp;
					// expected edge count.
					
					if (hmICDDegs.containsKey(strArr[intICDFldPos])){
						intDeg=hmICDDegs.get(strArr[intICDFldPos]);
					}else{
						intDeg=0;
					}
					
					System.out.println(intDeg+"\t"+hmChapICDs.get(strArr[intOrgChFldPos]));
					
					lngTmp=(long)getNodeChapExpectedEdgeNum(intDeg,
					   hmChapICDs.get(strArr[intOrgChFldPos]),"\t",hmICDDegs,lngEdgeNum);
					
					lngEdgeCounts[0][1]=lngTmp;
					if(lngTmp+1>Integer.valueOf(strArr[intICDNumPos])){
						lngEdgeCounts[1][1]=1; // clamp to keep the table cell positive.
					}else{
						lngEdgeCounts[1][1]=Integer.valueOf(strArr[intICDNumPos])-lngTmp-1;
					}
					
					bfw.write(strLine+"\t"+tt.tTest(Double.valueOf(strArr[intEDensFldPos]), dblArr)
							+"\t"+cst.chiSquare(lngEdgeCounts)+"\t"+cst.chiSquareTest(lngEdgeCounts)+"\n");
					
					
				}else{
					
					// Same-chapter row: exclude this row's own density from the sample.
					dblSameChArr=new double[dblArr.length-1];
					
					blRemoved=false;
					
					// Copy dblArr minus the first element equal to this row's density.
					// NOTE(review): if no element compares equal (e.g. parse mismatch),
					// the last write overflows dblSameChArr — relies on an exact match existing.
					for(int i=0,j=0;j<dblArr.length;i++,j++){
						
						if(!blRemoved&&dblArr[j]==Double.valueOf(strArr[intEDensFldPos])){
							i--;
							blRemoved=true;
							
						}else{
							
							dblSameChArr[i]=dblArr[j];
							
						}
						
					}
					
					lngTmp=(long)(Integer.valueOf(strArr[intICDNumPos])*Double.valueOf(strArr[intEDensFldPos]));
					
					lngEdgeCounts[0][0]=lngTmp;
					lngEdgeCounts[1][0]=Integer.valueOf(strArr[intICDNumPos])-lngTmp-1; // -1: the node itself is in its own chapter.
					
					// expected edge count.
					
					if (hmICDDegs.containsKey(strArr[intICDFldPos])){
						intDeg=hmICDDegs.get(strArr[intICDFldPos]);
					}else{
						intDeg=0;
					}
					
					lngTmp=(long)getNodeChapExpectedEdgeNum(intDeg,
							hmChapICDs.get(strArr[intOrgChFldPos]),"\t",hmICDDegs,lngEdgeNum);
					
					lngEdgeCounts[0][1]=lngTmp;
					
					
					if(lngTmp+1>Integer.valueOf(strArr[intICDNumPos])){
						lngEdgeCounts[1][1]=1; // clamp to keep the table cell positive.
					}else{
						lngEdgeCounts[1][1]=Integer.valueOf(strArr[intICDNumPos])-lngTmp-1;
					}
					
					
					bfw.write(strLine+"\t"+tt.tTest(Double.valueOf(strArr[intEDensFldPos]), dblSameChArr)
							+"\t"+cst.chiSquare(lngEdgeCounts)+"\t"+cst.chiSquareTest(lngEdgeCounts)+"\n");
					
					//bfw.write(strLine+"\tnull\n");
					
				}
				
			}
			
			bf.close();
			
			bfw.close();
			
			hmCodes.clear();
			hmChapEDenses.clear();
			hmChapICDs.clear();
			hmICDDegs.clear();
			
			System.out.println("Finshed at the time:"+new Date());
			
			
		} catch (FileNotFoundException e) {
			// NOTE(review): streams opened above may leak on this path (no finally).
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		
		
	}
	
	/**
	 * Computes the expected number of edges, under the configuration model,
	 * between one node and all nodes of a chapter: sum over the chapter's nodes
	 * of deg(node) * intICDDeg / lngEdgeNum.
	 * @param intICDDeg degree of the node in question.
	 * @param strChapNodes the chapter's node ids joined by strSplit.
	 * @param strSplit separator used in strChapNodes.
	 * @param hmICDDegs node id -> degree; ids missing from the map count as degree 0.
	 * @param lngEdgeNum twice the number of edges (the sum of all degrees).
	 * @return the expected edge count (a non-negative real number).
	 */
	private double getNodeChapExpectedEdgeNum(
			int intICDDeg,
			String strChapNodes,
			String strSplit,
			HashMap<String,Integer> hmICDDegs,
			long lngEdgeNum){
		
		double dblExpected=0.;
		
		for(String strNode : strChapNodes.split(strSplit)){
			
			// Unknown nodes contribute nothing (degree treated as 0).
			int intNodeDeg = hmICDDegs.containsKey(strNode) ? hmICDDegs.get(strNode) : 0;
			
			dblExpected += 1.0*intNodeDeg*intICDDeg/lngEdgeNum; // lngEdgeNum is already twice the edge count.
			
		}
		
		return dblExpected;
		
	}
	
	
	/**
	 * Entry point: computes the chapter-based modularity of the herb similarity
	 * network (threshold 0, weight &gt;= threshold, header skipped, tab-separated)
	 * and writes the single modularity value to the output file.
	 * @param args unused.
	 * @throws IOException if writing the result fails.
	 */
	public static void main(String[] args) throws IOException {
		CategoryModularity cm=new CategoryModularity();
		String strDisCatFile = "E:\\于博诊断一致性分析\\相似性计算\\herb_label.txt";
		String strMolAssFile = "E:\\于博诊断一致性分析\\相似性计算\\herb_similarity0.9.txt";
		// Only the 0.9 output path was actually used; the eight unused
		// output0..output8 locals from the original were removed.
		String strOutputFile = "E:\\于博诊断一致性分析\\模块度的计算结果\\大于0.9\\herb.txt";
		double result=cm.catMolModularity(strDisCatFile, 0, 1, strMolAssFile, 0, true, true, "\t");
		BufferedWriter writer = FileUtil.getWriter(strOutputFile);
		// Fix: the original dereferenced 'writer' BEFORE its null check, so the
		// check could never fire; guard first, and close in finally so the
		// writer is released even if write() throws.
		if (writer == null) {
			return;
		}
		try {
			writer.write(Double.toString(result));
			writer.newLine();
			writer.flush();
		} finally {
			writer.close();
		}
	}
	/**
	 * Computes a Newman-style modularity value for an ICD molecular-association
	 * network (e.g. shared-gene links) whose nodes are partitioned into the
	 * categories given by strDisCatFile. 2015.12.30.
	 * @author xuezhongzhou
	 * @param strDisCatFile format: icd\tcat\n, where cat is the category label.
	 * @param intICDPos column index of the node id in strDisCatFile.
	 * @param intICDChapterPos column index of the category label in strDisCatFile.
	 * @param strMolAssFile edge list, format: icd1\ticd2\tw\n.
	 * @param dblThresholdWeight weight threshold applied to column 2 of strMolAssFile.
	 * @param blGreater true: keep edges with weight &gt;= threshold; false: keep weight &lt; threshold.
	 * @param blFirstFldLine true if the first line of strMolAssFile is a header to skip.
	 * @param strSplit field separator of both input files.
	 * @return the modularity value; 0 on I/O failure or when no edge passes the threshold.
	 */
	public double catMolModularity(
			String strDisCatFile,
			int intICDPos,
			int intICDChapterPos,
			String strMolAssFile,
			double dblThresholdWeight,
			boolean blGreater,
			boolean blFirstFldLine,
			String strSplit){
		
		double dblResult=0.;
		
		System.out.println("Start processing at time:"+new Date());
		
		HashMap<String,String> hmICDCat=new HashMap<String,String>(); // node id -> category label.
		
		HashMap<String,Integer> hmCatIds=new HashMap<String,Integer>(); // category label -> dense id.
		
		// try-with-resources fixes the original's reader leak when an exception
		// was thrown mid-read; the per-line debug println was also removed.
		try (BufferedReader bf=new BufferedReader(new FileReader(strDisCatFile))) {
			
			String strLine;
			int intCatNo=0;
			
			// Read the node-to-category mapping and assign dense category ids.
			while((strLine=bf.readLine())!=null){
				
				String[] strArr=strLine.split(strSplit);
				
				hmICDCat.put(strArr[intICDPos], strArr[intICDChapterPos]);
				
				if(!hmCatIds.containsKey(strArr[intICDChapterPos])){
					
					hmCatIds.put(strArr[intICDChapterPos], intCatNo++);
					
				}
				
			}
			
			int[] intEii=new int[hmCatIds.size()]; // intra-category edge counts.
			
			int[] intAii=new int[hmCatIds.size()]; // inter-category edge endpoints per category.
			
			int intM=0; // number of edges of strMolAssFile passing the threshold.
			
			try (BufferedReader bfEdges=new BufferedReader(new FileReader(strMolAssFile))) {
				
				if(blFirstFldLine) bfEdges.readLine();
				
				while((strLine=bfEdges.readLine())!=null){
					
					String[] strArr=strLine.split(strSplit);
					
					double dblWeight=Double.valueOf(strArr[2]);
					
					// Keep the edge according to the threshold direction.
					if(blGreater ? dblWeight>=dblThresholdWeight : dblWeight<dblThresholdWeight){
						
						intM++; // counted even if an endpoint has no category, as in the original.
						
						String strCat1=hmICDCat.get(strArr[0]);
						
						String strCat2=hmICDCat.get(strArr[1]);
						
						if(strCat1==null||strCat2==null)
							continue;
						
						if(strCat1.equalsIgnoreCase(strCat2)){
							
							intEii[hmCatIds.get(strCat1)]++;
							
						}else{
							
							intAii[hmCatIds.get(strCat1)]++;
							intAii[hmCatIds.get(strCat2)]++;
							
						}
						
					}
					
				}
				
			}
			
			// Q = sum_i [ e_ii/(2m) - (a_i/(2m))^2 ].
			// NOTE(review): a_i here counts only inter-category endpoints; classic
			// Newman modularity uses the full category degree (2*e_ii + a_i) —
			// the original formula is kept for backward compatibility.
			// Guard intM > 0: the original returned NaN when no edge passed.
			if(intM>0){
				for(int i=0;i<intEii.length;i++){
					dblResult+=intEii[i]/(2.*intM)-Math.pow(intAii[i]/(2.0*intM), 2);
				}
			}
			
			hmCatIds.clear();
			hmICDCat.clear();
			
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		
		return dblResult;
		
	}
	/**
	 * @param args
	 */
//	public static void main(String[] args) {
//		// TODO Auto-generated method stub
//		CategoryModularity cm=new CategoryModularity();
//		//2016.1.14 计算考虑语义相似性范围的章节编码的edge density信息关联。
//		
//		String strFilePre="/Users/xuezhongzhou/Documents/Data/clinical_dismodule/new_2015/";
//		
//		String strDisCatFile=strFilePre+"ICDTree/icd_codes_top_chapters_20151231.txt";
//		
//		String strMolAssFile=strFilePre+"SG/icd_net_sg20151230.txt";
//		strMolAssFile=strFilePre+"sab/dis_ppi_sab_2016.txt";
//		
//		strMolAssFile=strFilePre+"SG/icd_net_sg20151230.txt";
//		
//		String strICDSemSimFile=strFilePre+"icd_linSim_filtered_20151230.txt";
//		
////		cm.catMolSpecPairEdgeDensity(strDisCatFile, 0, 1, strMolAssFile, 1.0, true, true, strICDSemSimFile,
////				0.0, 1.0, strFilePre+"sg/icd_net_sg20151230_nd.txt",1,3,133469, "\t", strFilePre+"SG/icd_net_sg_desity_full.txt", strFilePre+"SG/icd_chap_sg_desityfull.txt");
//		
//		//2016.1.4 计算icd与章节之间的pvalue. 2016.1.7 重新计算，产生对同类也具有p-value的数据
//		
//		String strEDensityFile=strFilePre+"sg/icd_chap_sg_density.txt";
//		
//		 String strRSFile=strFilePre+"sg/icd_chap_sg_density_pvalue3.txt";
//		
//		cm.genSigInterClsEdgeDensity(strEDensityFile, 0, 1, 2, 5, 4, 
//				strFilePre+"sg/icd_net_sg20151230_nd.txt", 1, 3, "\t", strRSFile);
//
//	}
	

}
