package nlpir;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;

import com.sun.jna.Library;
import com.sun.jna.Native;

public class NLPIR_FileString {

	// Running total of tokens written by splitFile(); read back by
	// xinxishang() as the denominator for word probabilities.
	private int count = 0;

	public int getCount() {
		return count;
	}

	public void setCount(int count) {
		this.count = count;
	}

	/**
	 * JNA binding for the native NLPIR (ICTCLAS 2014) word-segmentation
	 * library, loaded from the bundled "Fenci2014\bin" directory.
	 */
	public interface CLibrary extends Library {
		// Eagerly-loaded singleton handle to the native library.
		CLibrary Instance = (CLibrary) Native.loadLibrary(
				new File("Fenci2014\\bin\\NLPIR").getAbsolutePath(), CLibrary.class);

		public int NLPIR_Init(String sDataPath, int encoding,
				String sLicenceCode);

		public String NLPIR_ParagraphProcess(String sSrc, int bPOSTagged);

		public String NLPIR_GetKeyWords(String sLine, int nMaxKeyLimit,
				boolean bWeightOut);

		public String NLPIR_GetFileKeyWords(String sLine, int nMaxKeyLimit,
				boolean bWeightOut);

		public int NLPIR_AddUserWord(String sWord);// add by qp 2008.11.10

		public int NLPIR_DelUsrWord(String sWord);// add by qp 2008.11.10

		public String NLPIR_GetLastErrorMsg();

		public int NLPIR_ImportUserDict(String sUserDict);

		public int NLPIR_SaveTheUsrDic();

		public double NLPIR_GetUniProb(String sWord);

		public boolean NLPIR_IsWord(String sWord);

		public void NLPIR_Exit();
	}

	/**
	 * Closes a stream, logging (not propagating) the IOException that
	 * close() may throw. Null-safe, so it can be called unconditionally
	 * from finally blocks.
	 *
	 * @param c the stream to close; may be null
	 */
	private static void closeQuietly(Closeable c) {
		if (c != null) {
			try {
				c.close();
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
	}

	/**
	 * Counts the non-overlapping occurrences of a literal substring.
	 * Unlike String.replaceAll, the needle is NOT interpreted as a regular
	 * expression, so tokens containing metacharacters such as "(" or ")"
	 * are counted correctly.
	 *
	 * @param haystack the text to search in
	 * @param needle   the literal token to count
	 * @return the number of occurrences; 0 when the needle is empty
	 */
	private static int countOccurrences(String haystack, String needle) {
		if (needle.length() == 0) {
			return 0;
		}
		int n = 0;
		int idx = haystack.indexOf(needle);
		while (idx >= 0) {
			n++;
			idx = haystack.indexOf(needle, idx + needle.length());
		}
		return n;
	}

	/**
	 * Computes term frequencies and writes them to disk, one count per line.
	 * The source file is expected to hold one token per line (the output of
	 * splitFile); line i of the destination file holds the number of
	 * occurrences of token i in the whole source file.
	 *
	 * @param source path of the tokenized input file (one token per line)
	 * @param dest   path of the frequency output file
	 */
	public void cipinFile(String source, String dest) {
		BufferedReader reader = null;
		BufferedWriter bw = null;
		try {
			// First pass: concatenate every line into a single string.
			StringBuffer sb = new StringBuffer();
			reader = new BufferedReader(new FileReader(source));
			String line;
			while ((line = reader.readLine()) != null) {
				sb.append(line);
			}
			closeQuietly(reader);
			String text = sb.toString();

			// Second pass: count each token literally. The previous
			// implementation used String.replaceAll, which broke on tokens
			// containing regex metacharacters (e.g. a lone parenthesis) and
			// divided by zero on empty lines.
			reader = new BufferedReader(new FileReader(source));
			bw = new BufferedWriter(new FileWriter(dest));
			while ((line = reader.readLine()) != null) {
				bw.write("" + countOccurrences(text, line));
				bw.newLine();
			}
			bw.flush();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			closeQuietly(bw);
			closeQuietly(reader);
		}
	}

	/**
	 * Computes each word's information-entropy contribution and writes the
	 * results to disk, one value per line: -p * log2(p) where
	 * p = frequency / total token count (see getCount()).
	 *
	 * @param path1 tokenized file, one word per line (output of splitFile)
	 * @param path2 frequency file, one count per line (output of cipinFile)
	 * @param path3 destination file for the per-word entropy values
	 */
	public void xinxishang(String path1, String path2, String path3) {
		BufferedReader wordReader = null;
		BufferedReader freqReader = null;
		BufferedWriter bw = null;
		try {
			wordReader = new BufferedReader(new FileReader(new File(path1)));
			freqReader = new BufferedReader(new FileReader(new File(path2)));
			bw = new BufferedWriter(new FileWriter(new File(path3)));
			int total = getCount();
			String word;
			String freqLine;
			// Walk the word file and the frequency file in lock-step.
			while ((word = wordReader.readLine()) != null
					&& (freqLine = freqReader.readLine()) != null) {
				int freq = Integer.parseInt(freqLine.trim());
				// Guard total == 0 to avoid division producing NaN/Infinity.
				double p = (total > 0) ? (double) freq / total : 0.0;
				// lim(p->0) p*log2(p) == 0; without the guard a zero
				// frequency yields 0 * -Infinity == NaN in the output.
				double h = (p > 0) ? -(p * Math.log(p) / Math.log(2)) : 0.0;
				bw.write("" + h);
				bw.newLine();
			}
			bw.flush();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			closeQuietly(bw);
			closeQuietly(freqReader);
			closeQuietly(wordReader);
		}
	}

	/**
	 * Segments a Chinese text file with NLPIR and stores the result on
	 * disk, one token per line, incrementing the token counter (see
	 * getCount()) for every token written.
	 *
	 * @param path   path of the file to segment
	 * @param result path of the segmented output file
	 * @return the raw segmentation string returned by NLPIR, or the native
	 *         error message when initialization fails
	 */
	public String splitFile(String path, String result) {
		File file = new File(path);
		String nativeBytes = null;
		BufferedReader reader = null;
		BufferedWriter bw = null;
		try {
			int charset_type = 1;
			// NOTE(review): the input file's own path is passed as NLPIR's
			// data directory; the previously-unused "Fenci2014" directory
			// was probably intended -- confirm before changing.
			int init_flag = CLibrary.Instance.NLPIR_Init(file.getAbsolutePath(),
					charset_type, "0");
			if (0 == init_flag) {
				nativeBytes = CLibrary.Instance.NLPIR_GetLastErrorMsg();
				System.err.println("初始化失败！fail reason is " + nativeBytes);
				// Calling the segmenter after a failed init is unsafe: bail
				// out with the native error message instead of proceeding.
				return nativeBytes;
			}

			// Read the whole input file into a single string.
			StringBuffer sb = new StringBuffer();
			reader = new BufferedReader(new InputStreamReader(
					new FileInputStream(file)));
			String line;
			while ((line = reader.readLine()) != null) {
				sb.append(line);
			}

			nativeBytes = CLibrary.Instance.NLPIR_ParagraphProcess(sb.toString(), 0);
			// NLPIR separates tokens with single spaces in its output.
			String[] tokens = nativeBytes.split(" ");
			bw = new BufferedWriter(new FileWriter(new File(result)));
			for (int i = 0; i < tokens.length; i++) {
				if (!tokens[i].equals("")) {
					count++;
					bw.write(tokens[i]);
					bw.newLine();
				}
			}
			bw.flush();
			CLibrary.Instance.NLPIR_Exit();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			closeQuietly(bw);
			closeQuietly(reader);
		}
		return nativeBytes;
	}

}
