import java.io.*;
import java.util.Properties;
import edu.stanford.nlp.ie.crf.CRFClassifier;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;


/** Code for segmenting and POS-tagging Chinese files
 *  programmatically.  It assumes the input files are encoded in UTF-8, and
 *  writes the result files under the given output root directory.
 *  <p/>
 *  <code>
 *  Usage: java -mx1g -cp seg.jar SegDemo fileName
 *  </code>
 *  This will run correctly in the distribution home directory.  To
 *  run anywhere else, the properties that locate the dictionaries and
 *  normalization data have to be set explicitly.
 *
 *  @author Evan Cheung
 *  @see "Christopher Manning's SegDemo (reference implementation)"
 */

public class SegnPOS {
	// NOTE(review): shared mutable static state. The current output writer and
	// the accumulated XML text are passed between methods through these fields,
	// so this class is not thread-safe and can only process one stream at a time.
	public static FileWriter fw = null;       // writer for the current output file
	public static BufferedWriter bw = null;   // buffered wrapper around fw
	public static String buffer = null;       // accumulated XML output for the current stream
	
	/**
	 * Visible ASCII characters start at '!' (decimal 33).
	 */
	static final char DBC_CHAR_START = 33; // half-width '!'

	/**
	 * Visible ASCII characters end at '~' (decimal 126).
	 */
	static final char DBC_CHAR_END = 126; // half-width '~'

	/**
	 * Full-width counterparts of the visible ASCII range start at '！' (65281).
	 */
	static final char SBC_CHAR_START = 65281; // full-width '！'

	/**
	 * Full-width counterparts of the visible ASCII range end at '～' (65374).
	 */
	static final char SBC_CHAR_END = 65374; // full-width '～'

	/**
	 * Fixed offset between a visible ASCII character (space excluded) and its
	 * full-width counterpart.
	 */
	static final int CONVERT_STEP = 65248; // full-width/half-width conversion offset

	/**
	 * Full-width space (12288). It does not follow the fixed offset above and
	 * must be handled separately.
	 */
	static final char SBC_SPACE = 12288; // full-width space, 12288

	/**
	 * Half-width (ASCII) space, decimal 32.
	 */
	static final char DBC_SPACE = ' '; // half-width space

	/**
	 * Entry point.  Expects seven arguments (see usage message below) and
	 * forwards them to {@link #segAndPOS}.
	 *
	 * @param args segmenter data dir, serialized dictionary, CRF model,
	 *             tagger model, input file, output root dir, type ("web"/"tradition")
	 */
	public static void main(String[] args) throws Exception {
		// Fail fast with a usage message instead of an opaque
		// ArrayIndexOutOfBoundsException when arguments are missing.
		if (args.length < 7) {
			System.err.println("Usage: java SegnPOS <sighanCorporaDict> <serDictionary> "
					+ "<classifier> <tagger> <inputFile> <outputRootDir> <type: web|tradition>");
			System.exit(1);
		}
		String sighanCorporaDict = args[0];		// Stanford segmenter data directory
		String serDictionary = args[1];			// serialized segmenter dictionary
		String classifier = args[2];			// CRF segmenter model file
		String tagger = args[3];				// MaxEnt POS tagger model file
		String input = args[4];					// Input file for parsing
		String output = args[5];				// Root Directory for parsed files
		String type = args[6];					// Two types: "web" and "tradition"
		// Echo the arguments for traceability in batch logs.
		for (String s : args) {
			System.out.println(s);
		}
		segAndPOS(input, output, type, sighanCorporaDict, serDictionary, classifier, tagger);
	}
	
	
	/**
	 * <PRE>
	 * 全角字符->半角字符转换  
	 * 只处理全角的空格，全角！到全角～之间的字符，忽略其他
	 * </PRE>
	 */
	/**
	 * Converts full-width (SBC) characters to their half-width (DBC)
	 * equivalents.  Only the full-width space and characters in the
	 * full-width '！'..'～' range are converted; everything else is copied
	 * through unchanged.
	 *
	 * @param src input string, may be {@code null}
	 * @return converted string, or {@code null} when {@code src} is null
	 */
	public static String qj2bj(String src) {
		if (src == null) {
			return null;
		}
		StringBuilder out = new StringBuilder(src.length());
		for (char c : src.toCharArray()) {
			if (c == SBC_SPACE) {
				// The full-width space does not follow the fixed offset; map it directly.
				out.append(DBC_SPACE);
			} else if (SBC_CHAR_START <= c && c <= SBC_CHAR_END) {
				// Characters in the full-width visible range differ from their
				// ASCII counterparts by a constant offset.
				out.append((char) (c - CONVERT_STEP));
			} else {
				// Outside both ranges: leave the character untouched.
				out.append(c);
			}
		}
		return out.toString();
	}
	

	/**
	 * Ensures the given directory exists, creating it (and any missing parent
	 * directories) if necessary.
	 *
	 * @param dir path of the directory to create
	 * @return {@code true} if the directory exists or was created,
	 *         {@code false} if creation failed
	 */
	public static boolean createDirectory(String dir) throws Exception {
		// Primitive boolean instead of the Boolean wrapper (callers unbox it
		// anyway), and a single File instance instead of constructing it twice.
		File d = new File(dir);
		// mkdirs() creates missing parents; the isDirectory() check short-circuits
		// the common already-exists case.
		return d.isDirectory() || d.mkdirs();
	}
	
	/**
	 * Escapes the five XML special characters so the text can be embedded
	 * safely in the generated SGML/XML output.  '&amp;' is replaced first so
	 * the entities produced by the later replacements are not double-escaped.
	 *
	 * @param s raw text
	 * @return text with &amp;, &lt;, &gt;, " and ' replaced by XML entities
	 */
	public static String replaceEntity(String s) {
		return s.replace("&", "&amp;")
				.replace("<", "&lt;")
				.replace(">", "&gt;")
				.replace("\"", "&quot;")
				.replace("'", "&apos;");
	}
	
	/**
	 * Handles one pre-split line of a "web"-style (post/forum) source file.
	 * Each call appends an SGML/XML fragment for the tag named in
	 * {@code elements[0]} to the shared static {@code buffer}.  Text-bearing
	 * tags are word-segmented, POS-tagged, width-normalized ({@link #qj2bj})
	 * and XML-escaped ({@link #replaceEntity}) before being appended.
	 *
	 * The output file is opened on "id-ZhangSheng" (path derived from the
	 * document id) and the accumulated buffer is written and closed on
	 * "EndZhangSheng".
	 *
	 * NOTE(review): buffer grows via repeated String '+=', which is O(n^2) in
	 * the document size; StringBuilder would be cheaper for large documents.
	 *
	 * @param segmenter loaded CRF word segmenter
	 * @param tagger    loaded MaxEnt POS tagger
	 * @param rootDir   root directory for generated files
	 * @param elements  one input line split on "::::"; elements[0] is the tag
	 *                  name, elements[1] (when present) its text value
	 */
	public static void webAnalysis(CRFClassifier<CoreLabel> segmenter, MaxentTagger tagger, String rootDir, String[] elements) throws Exception {
		String lineSeg = null, linePOS = null, temp = null;
		String outputFileName = null;
		String dir = null, subDir = null;
		// Dispatch on the tag name; every branch only appends to buffer except
		// "id-ZhangSheng" (opens the output file) and "EndZhangSheng" (flushes it).
		switch(elements[0]) {
		// Start of stream: (re)initialize buffer with the XML prolog + wrapper tag.
		case "ZhangSheng":
			buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<ZhangSheng comment=\"This tag is added in order to correctly handle xml in sax\">\n";
			break;
		case "DOC-ZhangSheng":
			buffer += "<DOC>\n";
			break;
		// Document id: derive the output path <rootDir>/<NN>/<n-m>/<id>.sgm.
		// NOTE(review): assumes the id contains at least four '-'-separated
		// fields — TODO confirm against the corpus id format.
		case "id-ZhangSheng":
			buffer += "<DOCID>" + elements[1] + "</DOCID>\n";
			String[] items = elements[1].split("-");
			dir = items[2];
			// Zero-pad single-digit directory names (e.g. "3" -> "03").
			if(items[2].length() == 1) {
				dir = "0" + items[2];
			}
			// NOTE(review): subDir uses the UNpadded items[2] while dir is
			// padded — looks intentional only if the corpus layout says so; verify.
			subDir = items[2] + "-" + items[3];
			dir = rootDir + "/" + dir + "/" + subDir;
			if(!createDirectory(dir)) {
				System.out.println("Dir Mk Error!");
				return;
			}
			outputFileName = dir + "/" + elements[1] + ".sgm";
			// NOTE(review): FileWriter uses the platform default charset even
			// though the emitted prolog declares utf-8 — mismatch on non-UTF-8
			// platforms; confirm the deployment environment.
			fw = new FileWriter(outputFileName);
			bw = new BufferedWriter(fw);
			System.out.println("+" + outputFileName);
			break;
		// SOURCE opens the DOCTYPE tag; the matching "type" line closes it.
		// NOTE(review): elements[1] is used without a length check here.
		case "SOURCE-ZhangSheng":
			buffer += "<DOCTYPE SOURCE=\"" + replaceEntity(elements[1]) + "\">";
			break;
		case "type-ZhangSheng":
			buffer += replaceEntity(elements[1]) + "</DOCTYPE>\n";
			break;
		case "DATETIME-ZhangSheng":
			if(elements.length == 1) {
				// No value: emit an empty element.
				buffer += "<DATETIME></DATETIME>\n";
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += "<DATETIME>" + replaceEntity(linePOS) + "</DATETIME>\n";
				break;
			}
		// HEADLINE also opens <BODY>; it is closed by "EndTEXT-ZhangSheng".
		case "HEADLINE-ZhangSheng":
			if(elements.length == 1) {
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += "<BODY>\n<HEADLINE>\n" + replaceEntity(linePOS) + "\n</HEADLINE>\n";
				break;
			}
		case "TEXT-ZhangSheng":
			buffer += "<TEXT>\n";
			break;
		case "POST-ZhangSheng":
			buffer += "<POST>\n";
			break;
		case "POSTER-ZhangSheng":
			if(elements.length == 1) {
				buffer += "<POSTER></POSTER>\n";
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += "<POSTER>" + replaceEntity(linePOS) + "</POSTER>\n";
				break;
			}
		case "POSTDATE-ZhangSheng":
			if(elements.length == 1) {
				buffer += "<POSTDATE></POSTDATE>\n";
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += "<POSTDATE>" + replaceEntity(linePOS) + "</POSTDATE>\n";
				break;
			}
		// A quoted previous post is emitted as the PREVIOUSPOST attribute value;
		// QUOTELINE lines are joined with "&#xA;" (XML newline entity) and
		// "EndQUOTE" closes the attribute and the element.
		case "QUOTE-ZhangSheng":
			buffer += "<QUOTE PREVIOUSPOST=\"\n";
			break;
		case "QUOTELINE-ZhangSheng":
			if(elements.length == 1) {
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += replaceEntity(linePOS) + "&#xA;";
				break;
			}
		case "EndQUOTE-ZhangSheng":
			buffer += "\"/>\n";
			break;
		// Plain body line inside a POST.
		case "LINE-ZhangSheng":
			if(elements.length == 1) {
				break;
			}
			else {
				temp = elements[1];
				lineSeg = segmenter.classifyToString(temp);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += replaceEntity(linePOS) + "\n";
				break;
			}
		case "EndPOST-ZhangSheng":
			buffer += "</POST>\n";
			break;
		case "EndTEXT-ZhangSheng":
			buffer += "</TEXT>\n</BODY>\n";
			break;
		case "EndDOC-ZhangSheng":
			buffer += "</DOC>\n";
			break;
		// End of stream: close the wrapper tag and flush everything to disk.
		case "EndZhangSheng":
			buffer += "</ZhangSheng>";
			bw.write(buffer);
			bw.close();
			fw.close();
			break;
		default:
			// Unknown tags are silently ignored.
			break;
		
		}
	}
	
	/**
	 * Handles one pre-split line of a "tradition"-style (newswire) source file.
	 * Like {@link #webAnalysis}, each call appends an XML fragment for the tag
	 * in {@code elements[0]} to the shared static {@code buffer}; text lines
	 * are segmented, POS-tagged, width-normalized and XML-escaped.
	 *
	 * The output file is opened on "id-ZhangSheng" (path derived from id
	 * prefixes) and the buffer is written and closed on "EndZhangSheng".
	 *
	 * NOTE(review): buffer grows via repeated String '+=' (O(n^2)); and several
	 * cases use elements[1] without a length check (HEADLINE, PLINE, TLINE).
	 *
	 * @param segmenter loaded CRF word segmenter
	 * @param tagger    loaded MaxEnt POS tagger
	 * @param rootDir   root directory for generated files
	 * @param elements  one input line split on "::::"; elements[0] is the tag
	 *                  name, elements[1] (when present) its text value
	 */
	public static void traditionAnalysis(CRFClassifier<CoreLabel> segmenter, MaxentTagger tagger, String rootDir, String[] elements) throws Exception {
		String lineSeg = null, linePOS = null;
		String outputFileName = null;
		String dir = null, subDir = null;
		switch(elements[0]) {
		// Start of stream: (re)initialize buffer with the XML prolog + wrapper tag.
		case "ZhangSheng":
			buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<ZhangSheng comment=\"This tag is added in order to correctly handle xml in sax\">\n";
			break;
		// Opens "<DOC " — the tag stays open until "type-ZhangSheng" closes it
		// with the type attribute.
		case "DOC-ZhangSheng":
			buffer += "<DOC ";
			break;
		// Document id: output path is <rootDir>/<id[0:7]>/<id[0:14]>/<id>.
		// NOTE(review): assumes the id is at least 14 characters — TODO confirm.
		case "id-ZhangSheng":
			buffer += "id=\"" + elements[1] + "\"";
			dir = elements[1].substring(0, 7).toLowerCase();
			subDir = elements[1].substring(0, 14).toLowerCase();
			dir = rootDir + "/" + dir + "/" + subDir;
			if(!createDirectory(dir)) {
				System.out.println("Dir Mk Error!");
				return;
			}
			outputFileName = dir + "/" + elements[1];
			// NOTE(review): FileWriter uses the platform default charset even
			// though the emitted prolog declares utf-8 — confirm the deployment
			// environment is UTF-8.
			fw = new FileWriter(outputFileName);
			bw = new BufferedWriter(fw);
			System.out.println("+" + outputFileName);
			break;
		case "type-ZhangSheng":
			buffer += " type=\"" + replaceEntity(elements[1]) + "\">\n";
			break;
		case "HEADLINE-ZhangSheng":
			lineSeg = segmenter.classifyToString(elements[1]);
			linePOS = tagger.tagTokenizedString(lineSeg);
			linePOS = qj2bj(linePOS);
			buffer += "<HEADLINE>\n" + replaceEntity(linePOS) + "\n";
			break;
		case "EndHEADLINE-ZhangSheng":
			buffer += "</HEADLINE>\n";
			break;
		// Input DATELINE lines are emitted as <DATETIME> elements.
		case "DATELINE-ZhangSheng":
			if(elements.length == 1) {
				buffer += "<DATETIME></DATETIME>\n";
				break;
			}
			else {
				lineSeg = segmenter.classifyToString(elements[1]);
				linePOS = tagger.tagTokenizedString(lineSeg);
				linePOS = qj2bj(linePOS);
				buffer += "<DATETIME>" + replaceEntity(linePOS) + "</DATETIME>\n";
				break;
			}
		case "TEXT-ZhangSheng":
			buffer += "<TEXT>\n";
			break;
		case "P-ZhangSheng":
			buffer += "<P>\n";
			break;
		// Text line inside a paragraph.
		case "PLINE-ZhangSheng":
			lineSeg = segmenter.classifyToString(elements[1]);
			linePOS = tagger.tagTokenizedString(lineSeg);
			linePOS = qj2bj(linePOS);
			buffer += replaceEntity(linePOS) + "\n";
			break;
		case "EndP-ZhangSheng":
			buffer += "</P>\n";
			break;
		// Text line directly inside <TEXT> (outside any paragraph).
		case "TLINE-ZhangSheng":
			lineSeg = segmenter.classifyToString(elements[1]);
			linePOS = tagger.tagTokenizedString(lineSeg);
			linePOS = qj2bj(linePOS);
			buffer += replaceEntity(linePOS) + "\n";
			break;
		case "EndTEXT-ZhangSheng":
			buffer += "</TEXT>\n";
			break;
		case "EndDOC-ZhangSheng":
			buffer += "</DOC>\n";
			break;
		// End of stream: close the wrapper tag and flush everything to disk.
		case "EndZhangSheng":
			buffer += "</ZhangSheng>";
			bw.write(buffer);
			bw.close();
			fw.close();
			break;
		default:
			// Unknown tags are silently ignored.
			break;
		}
	}

	/**
	 * Reads the pre-split input file line by line and dispatches each line to
	 * the web- or tradition-style handler.  Lines are fields joined by "::::",
	 * where the first field names the tag.
	 *
	 * Fixes over the original: the input is now read explicitly as UTF-8 (the
	 * class contract assumes UTF-8 input, but FileReader used the platform
	 * default charset), and the reader is closed via try-with-resources
	 * (previously it leaked).
	 *
	 * @param segmenter loaded CRF word segmenter
	 * @param tagger    loaded MaxEnt POS tagger
	 * @param fileName  input file (UTF-8)
	 * @param type      "web" selects webAnalysis; anything else traditionAnalysis
	 * @param rootDir   root directory for the generated output files
	 */
	public static void analysis(CRFClassifier<CoreLabel> segmenter, MaxentTagger tagger, String fileName, String type, String rootDir) throws Exception {
		buffer = null;	// reset the shared accumulator for this stream
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(fileName), "UTF-8"))) {
			String line;
			while ((line = br.readLine()) != null) {
				String[] elements = line.split("::::");
				if (type.equals("web")) {
					webAnalysis(segmenter, tagger, rootDir, elements);
				} else {
					traditionAnalysis(segmenter, tagger, rootDir, elements);
				}
			}
		}
	}
  
	/**
	 * Loads the CRF segmenter and the MaxEnt POS tagger, then runs
	 * {@link #analysis} over the input file.
	 *
	 * @param filename          input file to process (UTF-8)
	 * @param rootDir           root directory for the generated output files
	 * @param type              corpus style, "web" or "tradition"
	 * @param sighanCorporaDict Stanford segmenter data directory
	 * @param serDictionary     serialized segmenter dictionary
	 * @param classifier        CRF segmenter model file
	 * @param taggerPath        MaxEnt tagger model file
	 */
	public static void segAndPOS(String filename, String rootDir, String type, String sighanCorporaDict, String serDictionary, String classifier, String taggerPath) throws Exception {
		// Configuration for the Stanford CRF word segmenter.
		Properties segProps = new Properties();
		segProps.setProperty("sighanCorporaDict", sighanCorporaDict);
		segProps.setProperty("serDictionary", serDictionary);
		segProps.setProperty("testFile", filename);
		segProps.setProperty("inputEncoding", "UTF-8");
		segProps.setProperty("sighanPostProcessing", "true");

		// Build the segmenter and load its serialized model.
		CRFClassifier<CoreLabel> wordSegmenter = new CRFClassifier<CoreLabel>(segProps);
		wordSegmenter.loadClassifierNoExceptions(classifier, segProps);

		// Load the POS tagger model from disk.
		MaxentTagger posTagger = new MaxentTagger(taggerPath);

		analysis(wordSegmenter, posTagger, filename, type, rootDir);
	}
	

} 