package test.zipParser;

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.io.StringReader;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Set;
import java.util.Map.Entry;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

public class ZipRunner {

	/**
	 * Job driver. Stages the stop-word list on HDFS next to the input,
	 * registers it in the distributed cache, packs the source zip files into
	 * a sequence file, then runs the inverted-index job.
	 *
	 * Args: &lt;in&gt; &lt;out&gt; &lt;srcUri&gt; &lt;tmpLocalPath&gt;
	 */
	public static void main(String[] args) {
		long start = System.currentTimeMillis();
		Configuration conf = new Configuration();
		String[] otherArgs = null;
		try {
			otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
			if (otherArgs.length != 4) {
				System.err
						.println("Usage: invertedindex <in> <out> <srcUri> <tmpLocalPath>");
				System.exit(2);
			}
			// Directory part of the input path; the stop-word list is staged
			// there so the mappers can pull it from the distributed cache.
			String inputDir = otherArgs[0].substring(0,
					otherArgs[0].lastIndexOf('/'));
			String stopUri = inputDir + "/stopwords.txt";
			FileSystem fs = FileSystem.get(URI.create(otherArgs[0]), conf);
			fs.copyFromLocalFile(new Path(otherArgs[2] + "/stopwords.txt"),
					new Path(inputDir));
			fs.close();
			DistributedCache.addCacheFile(new URI(stopUri), conf);
			// Pack the source zips into one sequence file named with a
			// millisecond timestamp so repeated runs do not collide.
			UploadZipFiles upZip = new UploadZipFiles(otherArgs[2],
					otherArgs[0]);
			SimpleDateFormat sf = new SimpleDateFormat("yyyyMMddHHmmssSSS");
			String newFileName = sf.format(Calendar.getInstance().getTime());
			upZip.UploadData(newFileName + ".seq", upZip);
			// Scratch directory the mappers use to materialize each zip.
			conf.set("fs.localPath", otherArgs[3]);
			Job job = new Job(conf, "InvertedIndex");
			job.setJarByClass(ZipRunner.class);
			job.setInputFormatClass(SequenceFileInputFormat.class);
			job.setMapperClass(ZipMapper.class);
			job.setReducerClass(ZipReducer.class);

			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(Text.class);
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(Text.class);
			FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
			FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
			// BUG FIX: propagate the job result instead of always exiting 0;
			// previously a failed job still reported success to the shell.
			boolean ok = job.waitForCompletion(true);
			long end = System.currentTimeMillis();
			System.out.println("cost " + (end - start) / 1000 + " seconds");
			System.exit(ok ? 0 : 1);
		} catch (Exception e) {
			e.printStackTrace();
			// BUG FIX: exit non-zero when the driver itself fails.
			System.exit(1);
		}
	}

	public static class ZipMapper extends
			Mapper<Text, BytesWritable, Text, Text> {
		// Indexing configuration (all heavyweight members are built in setup()).
		private Set<?> stopwords = null; // stop-word set loaded from the distributed cache
		private Path[] localFiles = null; // files localized by the distributed cache
		private Analyzer analyzer = null; // stop-word removal + Porter stemming
		private Analyzer analyzer2 = new StandardAnalyzer(Version.LUCENE_30); // plain tokenization, used to count raw tokens

		private DocumentBuilderFactory factory; // DOM parser factory, configured in setup()
		private DocumentBuilder builder; // DOM parser for the XML inside each zip entry

		// Explicit no-arg constructor; nothing to initialize eagerly.
		public ZipMapper() {

		}

		/**
		 * Per-task initialization: loads the stop-word list from the
		 * distributed cache and prepares the XML parser and the filtering
		 * analyzer used by {@link #call}.
		 */
		public void setup(Context context) {
			Configuration conf = context.getConfiguration();
			try {
				localFiles = DistributedCache.getLocalCacheFiles(conf);
				System.out.println("DistributedCache: " + localFiles);
				for (Path localfile : localFiles) {
					// Pick up the whitespace-separated stop-word file once.
					if (localfile.getName().endsWith("stopwords.txt")
							&& stopwords == null) {
						System.out.println("获得stopwords.txt");
						// NOTE(review): FileReader uses the platform charset;
						// assumes stopwords.txt is ASCII — confirm.
						BufferedReader br = new BufferedReader(new FileReader(
								localfile.toString()));
						StringBuilder sb = new StringBuilder();
						try {
							String s;
							while ((s = br.readLine()) != null) {
								sb.append(s).append(' ');
							}
						} finally {
							// BUG FIX: close the reader even when readLine()
							// throws; it was previously leaked on error.
							br.close();
						}
						stopwords = StopFilter.makeStopSet(sb.toString()
								.split("\\s{1,}"));
					}
				}
				factory = DocumentBuilderFactory.newInstance();
				factory.setIgnoringElementContentWhitespace(true);
				// The XML comes from user-supplied zip entries; disable
				// DOCTYPE declarations to block XXE where the parser
				// supports the feature (best effort, failure tolerated).
				try {
					factory.setFeature(
							"http://apache.org/xml/features/disallow-doctype-decl",
							true);
				} catch (Exception ignored) {
					// feature not supported by this parser implementation
				}
				builder = factory.newDocumentBuilder();
				analyzer = new PositionalPorterStopAnalyzer(stopwords);
			} catch (Exception e) {
				e.printStackTrace();
			}
		}

		// map方法
		public void map(Text filename, BytesWritable value, Context context) {
			try {
				String localPath = context.getConfiguration().get(
						"fs.localPath");
				if (localPath == null)
					localPath = "./";
				else if (!localPath.endsWith("/") && !localPath.endsWith("\\"))
					localPath = localPath + "/";
				String localUri = localPath;
				File f = new File(localUri);
				if (!f.exists())
					f.mkdirs();
				f = new File(localUri + filename);
				writeToLocal(value, f);
				System.out.println("mapper正在处理：" + filename + " : ");
				parseZip(f, context);
				f.delete();
			} catch (Exception e) {
				e.printStackTrace();
			}
		}

		/**
		 * Walks every entry of the local zip archive, extracts the XML text
		 * of each regular (non-directory) entry, tokenizes it and emits the
		 * per-document word counts. A counter ("FileCounter:"/"number")
		 * tracks how many entries were processed, flushed in batches of 1000.
		 *
		 * @param f       local zip file written by {@link #writeToLocal}
		 * @param context mapper context used for counters and output
		 */
		public void parseZip(File f, Context context) {
			ZipFile zipFile = null;
			try {
				zipFile = new ZipFile(f);
				String zipName = f.getName();
				Enumeration e = zipFile.entries();
				int i = 0;
				while (e.hasMoreElements()) {
					ZipEntry zipEntry = (ZipEntry) e.nextElement();
					if (!zipEntry.isDirectory()) {
						InputStream in = zipFile.getInputStream(zipEntry);
						BufferedInputStream bin = new BufferedInputStream(in);
						HashMap<String, String> name_values;
						try {
							name_values = extractText(bin);
						} finally {
							// close the entry streams even if parsing throws
							bin.close();
							in.close();
						}
						WordsTable wt = call(name_values);
						writeToHdfs(zipEntry.getName(), wt, context);
						i++;
					}
					// BUG FIX: the old condition (i % 1000 == 0) also fired
					// while i == 0 (e.g. on a leading directory entry),
					// spuriously inflating the counter by 1000. Flush exactly
					// when a batch of 1000 entries has been processed.
					if (i == 1000) {
						Counter counter = context.getCounter("FileCounter:",
								"number");
						counter.increment(1000);
						i = 0;
						System.out.println(zipName + " number:"
								+ counter.getValue() + " size:"
								+ zipFile.size());
					}
				}
				// Flush the remainder (< 1000) after the last entry.
				Counter counter = context.getCounter("FileCounter:", "number");
				counter.increment(i);
				System.out.println(zipName + " number:" + counter.getValue()
						+ " size:" + zipFile.size());
			} catch (IOException e1) {
				e1.printStackTrace();
			} finally {
				// BUG FIX: the ZipFile was never closed, leaking one file
				// handle per archive processed by this mapper.
				if (zipFile != null) {
					try {
						zipFile.close();
					} catch (IOException ignored) {
					}
				}
			}
		}

		// 抽取文本
		public HashMap<String, String> extractText(InputStream in) {
			Document document = null;
			try {
				document = builder.parse(in);
			} catch (Exception e) {
				e.printStackTrace();
			}
			Element root = document.getDocumentElement(); // 根节点
			NodeList nodeList = root.getChildNodes(); // 子节点
			// 存放解析结果<标签名，内容>
			HashMap<String, String> values = new HashMap<String, String>();
			int wordsAmount = 0;
			for (int i = 0; i < nodeList.getLength(); i++) { // 读取结点内容
				Node node = nodeList.item(i);
				if (node.getNodeType() == Node.ELEMENT_NODE) {
					Element elementNode = (Element) node;
					String name = elementNode.getNodeName().trim();
					String text = "";
					if (name.equals("text")) { // 对text内容做处理name.equals("metadata")
						NodeList nl = elementNode.getChildNodes();
						for (int r = 0; r < nl.getLength(); r++) {
							if (nl.item(r).getNodeType() == Node.ELEMENT_NODE)
								text += " "
										+ nl.item(r).getTextContent().trim();
						}
					} else {
						text = elementNode.getTextContent().trim();
					}
					values.put(name.replaceAll("\'", "\'\'"),
							text.replaceAll("\'", "\'\'"));
				}
			}
			return values;
		}

		// 处理文本
		public WordsTable call(HashMap<String, String> values) {
			WordsTable wt = new WordsTable(); // 记录单词表
			int initialAmount = 0; // 原始单词数
			int filteredAmount = 0; // 过滤后单词数
			try {
				String source = values.get("text");
				BufferedReader br1 = new BufferedReader(
						new StringReader(source));
				BufferedReader br2 = new BufferedReader(
						new StringReader(source));
				TokenStream stream = analyzer.tokenStream("text", br1);
				TermAttribute term1 = stream.addAttribute(TermAttribute.class);
				TokenStream ts = analyzer2.tokenStream("", br2);
				TermAttribute term2 = ts.addAttribute(TermAttribute.class);
				String item1 = null;
				String item2 = null;
				String modifiedText = "";
				while (stream.incrementToken() | ts.incrementToken()) {
					item1 = term1.term();
					item2 = term2.term();
					if (item1.length() > 0 && item1 != null) {
						filteredAmount++;
						String word = item1;
						wt.stat(word);
						modifiedText += word + "  ";
					}
					if (item2.length() > 0 && item2 != null) {
						initialAmount++;
					}
				}
				stream.close();
				ts.close();
				br1.close();
				br2.close();
				wt.setInitialAmount(initialAmount);
				wt.setFilteredAmount(filteredAmount);
			} catch (Exception e) {
				e.printStackTrace();
			}
			return wt;
		}

		/**
		 * Serializes the BytesWritable payload into the given local file.
		 *
		 * NOTE(review): BytesWritable.write() emits the Writable wire format
		 * (a length prefix followed by the payload), not only the raw bytes.
		 * java.util.zip.ZipFile locates the central directory from the end of
		 * the file, so the prefix appears to be tolerated here — confirm
		 * before reusing this method for other file formats.
		 */
		public void writeToLocal(BytesWritable value, File f) {
			try {
				FileOutputStream fout = new FileOutputStream(f);
				try {
					DataOutput dataOut = new DataOutputStream(fout);
					value.write(dataOut);
				} finally {
					// BUG FIX: close in finally so the descriptor is not
					// leaked when write() throws.
					fout.close();
				}
			} catch (Exception ex) {
				ex.printStackTrace();
			}
		}

		/**
		 * Emits one (word, "filename:count") pair for every distinct word in
		 * the table.
		 */
		public void writeToHdfs(String filename, WordsTable wt, Context context) {
			try {
				Text key = new Text();
				Text value = new Text();
				key.set(filename);
				Iterator<Entry<String, Integer>> iter = wt.getWords();
				while (iter.hasNext()) {
					Entry<String, Integer> entry = iter.next();
					key.set(entry.getKey());
					value.set(filename + ":" + entry.getValue());
					context.write(key, value);
				}
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}

	/**
	 * Reducer: concatenates all "filename:count" postings of a word into one
	 * space-terminated posting list.
	 */
	public static class ZipReducer extends Reducer<Text, Text, Text, Text> {

		// Reused output value to avoid a per-call allocation.
		private Text result = new Text();

		public void reduce(Text key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {
			// FIX (perf): use StringBuilder instead of repeated String
			// concatenation, which is O(n^2) in the number of postings.
			StringBuilder fileList = new StringBuilder();
			for (Text value : values) {
				fileList.append(value.toString()).append(' ');
			}
			result.set(fileList.toString());

			context.write(key, result);
		}
	}

	/**
	 * Per-document word-frequency table plus bookkeeping of how many tokens
	 * were seen before and after filtering.
	 */
	public static class WordsTable {
		private int count = 0; // number of distinct words
		private int initialAmount = 0; // token count before filtering
		private int filteredAmount = 0; // token count after filtering
		HashMap<String, Integer> words = new HashMap<String, Integer>();

		/**
		 * Records one occurrence of {@code word}; the first sighting of a
		 * word also grows the distinct-word counter.
		 */
		public void stat(String word) {
			Integer seen = words.get(word);
			if (seen == null) {
				// New word: register it and grow the vocabulary size.
				words.put(word, 1);
				this.count++;
			} else {
				// Known word: just bump its frequency.
				words.put(word, seen + 1);
			}
		}

		/** @return iterator over the (word, frequency) entries */
		public Iterator<Entry<String, Integer>> getWords() {
			return this.words.entrySet().iterator();
		}

		/** @return token count before filtering */
		public int getInitialAmount() {
			return initialAmount;
		}

		public void setInitialAmount(int initialAmount) {
			this.initialAmount = initialAmount;
		}

		/** @return token count after filtering */
		public int getFilteredAmount() {
			return filteredAmount;
		}

		public void setFilteredAmount(int filteredAmount) {
			this.filteredAmount = filteredAmount;
		}

		public void setCount(int count) {
			this.count = count;
		}

		/** @return number of distinct words recorded so far */
		public int getCount() {
			return this.count;
		}
	}

	// 分析器，去停词，提词干
	/**
	 * Analyzer pipeline: standard tokenization and normalization, then
	 * lower-casing, stop-word removal and Porter stemming.
	 */
	public static class PositionalPorterStopAnalyzer extends Analyzer {
		private Set<?> stopWords;

		/** Uses Lucene's built-in English stop-word set. */
		public PositionalPorterStopAnalyzer() {
			this(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
		}

		/** Uses the caller-supplied stop-word set. */
		public PositionalPorterStopAnalyzer(Set<?> englishStopWordsSet) {
			this.stopWords = englishStopWordsSet;
		}

		public TokenStream tokenStream(String field, Reader reader) {
			// Build the chain stage by stage: tokenize -> normalize ->
			// lower-case -> drop stop words -> stem.
			TokenStream chain = new StandardFilter(new StandardTokenizer(
					Version.LUCENE_30, reader));
			chain = new LowerCaseFilter(chain);
			chain = new StopFilter(true, chain, stopWords);
			return new PorterStemFilter(chain);
		}
	}
}
