package lucene;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.StringTokenizer;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import utils.Stemmer;
import utils.WordUtil;

import com.mysql.jdbc.Connection;
import com.mysql.jdbc.ResultSet;
import com.mysql.jdbc.Statement;

/**
 * Builds a Lucene index over the bibsonomy bibtex table.
 *
 * <p>For each article/inproceedings record with a non-empty author and a
 * non-null abstract, the title and abstract are tokenized, stop-word /
 * number / noise filtered, and stemmed; author names are resolved to
 * numeric ids via a tab-separated map file. Each record becomes one Lucene
 * document with fields "content", "content_id" and "author".
 */
public class BibIndex {

	private Connection conn;
	// canonicalized author name -> numeric author id, loaded from authorMap.txt
	private HashMap<String, Long> authorMap = new HashMap<String, Long>();
	private WordUtil stopper = new WordUtil();
	private Stemmer stemmer = new Stemmer();

	/**
	 * Opens a JDBC connection to the local "bibsonomy" MySQL database and
	 * loads the author-name-to-id map from disk.
	 *
	 * @throws Exception if the driver cannot be loaded, the connection
	 *                   fails, or the author map file cannot be read
	 */
	public BibIndex() throws Exception {
		Class.forName("com.mysql.jdbc.Driver").newInstance();
		String url = "jdbc:mysql://localhost:3306/" + "bibsonomy";
		String dbuser = "root";
		String dbpass = "123456";
		this.conn = (Connection) DriverManager.getConnection(url, dbuser,
				dbpass);
		initAuthorMap();
	}

	/**
	 * Populates {@link #authorMap} from a tab-separated file of
	 * {@code authorName<TAB>id} lines.
	 */
	private void initAuthorMap() throws Exception {
		String path = "D:\\CityU\\project\\bibsonomy\\data" + "\\authorMap.txt";
		BufferedReader reader = new BufferedReader(new FileReader(path));
		// Close the reader even on a malformed line (the original leaked it).
		try {
			String line = null;
			while ((line = reader.readLine()) != null) {
				String[] tokens = line.trim().split("\t");
				authorMap.put(tokens[0], Long.valueOf(tokens[1]));
			}
		} finally {
			reader.close();
		}
	}

	/**
	 * Reads the qualifying bibtex rows from the database and writes one
	 * Lucene document per row. Rows whose author string cannot be resolved
	 * to at least one known author id are reported to stderr and skipped.
	 *
	 * @throws Exception on database or index-writing failure
	 */
	public void index() throws Exception {
		Statement stmt = null;
		ResultSet rs = null;

		File indexDir = new File("D:\\CityU\\project\\bibsonomy\\"
				+ "\\BibIndex\\");
		if (!indexDir.exists())
			indexDir.mkdir();

		IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir),
				new StandardAnalyzer(Version.LUCENE_CURRENT), true,
				IndexWriter.MaxFieldLength.LIMITED);

		try {
			writer.setUseCompoundFile(false);
			stmt = (Statement) conn.createStatement();

			System.out.println("begin indexing");

			// BUG FIX: the original predicate was
			//   (entrytype = 'article' or 'inproceedings')
			// In MySQL the bare string 'inproceedings' coerces to 0 (FALSE),
			// so only articles were ever selected. Use IN to match both.
			rs = (ResultSet) stmt
					.executeQuery("SELECT content_id, author, title, bibtexAbstract from bibtex"
							+ " where entrytype in ('article', 'inproceedings') and author is not null"
							+ " and author != '' and bibtexAbstract is not null;");
			while (rs.next()) {
				String content_id = rs.getString("content_id");

				String author = rs.getString("author").trim();

				HashSet<Long> authors = extractAuthor(author);

				if (authors == null || authors.size() == 0) {
					System.err.println(content_id + "\tnull author\t" + author);
					continue;
				}

				// Space-separated author ids (leading space kept, as before).
				StringBuilder auBuf = new StringBuilder();
				for (long a : authors) {
					auBuf.append(' ').append(a);
				}
				String auStr = auBuf.toString();

				ArrayList<String> stringList = new ArrayList<String>();

				String title = rs.getString("title").trim().toLowerCase();
				// Title additionally splits on backslash and '?'.
				appendCleanTokens(title, ", \t\n\r\f\\?", stringList);

				String abs = rs.getString("bibtexAbstract");
				appendCleanTokens(abs, ", \t\n\r\f", stringList);

				StringBuilder buffer = new StringBuilder();
				for (String s : stringList) {
					buffer.append(s).append(" ");
				}
				String text = buffer.toString();

				Document doc = new Document();

				doc.add(new Field("content", text, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));

				doc.add(new Field("content_id", content_id, Field.Store.YES,
						Field.Index.NOT_ANALYZED, Field.TermVector.NO));

				doc.add(new Field("author", auStr, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));
				writer.addDocument(doc);
			}

		} finally {
			if (rs != null) {
				try {
					rs.close();
				} catch (SQLException ignored) {
					// best-effort close; nothing useful to do here
				}
				rs = null;
			}

			if (stmt != null) {
				try {
					stmt.close();
				} catch (SQLException ignored) {
					// best-effort close; nothing useful to do here
				}
				stmt = null;
			}
			writer.close();
		}

	}

	/**
	 * Tokenizes {@code text} on {@code delimiters}, drops stop words,
	 * numbers and noise tokens, stems the survivors, and appends them to
	 * {@code out}.
	 */
	private void appendCleanTokens(String text, String delimiters,
			ArrayList<String> out) {
		StringTokenizer tokens = new StringTokenizer(text, delimiters);
		while (tokens.hasMoreTokens()) {
			String token = stopper.removeNoise(tokens.nextToken().trim());
			if (stopper.isStopWord(token)) {
				continue;
			}
			if (stopper.isNumber(token)) {
				continue;
			}
			if (stopper.isNoise(token)) {
				continue;
			}
			out.add(stemmer.stem(token));
		}
	}

	/**
	 * Resolves an author string (either a single name or several names
	 * joined by " and ") to the set of known author ids.
	 *
	 * @return {@code null} when a single-author string is unknown; for
	 *         multi-author strings unknown names are skipped and the
	 *         (possibly empty) set of resolved ids is returned
	 */
	private HashSet<Long> extractAuthor(String au) throws Exception {
		HashSet<Long> authors = new HashSet<Long>();

		if (au.indexOf(" and ") == -1) {
			Long id = authorMap.get(normalize(au));
			if (id == null)
				return null;
			authors.add(id);
			return authors;
		}

		for (String str : au.split(" and ")) {
			Long id = authorMap.get(normalize(str));
			if (id == null)
				continue;
			authors.add(id);
		}

		return authors;
	}

	/**
	 * Canonicalizes one author name so it matches the keys produced for
	 * {@link #authorMap}: collapses tab/newline/carriage-return to spaces,
	 * then maps every character through {@link #transfer(char)}.
	 */
	private String normalize(String str) {
		str = str.trim().replaceAll("[\t\n\r]", " ").trim();
		StringBuilder sb = new StringBuilder(str.length());
		for (char ch : str.toCharArray()) {
			sb.append(transfer(ch));
		}
		return sb.toString().trim();
	}

	/**
	 * Keeps ASCII letters and whitespace unchanged; every other character
	 * (digits, punctuation, accented letters, ...) becomes "_".
	 */
	private String transfer(char ch) {
		if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')
				|| Character.isWhitespace(ch)) {
			return Character.toString(ch);
		}
		return "_";
	}

	public static void main(String[] args) throws Exception {
		BibIndex indexer = new BibIndex();
		indexer.index();
	}

}
