package idx;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.*;
import org.apache.lucene.util.Version;
import java.io.*;
import java.util.*;

import java.io.IOException;

/**
 * Minimal Lucene 3.4 demo: builds an in-memory index over a few short
 * documents (Chinese and English), runs a query against the "text" field,
 * and prints the hits.
 *
 * <p>Uses the deprecated Lucene 3.x API (e.g. {@code IndexWriter.optimize()}),
 * hence the suppression on {@code main}.
 */
public class TestLucene {

  @SuppressWarnings("deprecation")
  public static void main(String[] args) throws IOException, ParseException {
    // 0. Specify the analyzer for tokenizing text.
    //    The same analyzer must be used for indexing and searching.
    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_34);

    // 1. Create the index (in-memory; swap in FSDirectory.open(...) for on-disk).
    Directory index = new RAMDirectory();

    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_34, analyzer);

    IndexWriter w = new IndexWriter(index, config);
    try {
      addDoc(w, "今天天气不错哦");
      addDoc(w, "可惜我不想去");
      addDoc(w, "今天天气真的挺好的不错哦气不错气不错哦不错");
      addDoc(w, "The Art of Computer Science");
      addDoc(w, "Shit Happens");
      w.optimize();
    } finally {
      // Close even if indexing fails, so the directory lock is released.
      w.close();
    }

    // 2. Build the query. "text" is the default field used when the query
    //    string does not name a field explicitly.
    String querystr = args.length > 0 ? args[0] : "气不错哦";
    Query q = new QueryParser(Version.LUCENE_34, "text", analyzer).parse(querystr);

    // 3. Search, collecting the top hits by score.
    int hitsPerPage = 10;
    IndexSearcher searcher = new IndexSearcher(index, true);
    try {
      TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
      searcher.search(q, collector);
      ScoreDoc[] hits = collector.topDocs().scoreDocs;

      // 4. Display results.
      System.out.println("Found " + hits.length + " hits.");
      for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        Document d = searcher.doc(docId);
        System.out.println((i + 1) + ". " + d.get("text"));
      }
    } finally {
      // The searcher may only be closed once the documents are no longer needed.
      searcher.close();
    }
  }

  /**
   * Adds a single document whose stored, analyzed "text" field holds {@code value}.
   *
   * @param w     open writer to add the document to
   * @param value the document body
   * @throws IOException if the writer fails to add the document
   */
  private static void addDoc(IndexWriter w, String value) throws IOException {
    Document doc = new Document();
    doc.add(new Field("text", value, Field.Store.YES, Field.Index.ANALYZED));
    w.addDocument(doc);
  }

  /**
   * Adds a document whose "title" field is tokenized from the contents of the
   * file at {@code path}, plus a stored placeholder "text" field.
   *
   * <p>Note: a Reader-backed Field is indexed but never stored, so "title"
   * cannot be retrieved from search results. The reader is closed on failure;
   * on success Lucene consumes and closes it during addDocument.
   *
   * @param path path of the file to index
   * @throws IOException if the file cannot be opened or the document cannot be added
   */
  private static void addFile(IndexWriter w, String path) throws IOException {
    // Open the reader once; the original opened two FileReaders and compared
    // one against null (always false), leaking the first one.
    FileReader reader = new FileReader(path);
    try {
      Document doc = new Document();
      doc.add(new Field("title", reader));
      doc.add(new Field("text", "shit", Field.Store.YES, Field.Index.ANALYZED));
      w.addDocument(doc);
    } catch (IOException e) {
      reader.close();
      throw e;
    }
  }
}