package com.lv.lucence;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

/**
 * Created by lvjilei on 2018/3/29.
 */
/**
 * Indexes every {@code .txt} file from a fixed data directory into a Lucene
 * index directory, storing each file's name, canonical path and full content.
 */
public class TxtFileIndexer {

    /**
     * Entry point: reads each {@code *.txt} file under
     * {@code /Library/mySource/lucence/data} (decoded as GBK), adds it to the
     * index at {@code /Library/mySource/lucence/index}, and prints the
     * analyzer's tokens for each file's canonical path.
     */
    public static void main(String[] args) {
        File dataFileDir = new File("/Library/mySource/lucence/data");
        File indexFileDir = new File("/Library/mySource/lucence/index");

        Analyzer analyzer = new StandardAnalyzer();
        // try-with-resources guarantees the directory and writer are closed
        // (committing pending documents) even if indexing throws midway;
        // the original only closed the writer on the success path.
        try (Directory indexDir = FSDirectory.open(indexFileDir.toPath());
             IndexWriter indexWriter =
                     new IndexWriter(indexDir, new IndexWriterConfig(analyzer))) {

            // listFiles() returns null when the path is missing or not a
            // directory; the original would NPE at the for-loop.
            File[] files = dataFileDir.listFiles();
            if (files == null) {
                System.err.println("Data directory not readable: " + dataFileDir);
                return;
            }
            for (File file : files) {
                if (!file.isFile() || !file.getName().endsWith(".txt")) {
                    continue;
                }
                StringBuilder sb = new StringBuilder();
                // Close the reader per file; the original leaked one
                // BufferedReader per indexed file.
                try (BufferedReader br = new BufferedReader(
                        new InputStreamReader(new FileInputStream(file), "gbk"))) {
                    String lineContent;
                    while ((lineContent = br.readLine()) != null) {
                        // Keep a separator: readLine() strips the newline, so
                        // the original fused the last word of one line with
                        // the first word of the next in the indexed content.
                        sb.append(lineContent).append('\n');
                    }
                }

                Document document = new Document();
                document.add(new TextField("name", file.getName(), Field.Store.YES));
                document.add(new TextField("path", file.getCanonicalPath(), Field.Store.YES));
                document.add(new TextField("content", sb.toString(), Field.Store.YES));
                indexWriter.addDocument(document);

                // NOTE(review): this tokenizes the file PATH, not its content
                // (sb) — looks unintentional; confirm before changing.
                for (String s : getWords(file.getCanonicalPath(), analyzer)) {
                    System.out.print(s + "\t");
                }
                System.out.println();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Tokenizes {@code str} with the given analyzer and returns the terms in
     * stream order.
     *
     * @param str      text to analyze
     * @param analyzer analyzer used to produce the token stream
     * @return the extracted terms; empty if analysis fails
     */
    public static List<String> getWords(String str, Analyzer analyzer) {
        List<String> result = new ArrayList<>();
        // TokenStream is AutoCloseable; its contract is
        // reset -> incrementToken* -> end -> close. The original skipped
        // end() and hand-rolled the close in a finally block.
        try (TokenStream stream = analyzer.tokenStream("content", new StringReader(str))) {
            CharTermAttribute attr = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                result.add(attr.toString());
            }
            stream.end();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return result;
    }
}
