package org.toycode.solr.extractor.impl;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.poi.poifs.filesystem.DocumentEntry;
import org.apache.poi.poifs.filesystem.DocumentInputStream;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import org.apache.poi.util.LittleEndian;
import org.apache.solr.common.SolrInputDocument;
import org.toycode.solr.config.Prop;
import org.toycode.solr.extractor.PropertyExtractor;

/**
 * This implementation of the interface {@linkplain PropertyExtractor}
 * provides content extraction for the types <em>ms_word</em> and
 * <em>ms_words</em> declared in config.xml.
 *
 * @author <a href="mailto:DL88250@gmail.com">Liang Ding</a>
 * @version 1.0.0.1, Aug 26, 2009
 */
final public class MSWordExtractor implements PropertyExtractor {

    /** Shared logger; avoids repeated {@code Logger.getLogger} lookups. */
    private static final Logger LOGGER =
            Logger.getLogger(MSWordExtractor.class.getName());

    /** Property types (from config.xml) this extractor handles. */
    private static final List<String> EXTRACTABLE_TYPES =
            Arrays.asList("ms_word", "ms_words");

    /**
     * Extracts the text of the Word document(s) referenced by {@code value}
     * and adds it to {@code solrInputDocument} under the field
     * {@code word_content}.
     *
     * @param prop the property descriptor; its {@code type} selects single
     *        ({@code ms_word}) vs. multiple ({@code ms_words}) handling
     * @param value a {@code String} file path for {@code ms_word}, or a
     *        {@code Collection<String>} of paths for {@code ms_words}
     * @param solrInputDocument the document receiving the extracted content
     */
    @SuppressWarnings("unchecked")
    public void process(final Prop prop, final Object value,
                        final SolrInputDocument solrInputDocument) {
        final String type = prop.type;
        if (type.equals(EXTRACTABLE_TYPES.get(0))) {
            // Single document path.
            final String content = readWord((String) value);
            solrInputDocument.addField("word_content", content);
        } else if (type.equals(EXTRACTABLE_TYPES.get(1))) {
            // Collection of document paths; extract each in turn.
            final List<String> contents = new ArrayList<String>();
            for (final String path : ((Collection<String>) value)) {
                contents.add(readWord(path));
            }
            solrInputDocument.addField("word_content", contents);
        }
    }

    /** Manual smoke test against a bundled sample document. */
    public static void main(String[] args) {
        System.out.println(readWord("src/test/resources/sample1.doc"));
    }

    /**
     * Reads the plain text of the MS Word document at the given path.
     *
     * @param path file-system path of a {@code .doc} file
     * @return the extracted text, or {@code null} if the file could not be
     *         opened or parsed (the failure is logged)
     */
    private static String readWord(final String path) {
        FileInputStream in = null;
        try {
            in = new FileInputStream(path);
            return new WordExtractor().extractText(in);
        } catch (final Exception ex) {
            // Covers FileNotFoundException, IOException, and parse failures
            // alike; the previous duplicate catch blocks did the same thing.
            LOGGER.log(Level.SEVERE,
                       "Can not extract content of file [" + path + "]", ex);
        } finally {
            // BUG FIX: 'in' is null when the FileInputStream constructor
            // throws; closing unconditionally raised an NPE that masked the
            // original failure.
            if (in != null) {
                try {
                    in.close();
                } catch (final IOException ex) {
                    LOGGER.log(Level.SEVERE,
                               "Can not close stream of file [" + path + "]",
                               ex);
                }
            }
        }

        return null;
    }

    public List<String> getExtractableTypes() {
        return EXTRACTABLE_TYPES;
    }
}

// The following code is adapted from http://enhydra.javaeye.com/blog/24352; see that post for details.
/**
 * Minimal MS Word (.doc) text extractor that walks the OLE2 container
 * directly (via POIFS) instead of using the full HWPF model: it reads the
 * raw "WordDocument" stream, locates the piece table in the "0Table"/"1Table"
 * stream, and decodes each text piece.
 */
class WordExtractor {

    /**
     * Extracts the plain text from a Word binary document stream.
     *
     * @param in an input stream positioned at the start of a {@code .doc} file
     * @return the document's text pieces, concatenated with single spaces
     * @throws IOException if the stream is not a parseable Word document
     */
    public String extractText(InputStream in) throws IOException {
        ArrayList text = new ArrayList();
        POIFSFileSystem fsys = new POIFSFileSystem(in);

        // Read the entire "WordDocument" stream; it begins with the FIB
        // (file header) and also contains the character data that the piece
        // table entries point into.
        DocumentEntry headerProps = (DocumentEntry) fsys.getRoot().getEntry(
                "WordDocument");
        DocumentInputStream din = fsys.createDocumentInputStream("WordDocument");
        byte[] header = new byte[headerProps.getSize()];

        din.read(header);
        din.close();
        // Flag word at FIB offset 0xA; bit 0x200 selects which table stream
        // ("1Table" vs "0Table") holds the piece table.
        int info = LittleEndian.getShort(header, 0xa);

        boolean useTable1 = (info & 0x200) != 0;

        // Offset of the "complex" data (piece table) inside the table stream.
        // NOTE(review): 0x1a2 is a hard-coded FIB field offset — presumably
        // valid for Word 97+ documents only; confirm against target files.
        int complexOffset = LittleEndian.getInt(header, 0x1a2);

        String tableName = null;
        if (useTable1) {
            tableName = "1Table";
        } else {
            tableName = "0Table";
        }

        DocumentEntry table = (DocumentEntry) fsys.getRoot().getEntry(tableName);
        byte[] tableStream = new byte[table.getSize()];

        din = fsys.createDocumentInputStream(tableName);

        din.read(tableStream);
        din.close();

        // Drop references early; only 'header' and the parsed pieces are
        // used from here on.
        din = null;
        fsys = null;
        table = null;
        headerProps = null;

        int multiple = findText(tableStream, complexOffset, text);

        StringBuffer sb = new StringBuffer();
        int size = text.size();
        tableStream = null;

        // Decode each piece straight out of the raw "WordDocument" bytes.
        for (int x = 0; x < size; x++) {

            WordTextPiece nextPiece = (WordTextPiece) text.get(x);
            int start = nextPiece.getStart();
            int length = nextPiece.getLength();

            boolean unicode = nextPiece.usesUnicode();
            String toStr = null;
            if (unicode) {
                // Unicode pieces are UTF-16LE: 'multiple' bytes per character.
                toStr = new String(header, start, length * multiple, "UTF-16LE");
            } else {
                // Compressed pieces are single-byte Latin-1 text.
                toStr = new String(header, start, length, "ISO-8859-1");
            }
            sb.append(toStr).append(" ");

        }
        return sb.toString();
    }

    /**
     * Parses the piece table at {@code complexOffset} in the table stream and
     * appends one {@link WordTextPiece} per entry to {@code text}.
     *
     * @param tableStream raw bytes of the "0Table"/"1Table" stream
     * @param complexOffset offset of the complex part within the stream
     * @param text output list receiving the parsed pieces
     * @return bytes-per-character multiplier used by the caller (stays 2
     *         unless a compressed piece is seen, which sets it to 1)
     * @throws IOException if the expected piece-table marker byte is absent
     */
    private static int findText(byte[] tableStream, int complexOffset,
                                ArrayList text)
            throws IOException {
        //actual text
        int pos = complexOffset;
        int multiple = 2;
        //skips through the prms before we reach the piece table. These contain data
        //for actual fast saved files
        while (tableStream[pos] == 1) {
            pos++;
            int skip = LittleEndian.getShort(tableStream, pos);
            pos += 2 + skip;
        }
        // Marker byte 2 introduces the piece table proper.
        if (tableStream[pos] != 2) {
            throw new IOException("corrupted Word file");
        } else {
            //parse out the text pieces
            int pieceTableSize = LittleEndian.getInt(tableStream, ++pos);
            pos += 4;
            // Each piece takes 12 bytes: a 4-byte CP entry plus an 8-byte
            // descriptor (with one extra trailing CP, hence the -4).
            int pieces = (pieceTableSize - 4) / 12;
            for (int x = 0; x < pieces; x++) {
                // File offset (FC) of this piece: read from offset 2 of the
                // x-th 8-byte descriptor, located after the (pieces + 1)
                // character-position entries.
                int filePos =
                        LittleEndian.getInt(tableStream, pos +
                                                         ((pieces + 1) * 4) +
                                                         (x * 8) + 2);
                boolean unicode = false;
                if ((filePos & 0x40000000) == 0) {
                    unicode = true;
                } else {
                    // Bit 0x40000000 flags compressed (8-bit) text; the real
                    // offset is the remaining bits halved.
                    unicode = false;
                    multiple = 1;
                    filePos &= ~(0x40000000); //gives me FC in doc stream
                    filePos /= 2;
                }
                // Piece length in characters: difference of adjacent CPs.
                int totLength =
                        LittleEndian.getInt(tableStream, pos + (x + 1) * 4) -
                        LittleEndian.getInt(tableStream, pos + (x * 4));

                WordTextPiece piece = new WordTextPiece(filePos, totLength,
                                                        unicode);
                text.add(piece);

            }

        }
        return multiple;
    }
}

/**
 * Immutable descriptor of one text piece in a Word document: where the
 * piece's bytes start in the "WordDocument" stream, how many characters it
 * holds, and whether it is stored as two-byte (Unicode) text.
 */
class WordTextPiece {

    private final int start;
    private final int length;
    private final boolean unicode;

    public WordTextPiece(int start, int length, boolean unicode) {
        this.start = start;
        this.length = length;
        this.unicode = unicode;
    }

    /** @return {@code true} if the piece text is stored as UTF-16LE */
    public boolean usesUnicode() {
        return unicode;
    }

    /** @return byte offset of the piece within the document stream */
    public int getStart() {
        return start;
    }

    /** @return piece length in characters */
    public int getLength() {
        return length;
    }
}
