package corpustools;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import strings.AhoCorasickTST;
import strings.CharIterator;
import strings.SetMatch;
import swutils.BinaryIn;
import swutils.BinaryOut;
import swutils.In;
import swutils.StdOut;

/**
 * A concordance validator for UTF-8 text. Non-UTF-8 byte sequences, or
 * chars from unexpected Unicode blocks, are treated as corruptions.
 */
public class ValidUtf8 {

    /** Exact set matcher, for example: Aho Corasick automaton. */
    private SetMatch match;

    /**
     * For each search pattern, the list of normalized string
     * representations of its byte sequence (as produced by {@code put}).
     */
    private List<List<String>> outputs;

    /**
     * We could specify the patterns directly as: String[] words =
     * {"1", "32", "422", "5222"}; But maybe it's nicer to specify
     * characters that will be normalized in each of these ways. Then
     * there is consistency: the search patterns are normalized in the
     * same way as the text.  As an extra touch, I put patterns for
     * newlines (Mac,Dos,Unix).  If one is expecting newlines of a
     * particular sort, then any other sort of newline can be seen as
     * a minor sort of corruption
     */
    private String[] words = {"a", "б", "€", "𠀊", "\r\n", "\r", "\n"};

    /**
     * Constructor. Adds the built-in patterns to the matcher and
     * compiles it; the matcher is ready for searching on return.
     * @param match0 an exact set matcher, Aho-Corasick automaton, for example
     */
    public ValidUtf8(SetMatch match0) {
        this.match = match0;
        outputs = new ArrayList<List<String>>();
        for (String s : words) {
            outputs.add(put(s));
        }
        match.compile();
    }

    /**
     * Add a search string to automaton as a sequence of bytes.
     * The string is normalized byte-by-byte the same way the searched
     * text will be, so patterns and text stay consistent.
     * @param s the string to add
     * @return a list for the byte sequence that was added
     */
    public List<String> put(String s) {
        List<String> normalized = new ArrayList<String>();
        // StandardCharsets.UTF_8 carries no checked exception; the old
        // getBytes("UTF-8") catch-and-continue path would have left the
        // stream null and crashed on the next line.
        InputStream is =
            new ByteArrayInputStream(s.getBytes(StandardCharsets.UTF_8));
        BinaryIn in = new BinaryIn(is);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BinaryOut out = new BinaryOut(baos);

        // stringRep is a one-element out-parameter: normalize() stores the
        // printable representation of each byte it consumes there.
        String[] stringRep = new String[1];
        while (!in.isEmpty()) {
            char c = Utf8NormalizedInput.normalize(in.readChar(),
                                                   stringRep);
            normalized.add(stringRep[0]);
            out.write(c);
        }
        out.flush();
        // NOTE(review): baos.toString() decodes with the platform default
        // charset; confirm this matches what SetMatch expects on all
        // platforms (the normalized chars are assumed single-byte here).
        match.put(baos.toString());

        return normalized;
    }

    /**
     * Search method for the corruptions.
     * @param norm the normalized text, represented as an iterator over chars
     * @param detect the callback invoked on each match
     */
    private void search(CharIterator norm, CorruptionDetection detect) {
        detect.reset();
        match.setMatchAction(detect);
        match.search(norm);
    }

    /**
     * Make concordance for corrupted or unexpected characters.
     * Prints the file name, then any characters found in Unicode blocks
     * outside the expected set. Silently returns if the file is missing.
     * @param file the suspect text
     * @param radius space around key word in concordance
     * @param expectedBlocks expected blocks for a given text.
     */
    public void validateFile(String file, int radius,
                             Character.UnicodeBlock[] expectedBlocks) {
        System.out.println(file);
        Set<Character.UnicodeBlock> blocks =
            new HashSet<Character.UnicodeBlock>();
        for (Character.UnicodeBlock b : expectedBlocks) {
            blocks.add(b);
        }
        CorruptionDetection detect =
            new CorruptionDetection(radius, blocks, outputs);

        BinaryIn bin = new BinaryIn(file);
        if (!bin.exists()) { return; }
        Utf8NormalizedInput norm = new Utf8NormalizedInput(bin, detect);
        search(norm, detect);
        detect.flush();
        Map<Character.UnicodeBlock, Set<Character>>
            unexpectedBlocks = detect.getUnexpected();
        for (Character.UnicodeBlock b : unexpectedBlocks.keySet()) {
            System.out.println(b + ": " + unexpectedBlocks.get(b));
        }
    }


    /**
    * Program to apply validator to a file.
    * @param args args[0] is filename of file with list of expected
    * Unicode blocks. args[1],args[2],... are names of files to be
    * validated.
    */
    public static void main(String[] args) {

        final int RADIUS = 20;

        SetMatch match = new AhoCorasickTST();

        List<Character.UnicodeBlock>  selected =
            new ArrayList<Character.UnicodeBlock>();

        In in = new In(args[0]);

        try {
            while (!in.isEmpty()) {
                String s = in.readLine();
                Character.UnicodeBlock u = Character.UnicodeBlock.forName(s);
                StdOut.println("Selected = " + u);
                selected.add(u);
            }
        } catch (Exception e) {
            // UnicodeBlock.forName throws IllegalArgumentException for an
            // unknown block name; report it and proceed with whatever
            // blocks were successfully read.
            System.out.println(e);
        }

        Character.UnicodeBlock[] blocks =
            selected.toArray(new Character.UnicodeBlock[0]);

        // Build the validator ONCE. The previous per-file construction
        // re-inserted the same patterns into the shared SetMatch and
        // recompiled the automaton for every input file.
        ValidUtf8 validator = new ValidUtf8(match);
        for (int i = 1; i < args.length; i++) {
            validator.validateFile(args[i], RADIUS, blocks);
        }
    }
}
