#!/bin/sh

W=""
#accumulate all the page names used in all the *.wiki files
#(can't just cat *.wiki because the files aren't guaranteed to end with a \n)
for p in *.wiki
do
    #if the glob matched nothing, $p is the literal string '*.wiki': skip it
    test -f "$p" || continue
    #yes this is one line. The simplified pipeline is
    # tr 1 | sed | awk 1 | awk 2 | tr 2 | awk 3 | awk 4
    # tr 1 - strips out punctuation and formatting chars we don't care about
    # sed - removes single line `` and {{{}}} spans (greedy, so any line that
    #       reaches awk 1 / awk 2 holds at most one ` and one {{{ or }}})
    # awk 1 - removes multiline `` spans, keeping the text before the
    #         opening ` and after the closing `
    # awk 2 - removes multiline {{{}}} spans
    # tr 2 - change spaces to newlines to simplify parsing
    # awk 3 - drops the interior tokens of forced links that span several words
    # awk 4 - match only wikilinks: [forced] names and CamelCase words
    W=$W"
"$(tr '"*_.,|^~=(:);-' " " < "$p" | sed '
    s/`.*`//g
    s/{{{.*}}}//g
' | awk '
    BEGIN { esc = 0 }
    /`/ {
        if (esc) {
            # closing line: keep only what follows the backtick
            print substr($0, index($0, "`") + 1)
        } else {
            # opening line: keep only what precedes the backtick
            print substr($0, 1, index($0, "`") - 1)
        }
        esc = !esc
        next
    }
    { if (!esc) print }
' | awk '
    BEGIN { code = 0 }
    /{{{/ { code++; next }
    { if (!code) print }
    /}}}/ { code--; next }
' | tr " " "
" | awk '
    BEGIN { brak = 0 }
    /\[/ {
        # a [ token with no closing ] opens a multi-token forced link;
        # its interior tokens are suppressed until a ] token arrives
        brak = !match($0, /\]/)
        print $0
        next
    }
    /\]/ { brak = 0; next }
    { if (!brak) print }
' | awk '
    /^\[.*#/ { print substr($1, 2, index($1, "#") - 2); next }
    /^\[.*\]/ { print substr($1, 2, length($1) - 2); next }
    /^\[.*/ { print substr($1, 2); next }
    /^[A-Z][a-z]+[A-Z][a-zA-Z]*/ { print }
    { next }
')
done

# cut out all the duplicates: word-split W (deliberately unquoted, one page
# name per word), emit one name per line, and let sort -u do the old
# sort | uniq pair in a single step
# shellcheck disable=SC2086
W=$(printf '%s\n' $W | sort -u)

#go through the accumulated possible pages and remove any item that maps to
#an existing page (a page named Foo is backed by the file Foo.wiki).
#Checking the file directly fixes two bugs in the old n^2 scan:
# - 'test $w == $n' is a bashism that errors under a POSIX /bin/sh
# - sed 's/'$w'//' deleted $w as a *substring*, so removing Foo also
#   mangled FooBar into Bar
missing=""
for w in $W
do
    if test -f "$w.wiki"
    then
        continue
    fi
    missing=$missing"
"$w
done
W=$missing

#we just have the pages that don't exist left - print one name per line
#(deliberate word splitting of $W; printf '%s\n' emits one word per line,
# replacing the old echo | tr pair)
# shellcheck disable=SC2086
printf '%s\n' $W
