#! /usr/bin/tclsh
# $Id$
# Some sketches to parse Plucker Documents (http://www.plkr.org)
# http://cvs.plkr.org/index.cgi/*checkout*/docs/DBFormat.html?rev=HEAD&content-type=text/html

# Per-package state shared by the procs below.
namespace eval ::tclplkr {
	# Palm OS Epoch:
	# Palm OS timestamps count seconds since 1904-01-01 00:00 UTC;
	# [palmtime] uses this value to convert to/from Unix time.
	variable timebase [clock scan "1904-01-01 00:00 UTC"]
}

set dir [file dir [info script]]
source [file join $dir utils.tcl]
source [file join $dir charsets.tcl]
source [file join $dir unzip.tcl]

# Synopsis:
#   sanity error ?-topic topic? MESSAGE
#   sanity assert EXPRESSION ?MESSAGE?
# Purpose:
#   Central helper for raising document-format errors.
#   "error" raises MESSAGE in the caller's frame with errorcode
#   TCLPLKR EBADFORMAT (the -topic option is accepted but currently
#   only validated, not used).
#   "assert" evaluates EXPRESSION in the caller's frame and raises
#   a sanity error (MESSAGE, or the expression text) when it is false.
proc ::tclplkr::sanity {action args} {
	switch -- $action {
		error {
			set alen [llength $args]
			if {$alen == 0 || ($alen > 1 && ($alen % 2 == 0))} {
				return -code error "syntax error. usage: sanity error ?-topic topic? message"
			}
			# All sanity failures share one errorcode so callers can trap them:
			set ec [list TCLPLKR EBADFORMAT]
			set msg [lindex $args end]
			# Raise in the caller's frame so the reported location is useful:
			uplevel 1 [list return -code error -errorcode $ec $msg]
		}
		assert {
			set alen [llength $args]
			if {$alen < 1 || $alen > 2} {
				return -code error "syntax error. usage: sanity assert EXPRESSION ?MESSAGE?"
			}
			lassign $args exp msg
			if {$msg eq ""} { set msg $exp }
			# Fix: evaluate EXPRESSION in the caller's frame so that it can
			# reference the caller's variables; the previous version ran
			# [expr] locally, where e.g. the caller's $data is not visible.
			if {![uplevel 1 [list expr $exp]]} {
				uplevel 1 [list ::tclplkr::sanity error $msg]
			}
		}
		default {
			return -code error [format "bad option \"%s\", must be one of error, assert" $action]
		}
	}
}

# Purpose:
#   Converts between Palm OS timestamps (seconds since 1904-01-01 UTC)
#   and Unix timestamps.
# Input:
#   action:    "convertfrom" (Palm -> Unix) or "convertto" (Unix -> Palm).
#              Matched with -glob, so any action ending in "from"/"to"
#              is accepted as well.
#   timestamp: the value to convert.
# Output:
#   The converted timestamp.
proc ::tclplkr::palmtime {action timestamp} {
	variable timebase ;# Unix time of the Palm OS epoch

	switch -glob -- $action {
		*from {
			expr {$timebase + $timestamp}
		}
		*to {
			expr {$timestamp - $timebase}
		}
		default {
			# Fix: the format string lacked the %s placeholder, so the
			# offending action name never made it into the message.
			sanity error [format "bad action \"%s\",\
				must be one of: convertfrom, convertto" $action]
		}
	}
}

# Purpose:
#   Opens the Plucker database named by the single command-line argument
#   in binary mode and reports its size via the caller's variable $vsize.
#   Prints a usage message and exits when the argument count is wrong.
# Output:
#   The channel id of the opened file.
proc ::tclplkr::open_input vsize {
	global argv0 argv

	if {[llength $argv] != 1} {
		puts stderr "Usage: [file tail $argv0] FILENAME"
		exit 1
	}

	set fname [lindex $argv 0]
	set chan [open $fname]
	upvar 1 $vsize size
	set size [file size $fname]

	# The database is binary data; disable newline translation:
	fconfigure $chan -translation binary

	return $chan
}

# Synopsis:
#   Checked read from a channel.
# Purpose:
#   Reads $amount characters from the current position in $chan and
#   checks whether the number of read characters match the number
#   requested.  If it matches, the data that was read is returned,
#   otherwise the sanity error with "premature end of file" is raised in
#   the caller's callframe.
# Input:
#   Channel id of the opened Plucker database;
#   Amount of characters to read.
# Output:
#   The data that was read, if the read operation succeeded.
# Side effects:
#   The current position of $chan is moved forward by $amount or to its
#   end position.
proc ::tclplkr::ckread {chan amount} {
	# See the block comment above: read exactly $amount characters or
	# raise "premature end of file" in the caller's frame.
	set chunk [read $chan $amount]
	if {[string length $chunk] < $amount} {
		uplevel 1 [list sanity error "premature end of file"]
	}
	return $chunk
}

# Purpose:
#   Extracts a NUL-terminated string from $data starting at the offset
#   held in the caller's variable $voff.  On success the offset variable
#   is advanced past the terminator.
# Output:
#   The extracted string (without the NUL); raises a sanity error when
#   no terminator is present.
proc ::tclplkr::getasciiz {data voff} {
	upvar 1 $voff off
	set from $off
	set ix [string first \0 $data $from]
	if {$ix >= 0} {
		# Fix: the original tested {$ix > 0}, which misclassified a NUL
		# at index 0 (an empty string at offset 0) as "not found".
		set off [expr {$ix + 1}]
		string range $data $from [expr {$ix - 1}]
	} else {
		sanity error "NUL-terminator not found"
	}
}

# Purpose:
#   Validates the docName header field: it must be NUL-terminated and
#   consist of printable ASCII (0x20..0x7E) only.
#   NOTE(review): the character just before the terminator is skipped
#   (lrange ... end-1) -- looks deliberate, but verify against
#   DBFormat.html before relying on it.
proc ::tclplkr::check_docName_sanity in {
	set pos 0
	if {[catch { set name [getasciiz $in pos] } problem]} {
		sanity error $problem
	}
	foreach ch [lrange [split $name ""] 0 end-1] {
		set cp [scan $ch %c]
		if {$cp < 0x20 || $cp > 0x7E} {
			sanity error [format "forbidden character: 0x%02x" $cp]
		}
	}
}

# Purpose:
#   Reads the 72-byte PDB header from the start of $chan, validates it
#   (magic string, version, document name) and fills the caller's array
#   $vfields with the interesting fields.
proc ::tclplkr::parse_db_header {chan vfields} {
	seek $chan 0
	set hdr [ckread $chan 72]

	bscan $hdr a32SuSuIuIua8IuIa8I \
		docName flags version creationDate modificationDate \
		unused1 appInfoOffset sortInfoId magic unused2

	# Sanity checks -- keep this order: magic, then version, then name:

	if {$magic ne "DataPlkr"} {
		sanity error [format "invalid magic string: must be \"DataPlkr\",\
			given: %s" $magic]
	}

	if {$version != 1} {
		sanity error [format "unknown version: must be 1, given: %d" $version]
	}

	if {[catch {check_docName_sanity $docName} problem]} {
		sanity error [format "invalid docName: %s" $problem]
	}

	# Checks passed; populate the caller's array:

	upvar 1 $vfields fields

	set fields(docName) $docName ;# TODO actually we should convert it to UTF-8 explicitly
	set fields(version) $version
	set fields(appInfoOffset) $appInfoOffset
	set fields(creationDate)     [palmtime convertfrom $creationDate]
	set fields(modificationDate) [palmtime convertfrom $modificationDate]

	# PDB attribute bits, normalized to 0/1:
	set fields(flags,Backup)         [expr {!!($flags & 0x0008)}]
	set fields(flags,CopyPrevention) [expr {!!($flags & 0x0040)}]
	set fields(flags,Launchable)     [expr {!!($flags & 0x0200)}]
}

# Purpose:
#   Parse the "Record-ID List" block.
# Input:
#   Channel id for an opened Plucker database.
# Output:
#   List of offsets (counting from the start of the document)
#   of "data records".
#   Element at index 0 always points to the "Index Record"
#   so the list always has at least one element.
# Side-effects:
#   Channel access position is moved to the byte just after the
#   end of the Record-ID List.
proc ::tclplkr::parse_record_id_list chan {
	# The Record-ID List starts right after the 72-byte PDB header:
	seek $chan 72
	set data [ckread $chan 6]

	bscan $data IuSu nextRecordListId numRecords

	if {$numRecords < 1} {
		# Fix: corrected grammar of the message ("specify" -> "specifies"):
		sanity error "Record-ID List specifies number of records less than 1"
	}

	# Each list entry: 4-byte record offset, 1-byte attributes,
	# 3-byte unique ID.  Only the offsets are of interest here.
	set out {}
	for {set n 0} {$n < $numRecords} {incr n} {
		set data [ckread $chan 8]

		bscan $data Iuaa3 recordOffset attributes uniqueID
		lappend out $recordOffset
	}

	ckread $chan 2 ;# skip the two padding bytes after the list

	set out
}

# Purpose:
#   Create mapping from UIDs of data records to their offsets
#   in the database file, sizes, and indices in the Record-ID
#   list using the list of data record offsets
#     acquired by [parse_record_id_list].
# Input:
#   chan:   channel id of an opened Plucker database file.
#   recids: list of offsets of the data records;
#     this is what [parse_record_id_list] returns.
#   fsize:  file size in bytes.
#   vmap:   name of the array in the caller's callframe
#     to be populated by the created mapping.
# Output:
#   none
# Side effects:
#   Current position of $chan is changed.
#   While it's changed in a predictable manner, it's better not
#   to be relied upon.
proc ::tclplkr::describe_data_records {chan recids fsize vmap} {
	# See the block comment above: builds map(uid,index|offset|size)
	# for every data record offset in $recids.
	upvar 1 $vmap map

	set position 0
	set previous -1
	foreach off $recids {
		# The first two bytes of every data record hold its UID:
		seek $chan $off
		bscan [ckread $chan 2] Su uid

		set map($uid,index) $position
		set map($uid,offset) $off

		# A record's size is the distance to the next record's offset:
		if {$previous != -1} {
			set map($previous,size) [expr {$off - $map($previous,offset)}]
		}

		set previous $uid
		incr position
	}
	# The last record extends to the end of the file:
	set map($uid,size) [expr {$fsize - $off}]

	# TODO implement size sanity checks
}

# TODO seems like we need some global per-document storage
# so that we could pass just a handle on it to the procs
# like this one:
# Purpose:
#   Parses the Index Record at $offset: determines the document's
#   compression method (caller's variable $vcomp, "doc" or "zlib") and
#   fills the caller's array $vresrecs with the IDs of the reserved
#   records, keyed by mnemonic name.
proc ::tclplkr::parse_index_record {chan offset vcomp vresrecs} {
	seek $chan $offset
	bscan [ckread $chan 6] SuSuSu uid version records

	# The Index Record must always carry UID 0x0001:
	if {$uid != 0x0001} {
		sanity error [format "expected Index Record UID 0x0001,\
			got: 0x%04x" $uid]
	}

	upvar 1 $vcomp comp $vresrecs resrecs

	# The "version" field encodes the compression method:
	switch -exact -- $version {
		1 { set comp doc }
		2 { set comp zlib }
		default {
			sanity error [format "unknown compression method: 0x%04x,\
				expected: 0x0001, 0x0002" $version]
		}
	}

	# Maps numeric "names" of reserved records to their mnemonics.
	# Numbers are implied by indices.
	set names {
		home.html
		external_bookmarks
		URL_handling
		default_category
		additional_metadata
		page_list_metadata
		sorted_URL_name_data
		external_anchor_name_data
	}
	set maxName [expr {[llength $names] - 1}]

	# Each entry is a (name, record ID) pair of unsigned shorts:
	for {set i 0} {$i < $records} {incr i} {
		bscan [ckread $chan 4] SuSu name ID

		if {$name < 0 || $name > $maxName} {
			sanity error [format "invalid reserved record name: %d,\
				must be in range 0..7" $name]
		}

		set resrecs([lindex $names $name]) $ID
	}

	# TODO if implementing paranoid sanity mode add "seen" array and
	# ensure no single name has been seen more than once
}

# Purpose:
#   Parses the 8-byte data record header at $offset and fills the
#   caller's array $vfields with uid, paragraphs, size, the symbolic
#   record type and the decoded flag bits.  When the record carries
#   navigation metadata, the extra 2-byte offset is read as well.
proc ::tclplkr::parse_data_record_header {chan offset vfields} {
	seek $chan $offset

	set data [ckread $chan 8]

	upvar 1 $vfields fields

	# Maps numeric "types" of data records to their mnemonics.
	# Numbers are implied by indices.
	# "DATATYPE_" prefixes of mnemonics are omitted.
	set types {
		PHTML
		PHTML_COMPRESSED
		TBMP
		TBMP_COMPRESSED
		MAILTO
		LINK_INDEX
		LINKS
		LINKS_COMPRESSED
		BOOKMARKS
		CATEGORY
		METADATA
		STYLE_SHEET
		FONT_PAGE
		TABLE
		TABLE_COMPRESSED
		COMPOSITE_IMAGE
		PAGELIST_METADATA
		SORTED_URL_INDEX
		SORTED_URL
		SORTED_URL_COMPRESSED
		EXT_ANCHOR_INDEX
		EXT_ANCHOR
		EXT_ANCHOR_COMPRESSED
	}
	set min 0; set max [expr {[llength $types] - 1}]

	bscan $data SuSuSucc fields(uid) fields(paragraphs) \
		fields(size) type flags

	if {$type < $min || $type > $max} {
		sanity error [format "invalid data record type: %d" $type]
	}
	set fields(type) [lindex $types $type]

	set fields(flags,continued) [expr {!!($flags & 0x01)}]
	set fields(flags,navigationMetadata) [expr {!!($flags & 0x02)}]

	if {$fields(flags,navigationMetadata)} {
		# Fix: ckread was called without its channel argument, which
		# made this branch fail at runtime.
		bscan [ckread $chan 2] Su fields(navMetadataOffset)
	}
}

# Purpose:
#   Parses a DATATYPE_METADATA record at $offset: fills the caller's
#   array $vfields with the decoded metadata topics (CharSet, OwnerID,
#   Author, Title, PublicationDate, ...) and the caller's array
#   $vexpncsets with per-record charset exceptions (uid -> MIBenum).
#   Textual fields are converted from the document charset to the
#   internal encoding.
proc ::tclplkr::parse_metadata_record {chan offset size vfields vexpncsets} {
	parse_data_record_header $chan $offset hdr

	if {![string equal $hdr(type) METADATA]} {
		sanity error "type of metadata record is not DATATYPE_METADATA"
	}

	# TODO calc running size then compare with passed reference value

	set data [ckread $chan 2]
	bscan $data Su recNum

	upvar 1 $vfields fields $vexpncsets expncsets

	# Metadata field types; numbers are implied by indices (0 is unused):
	set types {
		none
		CharSet
		ExceptionalCharSets
		OwnerID
		Author
		Title
		PublicationDate
		LinkedDocuments
	}
	set min 1; set max [expr {[llength $types] - 1}]

	for {set i 0} {$i < $recNum} {incr i} {
		set data [ckread $chan 4]
		bscan $data SuSu type len

		if {$type < $min || $type > $max} {
			sanity error [format "invalid metadata field type: %s" $type]
		}
		set topic [lindex $types $type]

		# $len is in 16-bit words; convert to bytes:
		set datalen [expr {2 * $len}]
		set data [ckread $chan $datalen]

		switch -exact -- $topic {
			CharSet {
				sanity assert {[string length $data] == 2}
				bscan $data Su fields(CharSet)
			}
			ExceptionalCharSets {
				# Each entry is two unsigned shorts (4 bytes):
				# (record uid, charset MIBenum).
				# Fix: the loop bound was {$off < $len / 4}, comparing a
				# byte offset against a word count / 4, which dropped
				# every entry after the first; iterate over the whole
				# $datalen bytes instead.
				for {set off 0} {$off < $datalen} {incr off 4} {
					bscan $data @${off}SuSu uid mibenum
					set expncsets($uid) $mibenum
				}
			}
			OwnerID {
				sanity assert {[string length $data] == 4}
				bscan $data Iu fields($topic)
			}
			Author -
			Title  {
				set off 0
				set fields($topic) [getasciiz $data off]
			}
			PublicationDate {
				bscan $data Iu date
				# Spell the action out (was "from"; -glob matched it):
				set fields($topic) [palmtime convertfrom $date]
			}
			LinkedDocuments {
				# TODO implement
			}
		}
	}

	# Default charset is Latin-1 when the metadata does not name one:
	if {![info exists fields(CharSet)]} {
		set fields(CharSet) [enc2iana iso8859-1]
	}
	# Post process textual fields dependent on default charset:
	# TODO handle case with unknown/unsupported charset
	set cset [mibenum totcl $fields(CharSet)]
	foreach topic {Author Title} {
		if {![info exists fields($topic)]} continue
		set fields($topic) [encoding convertfrom $cset $fields($topic)]
	}
}

# Purpose:
#   Parses a DATATYPE_PHTML* record at $offset: fills the caller's
#   $vheader array via [parse_data_record_header], builds the caller's
#   $vparagraphs list of (size, extra spacing in pixels) pairs and
#   returns the text payload (zlib-decompressed when $comp is "zlib").
# NOTE(review): the [puts] calls are debugging leftovers; consider
#   removing them once the parser stabilizes.
proc ::tclplkr::parse_text_record {chan offset rsize comp vheader vparagraphs} {
	upvar 1 $vheader header $vparagraphs paragraphs
	parse_data_record_header $chan $offset header

	if {![string equal $header(type) PHTML] \
	&& ![string equal $header(type) PHTML_COMPRESSED]} {
		puts $header(type)
		# Fix: the message wrongly said "metadata record":
		sanity error "type of text record is not DATATYPE_PHTML*"
	}

	set paragraphs {}
	set textlen 0
	for {set i 0} {$i < $header(paragraphs)} {incr i} {
		bscan [ckread $chan 4] SuSu size attributes
		# size and extra spacing, in pixels:
		lappend paragraphs [list \
			$size \
			[expr {2 * ($attributes & 0x3)}]
		]
		incr textlen $size
	}
	puts "text len: $textlen"
	puts "rsize: $rsize"
	# TODO sanity assert: textlen must be equal to $header(size)
	# Payload size: record size minus the 8-byte header minus 4 bytes
	# per paragraph descriptor:
	set dsize [expr {$rsize - 8 - (4 * $header(paragraphs))}]
	puts "dsize: $dsize"

	switch -exact -- $comp {
		zlib {
			# TODO -- wrong! what's the correct size?
			zlib decompress [ckread $chan $dsize]
		}
		doc {
		}
		default {
			# Fix: the [format ...] invocation was missing both its
			# $comp argument and the closing bracket, which raised a
			# "missing close-bracket" error whenever this branch ran.
			error [format "invalid compression type \"%s\",\
				must be one of: zlib, doc" $comp]
		}
	}
}

# TODO(stub): intended to process a single text "function" (the
# {code payload} pair produced by [spot_text_func]); not implemented yet.
proc ::tclplkr::process_text_func {func data} {
}

# Purpose:
#   Locates the next text "function" escape (a NUL byte followed by a
#   function-code byte whose low 3 bits give the payload length) in
#   $data, starting at $from.
# Output:
#   1 when a function was found -- the caller's $vfunc receives the
#   {code payload} pair and $vstart/$vend the first/last index of the
#   whole escape; 0 when no NUL remains (out-variables untouched).
proc ::tclplkr::spot_text_func {data from vfunc vstart vend} {
	set pos [string first \u0000 $data $from]
	if {$pos < 0} {
		return 0
	}

	upvar 1 $vfunc func $vstart start $vend end

	set start $pos

	# The byte after the NUL is the (signed) function code:
	set codeix [expr {$pos + 1}]
	binary scan [string index $data $codeix] c code
	set len [expr {$code & 0x07}]

	if {$len > 0} {
		set end [expr {$codeix + $len}]
		set payload [string range $data [expr {$codeix + 1}] $end]
	} else {
		set end $codeix
		set payload ""
	}

	set func [list $code $payload]

	return 1
}

# Purpose:
#   Renders $data with embedded text "functions" made visible: each
#   function escape becomes "<F:CODE LEN>" (hex code, payload length);
#   the plain text in between is copied through unchanged.
proc ::tclplkr::frobnicate data {
	set pos 0
	set lastix [expr {[string length $data] - 1}]

	set result ""
	set func {}; set fstart 0; set fend 0
	while {[spot_text_func $data $pos func fstart fend]} {
		# Copy the plain text preceding this function, if any:
		if {$pos < $fstart} {
			append result [string range $data $pos [expr {$fstart - 1}]]
		}

		lassign $func code payload
		append result [format "<F:%02X %d>" $code [string length $payload]]

		# Resume scanning just past the escape:
		set pos [expr {$fend + 1}]
	}
	# Copy the trailing plain text after the last function:
	if {$pos <= $lastix} {
		append result [string range $data $pos $lastix]
	}

	set result
}

### Testing code:
# Ad-hoc smoke test driver: expects the database filename as the sole
# command-line argument (see [::tclplkr::open_input]) and dumps what it
# parses to stdout.  Statements below depend on each other's state
# (header, offsets, uids, resrecs) -- keep the order.

set fd [::tclplkr::open_input fsize]
puts "db file size: $fsize"

# PDB header:
::tclplkr::parse_db_header $fd header
set header(creationDate) [clock format $header(creationDate)]
set header(modificationDate) [clock format $header(modificationDate)]
parray header

# Record-ID List -> list of data record offsets:
set offsets [::tclplkr::parse_record_id_list $fd]
set n 0
foreach off $offsets {
	puts [format "rec %d:\t%d" $n $off]
	incr n
}

# Map record UIDs to their offsets/sizes/indices:
::tclplkr::describe_data_records $fd $offsets $fsize uids
parray uids

# TODO (LOW) implement parsing of AppInfo Block

puts "after record id, at: [tell $fd]"
::tclplkr::parse_index_record $fd [lindex $offsets 0] comp resrecs
puts "comp: $comp"
parray resrecs

# metadata:
array set metadata {}; array set expncsets {};
set uid $resrecs(additional_metadata)
set off $uids($uid,offset)
set size $uids($uid,size)
::tclplkr::parse_metadata_record $fd $off $size metadata expncsets
if {[info exists metadata(PublicationDate)]} {
	set metadata(PublicationDate) [clock format $metadata(PublicationDate)]
}
parray metadata
parray expncsets

# zlib:
set res [::tclplkr::Zlib_init]
puts "Zlib initialized: $res"
puts "test=[zlib decompress [zlib compress test]]"

# home text record:
set uid $resrecs(home.html)
set off $uids($uid,offset)
set size $uids($uid,size)
array unset header *; set paragraphs {}
set data [::tclplkr::parse_text_record $fd $off $size $comp header paragraphs]
parray header
# Disabled debug branch: dumps the raw text payload to ./dump.
if 0 {
set dump [open dump w]
fconfigure $dump -translation binary
puts -nonewline $dump $data
close $dump
puts ----
puts "data head: [string range $data 0 100]...\n\
...[string range $data [expr {[string length $data] - 100}] end]"
}
puts ----
puts [string range [::tclplkr::frobnicate $data] 0 200]...
puts ----

close $fd

# vim:tw=72:noet


