#!/bin/bash

## Sync citeulike library to local file repository.
## Most (all?) metadata is recorded in the bibtex export.
## The first attachment is downloaded.
##
## http://www.citeulike.org/groupforum/1786
##
## TODO:
## - How to notice if citeulike entry was modified (attachment/metadata)?
##   Currently you have to delete the local entry but only the first few entries are compared :-/
## - Upload local changes?
## - Sync against DBLP?
## - The only useful thing about citeulike is recommendation and the fancy web interface.. :-/

# Local repository root: one subdirectory per citeulike article id.
papers=/home/pepe/files/lispler/citeulike

# citeulike credentials (fill in before use).
user=
pass=

# Abort with a non-zero status (the original bare `exit` reported success)
# if the repository is missing or not writable; quote against odd paths.
[ -d "$papers" ] || { echo "error: $papers is not a directory" >&2; exit 1; }
[ -w "$papers" ] || { echo "error: $papers is not writable" >&2; exit 1; }

# scan specific tag
# (we're not parsing all the available pages yet and
#  some docs are not visible when looking at complete set)
tag=""
[ -n "$1" ] && tag="/tag/$1"

## login
# Session cookie jar. mktemp replaces the deprecated, Debian-only
# `tempfile` utility and creates the file with safe permissions.
cookies=$(mktemp) || exit 1
# NOTE(security): --post-data exposes the password in `ps` output;
# consider --post-file with a protected file instead.
wget -q -O /dev/null --keep-session-cookies --save-cookies "$cookies" \
	--user-agent "Firefox" \
	--post-data="username=$user&password=$pass&perm=1" http://www.citeulike.org/login.do

# Reusable download command; expanded unquoted below on purpose so it
# word-splits into command + options. The original nested double quotes
# around Firefox only worked by accident (they closed and reopened the
# string) — the agent name has no spaces, so no inner quotes are needed.
wget="wget -q --user-agent Firefox --load-cookies $cookies"

## get latest stuff (based on public RSS)
#feed="$(wget -q -O - --load-cookies "$cookies" http://www.citeulike.org/rss/user/$user)"
#  for id in $(echo "$feed"| grep "item rdf:about"|sed -e s/".*article\/"// -e s/\".*//); do

## get latest stuff (based on Library)
feed="$($wget -O - "http://www.citeulike.org/user/$user$tag")"

## foreach(article)
# Article ids are scraped from the library HTML; sort -n | uniq because
# each id may appear several times on the page.
for id in $(echo "$feed"| grep "href=\"/user/$user/article/[0-9]*\">"|sed -e s/".*article\/"// -e s/"\">.*"//|sort -n |uniq); do

	# ignore known articles
	test -d "$papers/$id" && continue
	echo -n "Exporting article $id..."
	# download to tmp, so that we can seamlessly retry on error
	tmp=$(mktemp -d)

	## download BibTex
	$wget -O "$tmp/$id.bib" "http://www.citeulike.org/bibtex/user/$user/article/$id?incl_amazon=0"

	# get article page
	article="$($wget -O - "http://www.citeulike.org/user/$user/article/$id")"

	# merge reading priority (0 = already read)
	prio="$(echo "$article"|grep " alt=\"Priority"|sed -e s/".*alt=\"Priority "// -e s/"\" .*"//)"
	# strip everything but digits so the numeric tests below are safe
	prio="$(echo "$prio"|tr -d "[:alpha:][:punct:][:cntrl:][:space:]")"
	# BUG FIX: when the page has no Priority marker $prio is empty and the
	# unquoted `[ $prio -gt 5 ]` was a test syntax error; default it first.
	[ -z "$prio" ] && prio=1
	[ "$prio" -gt 5 ] && prio=1
	[ "$prio" -lt 0 ] && prio=1
	echo "$prio" > "$tmp/.priority"

	echo -n "loading files..."

	# attachment filename (if any) and external linkout URL
	file="$(echo "$article"|grep "href=\"/pdf//user/$user/article/$id"|sed -e s/".*href.*$id\/"// -e s/\".*//)"
	link="$(echo "$article"|grep "id=\"linkouts\"" |grep "{type: 'URL'" \
		|sed -e s/".*{type: 'URL'"// -e s/"{type:.*"// -e s/".*href=\""// -e s/"\".*"//)"

	# only use the link if no file was attached
	[ -n "$file" ] && link="http://www.citeulike.org/pdf/user/$user/article/$id/$file"

	# create better file name from bibtex: CamelCaseAllWords.sameExt
	# note that we always have at most one file, so one name is okay
	filename="$(grep " title" "$tmp/$id.bib"|sed s/.*=//|tr '[:punct:][:cntrl:]' ' '|sed "s/\b\(.\)/\u\1/g"|tr -d '[:space:]')"
	filename="$filename$(echo "$link"|sed -e s/".*\."/./ -e s/"\?.*"//)"

	# we still need to check $link in case there was no $file..
	[ -n "$link" ] && $wget -O "$tmp/$filename" "$link"

	# publish the finished entry atomically; quoted so odd paths survive
	mv "$tmp" "$papers/$id"

	echo fin.

done

rm -f "$cookies"

# vim:tw=0
