// Package backup logs graphs to disk for backup and restore.
package backup

import (
	"encoding/csv"
	"fmt"
	"log"
	"time"

	graph "code.google.com/p/feedgraph/graph/http"
)

// Backup collects edge records from watched graphs and buffers them on
// an internal channel until Write drains them to a CSV writer.
type Backup struct {
	// records is a buffered queue of CSV rows of the form
	// {graph, source, relation, destination, timestamp}.
	records chan []string
}

func WriteRecord(w *csv.Writer, g, src, dest, rel string, at time.Time) {

	err := w.Write([]string{g, src, rel, dest, at.Format(time.Stamp)})

	if err != nil {
		log.Println(err)
		return
	}


}

func ReadRecord(w *csv.Reader) (g, src, dest, rel string, at time.Time, err error) {

	strings, err := w.Read()

	if err != nil {
		log.Println(err)
		return
	}

	at, err = time.Parse(time.Stamp, strings[4])

	if err != nil {
		log.Println(err)
		return
	}

	g, src, rel, dest = strings[0], strings[1], strings[2], strings[3]

	return
}

// RecordEdgeFeed subscribes to the (g, node, rel) edge feed and pushes
// every entry onto the backup queue as a CSV row. It blocks until the
// feed channel is closed by the graph package, and currently always
// reports a nil error.
func (b *Backup) RecordEdgeFeed(g, node, rel string) error {
	feed := make(chan graph.Entry, 100)
	graph.Watch(g, node, rel, feed)

	for e := range feed {
		log.Println("downloading ", g, node, e.Id, rel, e.Time)
		b.records <- []string{g, node, rel, e.Id, e.Time.Format(time.Stamp)}
	}

	return nil
}

// New returns a Backup whose record queue buffers up to 100 CSV rows.
func New() *Backup {
	b := &Backup{records: make(chan []string, 100)}
	return b
}

// RecordNode watches node's relation feed in graph g and records each
// relation's edge feed. NOTE(review): RecordEdgeFeed blocks until its
// feed closes, so relations are processed one at a time, and its
// (always nil) error result is discarded here.
func (b *Backup) RecordNode(g, node string) {
	log.Println("watching node ", node)

	relations := make(chan graph.Entry, 100)
	graph.Watch(g, node, "_RELATIONS", relations)

	for rel := range relations {
		log.Println("watching relation ", rel.Id)
		b.RecordEdgeFeed(g, node, rel.Id)
	}
}

// RecordGraph watches graph g's new-node feed and spawns a recorder
// goroutine for each node that appears. It blocks until the feed
// channel is closed.
func (b *Backup) RecordGraph(g string) {
	log.Println("watching graph ", g)

	nodes := make(chan graph.Entry, 100)
	graph.Watch(g, "_NODES", "_NEW", nodes)

	for n := range nodes {
		go b.RecordNode(g, n.Id)
	}
}

// RecordAll starts a recorder goroutine for every known graph and
// returns immediately. The error result is currently always nil.
func (b *Backup) RecordAll() error {
	for _, name := range graph.Graphs() {
		go b.RecordGraph(name)
	}
	return nil
}

// Write drains the backup queue, writing each record to w as a CSV row
// and flushing after every record so rows reach the underlying writer
// promptly. It blocks until b.records is closed.
//
// Errors are logged rather than panicking — panicking in library code
// would take down every recording goroutine, and the rest of this
// package handles errors by logging. A record that fails to write is
// dropped; the flush error is surfaced via w.Error().
func (b *Backup) Write(w *csv.Writer) {
	for record := range b.records {
		if err := w.Write(record); err != nil {
			log.Println(err)
			continue
		}

		w.Flush()
		// Flush reports errors only through w.Error(); the original
		// code never checked it, silently losing flush failures.
		if err := w.Error(); err != nil {
			log.Println(err)
		}
	}
}
