import getpass
import os
import urllib
import urllib2
from xml.dom import minidom
import string
import sys
import md5
#BEGIN###########################################################################################
import re
import time
from datetime import datetime
import shutil
#END###########################################################################################

# --- Script configuration and defaults --------------------------------------

# Default Google Docs feed URL (overridable with --feed=).
feed = "https://docs.google.com/feeds/documents/private/full"
dlall = True        # download everything unless --modified-only is given
version = 0.02      # this script's version, compared against 1st-soft.net
recordtolog = True  # cleared when a download fails so log.txt is not updated

# Export-format presets:
#   name -> (document ext, spreadsheet fmcmd value, presentation ext)
formats = {
	'mso': ("doc", "4",        "ppt"),
	'csv': ("txt", "5&gid=0",  "txt"),
	'oo':  ("odt", "13",       "ppt"),
	'pdf': ("pdf", "12",       "pdf"),
	'txt': ("rtf", "23&gid=0", "txt"),
}
format = "mso"
docfmt, spdfmt, prsfmt = formats[format]

cmdemail = ""  # e-mail supplied via --email=
cmdpass = ""   # password supplied via --password=
path = ""      # destination directory supplied via --path=

for arg in sys.argv:
	if arg.find("--feed=")==0:
		val = arg.partition("=")[2]
		feed = val
		print feed
	if arg == "--modified-only" or arg == "-m":
		dlall = False
	if arg == "--version" or arg == "-v":
		print "Current version of gdd.py:", version
		try:
			vreq = urllib2.Request("http://www.1st-soft.net/gdd/version2.txt")
			vresponse = urllib2.urlopen(vreq).read()
			print "Most recent version is " + vresponse.strip()
			if float(vresponse) > version:
				print "Your version of gdd.py is out of date!"
				print "Visit http://www.1st-soft.net/gdd for an updated version."
			else:
				print "gdd.py is currently up to date."
		except urllib2.HTTPError:
			print "Could not contact 1st-soft.net for most recent version."
		sys.exit(0)
	if arg == "--help" or arg == "-h":
		print "Please see readme.txt"
		sys.exit(0)
	if arg.find("--format=")==0:
		format = arg.partition("=")[2]
		if format in formats:
			docfmt = formats[format][0]
			spdfmt = formats[format][1]
			prsfmt = formats[format][2]
	if arg.find("--document-format=")==0:
		val = arg.partition("=")[2]
		if val in formats:
			docfmt = formats[val][2]
		else:
			docfmt = val
	if arg.find("--spreadsheet-format=")==0:
		val = arg.partition("=")[2]
		if val in formats:
			spdfmt = formats[val][2]
		else:
			spdfmt = val
	if arg.find("--presentation-format=")==0:
		val = arg.partition("=")[2]
		if val in formats:
			prsfmt = formats[val][2]
		else:
			prsfmt = val
	if arg.find("--email=")==0:
		val = arg.partition("=")[2]
		cmdemail = val
	if arg.find("--password=")==0:
		val = arg.partition("=")[2]
		cmdpass = val
	if arg.find("--path=")==0:
		val = arg.partition("=")[2]
		if os.path.exists(val):
			path = val + "/"
			path = string.replace(path,"//","/")


def login():
	global path
	
	if cmdemail == "":
		email = raw_input("Email: ")
	else:
		email = cmdemail
	if cmdpass == "":
		password = getpass.getpass()
	else:
		password = cmdpass

	url = 'https://www.google.com/accounts/ClientLogin'
	user_agent = 'GDD Python 0.01'
	values = {'Email' : email, 'Passwd' : password, 'accountType' : 'HOSTED_OR_GOOGLE', 'service' : 'writely', 'source' : 'GDD Python 0.01' }
	values2 = {'Email' : email, 'Passwd' : password, 'accountType' : 'HOSTED_OR_GOOGLE', 'service' : 'wise', 'source' : 'GDD Python 0.01' }
	headers = { 'User-Agent' : user_agent }

	try:
		data = urllib.urlencode(values)
		data2 = urllib.urlencode(values2)
		req = urllib2.Request(url, data, headers)
		req2 = urllib2.Request(url, data2, headers)
		response = urllib2.urlopen(req)
		response2 = urllib2.urlopen(req2)
		docauth = response.read()
		docauth = docauth.splitlines()[2].split("=")[1]
		spreadauth = response2.read()
		spreadauth = spreadauth.splitlines()[2].split("=")[1]
	
		if(os.path.exists(path+email) == False):
			os.mkdir(path+email)
	
		return (email,docauth,spreadauth)
	
	except urllib2.HTTPError:
		print "Authentication failed"
		sys.exit(0)
		return ("","","")


def getFeed(auth,feed,lastdate):
	try:
		url = feed
		if not dlall and lastdate != None and url.find("?")==-1:
			url = url + "?" + "updated-min=" + lastdate
		elif not dlall and lastdate != None and url.find("?")!=-1:
			url = url + "&" + "updated-min=" + lastdate
		user_agent = 'GDD Python 0.01'
		req = urllib2.Request(url)
		req.add_header('Authorization','GoogleLogin auth='+auth[1])
		response = urllib2.urlopen(req)
		feedxml = response.read()
		return feedxml
	except:
		print "Feed request failed"
		sys.exit(0)
		return ""


def parseFeed(xml):
	"""Parse the documents feed XML into download work items.

	For every <entry>, reads its gd:resourceId ("type:id") and <updated>
	timestamp and builds the matching export URL.

	Returns (docs_urls, spreadsheet_urls), where each item is a list of
	[export_url, updated_timestamp, resource_id]. Spreadsheets are kept
	separate because they must be fetched with the 'wise' auth token.
	"""
	dom = minidom.parseString(xml)
	docs = []
	urls1 = []  # documents, presentations and PDFs ('writely' token)
	urls2 = []  # spreadsheets ('wise' token)
	for node in dom.getElementsByTagName('entry'):
		updated = None
		resid = None
		for child in node.childNodes:
			if child.nodeName == 'gd:resourceId':
				resid = child.childNodes[0].nodeValue
			if child.nodeName == 'updated':
				updated = child.childNodes[0].nodeValue
		if resid is None:
			# BUG FIX: an entry without a resourceId used to reuse the
			# previous entry's id (or raise NameError on the first entry).
			continue
		# The ':' in the timestamp are swapped for '|' so the combined
		# "type:id:timestamp" string can simply be split on ':' below.
		docs.append(resid + ":" + updated.replace(":", "|"))
	for item in docs:
		parts = item.split(":")
		doctype = parts[0]
		docid = parts[1]
		dt_updated = parts[2]
		if doctype == "document":
			urls1.append(["https://docs.google.com/feeds/download/documents/Export?docID="+docid+"&exportFormat="+docfmt,dt_updated,docid])
		elif doctype == "spreadsheet":
			urls2.append(["https://spreadsheets.google.com/feeds/download/spreadsheets/Export?key="+docid+"&fmcmd="+spdfmt,dt_updated,docid])
		elif doctype == "pdf":
			urls1.append(["https://docs.google.com/gb?export=download&id=F."+docid,dt_updated,docid])
		else:
			# Anything else (presentations) uses the presentation export.
			urls1.append(["https://docs.google.com/feeds/download/presentations/Export?docID="+docid+"&exportFormat="+prsfmt,dt_updated,docid])
	return (urls1, urls2)



#BEGIN###########################################################################################
def getFile(url_tu_id,headers,dname,fname):
	url = url_tu_id[0]
	dfname_timeupdate = url_tu_id[1]
	dfname_docid = url_tu_id[2]
#END###########################################################################################
	if dname != "":
		dname = dname+"/"
	req = urllib2.Request(url)
	for k, v in headers.iteritems():
		req.add_header(k, v)
	try:
		f = urllib2.urlopen(req)
		
		if(f.info().getheader('Content-Disposition') != None):
			fname = f.info().getheader('Content-Disposition').split("=")[1]
			fname = urllib2.unquote(fname)
			fname = string.replace(fname,"\"","")
			fname = string.replace(fname,"/","_")
			fname = string.replace(fname,"\\","_")
		        #BEGIN###########################################################################################
			fname = string.replace(fname,"\xC3","i")
			fname = string.replace(fname,"\xAD","i")
		        #END###########################################################################################
		print "Downloading " + fname + "...",
		sys.stdout.flush()

		#BEGIN###########################################################################################
		dfname = fname  
		x3 = dfname_docid.split("_")
		if len(x3) == 2 :
			dfname_docid_2 = x3[1]
		else :
			dfname_docid_2 = dfname_docid
		dfname_split = dfname.split(".")
		if len(dfname_split) > 1 :
			dfname_noext = ".".join(dfname_split[:-1])
			dfname_ext = "."+dfname.split(".")[-1]
		else :
			dfname_noext = dfname
			dfname_ext =  ""
		#dfname_new = dfname_noext + "__" + dfname_docid_2 + "__" + dfname_timeupdate + "__" + dfname_ext
		dfname_new = dfname_noext + "__" + dfname_docid_2 + "__" + dfname_ext
		print " renamed to " + dfname_new + " " ,
		# obtain the original modified time from google docs.
		keyword = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d)\|(\d\d)\|(\d\d).*")
		result = keyword.search(dfname_timeupdate)
		_dt_unixtime_ = 0
		if result:
			_dt_ = datetime( int(result.group(1)),int(result.group(2)),\
				int(result.group(3)),int(result.group(4)),\
				int(result.group(5)),int(result.group(6))) 
			_dt_unixtime_ = time.mktime(_dt_.timetuple())
		#if file already exist, move it to ./backup directory
		#   the new file name will be <orig_file_name>__<docid>__<update YYYYMMDDhhmmss>.<orig_extention>
		if os.path.exists(dname+dfname_new) :
			dfn_mtime = os.stat(dname+dfname_new).st_mtime
			if _dt_unixtime_ != dfn_update :
				print " backup previus file " ,
				dfn_update = datetime.fromtimestamp(dfn_mtime).strftime("%Y%m%d%H%M%S")
				dfn_bkp = dfname_noext + "__" + dfname_docid + "__" + dfn_update + "__" + dfname_ext
				dir_bkp = dname +"backup_old/"
				if not os.path.isdir(dir_bkp) :
					try:
						os.mkdir(dir_bkp)
					except OSError:
						if len(dname) > 1 :
							dir_bkp = dname[:-1] + "__backup__"
						else:
							dir_bkp = "__backup__"
				shutil.move(dname+dfname_new,dir_bkp+dfn_bkp)
			##os.utime(dir_bkp+dfn_bkp, (b_atime, b_mtime)) # set the original timestamp
			else:
				print " both mtimes are equal."
		local = open(dname+dfname_new, "wb")
		#END###########################################################################################

		chunkSize = 10240
		while 1:
			dbuffer = f.read(chunkSize)
			if dbuffer:
				local.write(dbuffer)
			else:
				break
		local.close()

		#BEGIN###########################################################################################
		if _dt_unixtime_ > 0 :
			# set the original file mtime from google docs
			os.utime( dname+dfname_new , ( _dt_unixtime_ , _dt_unixtime_ ) )
			print " datetime " , _dt_ ,
		#END###########################################################################################

		print " done"
		return True
	except urllib2.HTTPError:
		print "There was an error downloading " + url
		return False
	except urllib2.URLError:
		print "There was an error downloading " + url
		return False


def download(auth, docs, spreadsheets):
	"""Fetch every queued document and spreadsheet.

	Documents/presentations/PDFs are fetched with the 'writely' token
	(auth[1]); spreadsheets need the 'wise' token (auth[2]). Returns True
	only if every single download succeeded.
	"""
	ok = True
	dest = path + auth[0]
	jobs = [
		(docs,         auth[1], "document"),
		(spreadsheets, auth[2], "spreadsheet"),
	]
	for entries, token, fallback_name in jobs:
		auth_header = {'Authorization': 'GoogleLogin auth=' + token}
		for entry in entries:
			# getFile runs first so one failure never skips later files.
			ok = getFile(entry, auth_header, dest, fallback_name) and ok
	return ok

print "Welcome to Google Docs: Download for Python"
print "Authenticating..."
auth = login()
print "Authentication Successful!"

if not os.path.exists("log.txt"):
	z = open("log.txt","w")
	z.close()

f = open("log.txt","r")
while 1:
	d = f.readline()
	if d and d.strip() != "":
		d = d.split(" :: ")
		if d[0].strip() == auth[0] and d[2].strip() == feed:
			lastdate = d[1].strip()
			break
	else:
		lastdate = None
		break
f.close()

print "Retrieving list of documents..."
feedxml = ""
if auth[0] != "" and auth[1] != "":
	feedxml = getFeed(auth,feed,lastdate)
if feedxml != "":
	print "Document list retrieved."
	urls = parseFeed(feedxml)
	print "Downloading your documents..."
	recordtolog = download(auth,urls[0],urls[1]) and recordtolog


if auth[0] != "" and auth[1] != "":
	dom = minidom.parseString(feedxml)
	thedate = dom.getElementsByTagName('updated')[0].childNodes[0].nodeValue
	
	if(os.path.exists("data") == False):
		os.mkdir("data")
	m = md5.new()
	m.update(auth[0]+" :: "+feed)
	f = open("data/"+m.hexdigest()+".xml", "w")
	f.write(feedxml)
	f.close()
	
	if recordtolog:
		print "Writing session information to log.txt..."
		os.rename("log.txt","log.txt.old")
		z = open("log.txt.old","r")
		f = open("log.txt", "w")
		f.write(auth[0]+" :: "+thedate+" :: "+feed+"\n")
		while 1:
			d = z.readline()
			if d:
				f.write(d)
			else:
				break
		f.close()
		z.close()
		os.remove("log.txt.old")
	else:
		print "There were some errors while downloading your files.  Session will not be recorded to log.txt."
	
	
	print "All done!"


# This software is licensed under the CC-GNU GPL.
# http://creativecommons.org/licenses/GPL/2.0/
# Google Docs: Download for Python was written by Peter Shafer, in June 2009.

