#! /usr/bin/python
# -*- coding: utf-8 -*- 
#import urllib.parse
#import urllib.request
import HTMLParser
from BeautifulSoup import BeautifulSoup
import getpass
import sys
import os
import codecs
import re
from mechanize import Browser
import traceback
class MyParser1(HTMLParser.HTMLParser):
	"""Collects course links from the course-list page.

	Give putlist() a dict, then feed() the page HTML: the dict is
	filled in place as {anchor text (unicode, stripped): href} for
	every <a> whose href contains 'course_id'.
	"""
	def __init__(self):
		HTMLParser.HTMLParser.__init__(self)
		self.pending = 0   # 1 => the next data chunk is a course name
		self.href = ''     # href captured from the matching <a>

	def putlist(self, thelist):
		# Caller-owned dict, mutated in place during feed().
		self.out = thelist

	def handle_starttag(self, tag, attrs):
		# Remember anchors that point at a course page.
		if tag != 'a':
			return
		for name, value in attrs:
			if name == 'href' and 'course_id' in value:
				self.pending = 1
				self.href = value

	def handle_data(self, text):
		# The text right after a matching anchor is the course name.
		if self.pending != 1:
			return
		self.pending = 0
		self.out[text.decode('utf-8', 'ignore').strip()] = self.href
class MyParser2(HTMLParser.HTMLParser):
	"""Collects downloadable-file links from a course page.

	Give putlist() two dicts, then feed() the page HTML: both are
	filled in place, keyed by a running counter —
	list1[i] = anchor text (unicode, stripped), list2[i] = href —
	for every <a> whose href contains 'downloadFile_student' or
	'hom_wk_detail'.
	"""
	def __init__(self):
		HTMLParser.HTMLParser.__init__(self)
		self.st = 0      # 1 => next data chunk belongs to a captured link
		self.value = ''  # href captured from the matching <a>
		self.count = 0   # running key shared by both output dicts

	def putlist(self, thelist1, thelist2):
		# Caller-owned dicts, mutated in place during feed().
		self.list1 = thelist1
		self.list2 = thelist2

	def handle_starttag(self, tag, attrs):
		if tag == 'a':
			for name, value in attrs:
				# Either a direct file download or a homework-detail page.
				if name == 'href' and (value.find('downloadFile_student') != -1 or value.find('hom_wk_detail') != -1):
					self.st = 1
					self.value = value

	def handle_data(self, text):
		if self.st == 1:
			self.st = 0
			# Decode before strip (consistent with MyParser1) so that
			# non-ASCII whitespace is handled on the unicode string.
			self.list1[self.count] = text.decode('utf-8', 'ignore').strip()
			self.list2[self.count] = self.value
			self.count += 1



class webDownload:
	'''Mirror one student's course files and homework attachments
	from Learn.tsinghua.edu.cn.

	Typical use: setting('<dir>/') then downloadall(userid, userpass).'''
	# Site entry points; per-course pages are reached by rewriting the
	# course link found on the course-list page.
	learnurl = 'http://learn.tsinghua.edu.cn'
	homeworkurl = 'http://learn.tsinghua.edu.cn//MultiLanguage/lesson/student/'
	contenturl = 'http://learn.tsinghua.edu.cn/MultiLanguage/lesson/student/MyCourse.jsp?typepage=2'
	def __init__(self):
		# mechanize Browser keeps the session cookies across requests.
		self.br = Browser()
		# course name (unicode) -> course page link, filled by MyParser1.
		self.courselist = {}
		# NOTE(review): filelist appears unused in this file.
		self.filelist = []
	
	def login_me(self, userid , userpass):
		# Log in through the site's 'form1'; returns 1 on success, 0 on
		# failure.  On success the course-list page is fetched into
		# self.html for later parsing.
		self.br.open(webDownload.learnurl)
		self.br.select_form(name='form1')
		self.br['userid'] = userid
		self.br['userpass'] = userpass
		resp = self.br.submit()
		# The post-login page references loginteacher_action.jsp only
		# when the credentials were accepted.
		if resp.read().find('loginteacher_action.jsp') != -1:
			print 'login successfully...'
		else:
			print 'login failed'
			return 0
		r=self.br.open(webDownload.contenturl)
		self.html = r.get_data()
		return 1	
		
	def setting(self , dirname):
		# Remember the output directory (callers pass a trailing '/').
		self.dir = dirname	
	def downloadall(self,userid , userpass):
		# Entry point: log in, parse the course list, then download
		# files and homework for every course.  Returns 0 on login
		# failure, None otherwise.
		su = self.login_me( userid , userpass)
		if su == 0:
			return 0
		hp = MyParser1()
		hp.putlist(self.courselist)
		hp.feed(self.html)
		hp.close()
		print ' You have %d courses for all.'%len(self.courselist)
		self.downloadcourse()
	def downloadcourse(self):
		# Walk every course; an error in one course is printed and the
		# loop continues with the next one.
		for coursename in self.courselist:
			try:

				print 'begin reading:'+coursename
				self.downloadonecourse(coursename , self.courselist[coursename])
				self.downloadonecoursehomework(coursename , self.courselist[coursename])
				coursedir = self.dir.decode('ascii','ignore')+coursename
				# Courses that yielded nothing get a '<name>_empty'
				# marker directory so reruns can tell them apart from
				# never-visited ones.
				if (not os.path.exists(coursedir)) and ( not os.path.exists(coursedir+'_empty')):
					os.makedirs(coursedir+'_empty')
			except KeyboardInterrupt:
				raise KeyboardInterrupt
			except:
				
				traceback.print_exc()
				print 'Error occurred when downloading '+coursename
				continue
	def downloadonecourse(self , coursename , courselink):
		# Download every lecture file of one course into
		# <dir>/<course>/Files/, skipping files already on disk.
		filecount = 0
		failcount = 0
		filedir = u''
		filedir += self.dir.decode('ascii','ignore')+coursename+u'/Files/'
		# The file-list page reuses the course link with the action
		# name swapped.
		html = self.br.open(webDownload.learnurl+courselink.replace('course_locate' , 'download')).get_data()
		# A body containing this phrase ("an error occurred") means the
		# course has no file area.
		if(html.decode('utf-8','ignore').find(u'发生错误了') !=-1):
			return
		hp = MyParser2()
		titlelist = {}
		filelinklist = {}
		hp.putlist(titlelist,filelinklist)
		hp.feed(html)
		hp.close()
		if len(titlelist) >0:
			if not os.path.exists(filedir):
#				print 'mkdir'
#				print filedir
				os.makedirs(filedir)
 
		for ct in titlelist:
			try:
				file = self.br.open(webDownload.learnurl+filelinklist[ct])
				# The server sends the real filename GB2312-encoded and
				# quoted in the Content-Disposition header.
				filename = file.info()['Content-Disposition'].split(r'"')[1].decode('gb2312','ignore').strip()
				# 'application' content types get written in binary mode.
				if file.info()['content-type'].find('application')!= -1:
					nottext = 1
				else:
					nottext = 0
	                        if titlelist[ct] != '':
        	                        filename = titlelist[ct]+u'_'+filename
                	        filename = self.makelegalfilename(filename)
#                        	print 'save file:'
#	                        print filedir + filename
        	                if not os.path.isfile(filedir + filename):
                	                try:
                        	                if nottext == 1:
                                	                f = open((filedir + filename) , "wb")
                                        	else:
                                                	f = open((filedir + filename) , "w")
					except KeyboardInterrupt:
						raise KeyboardInterrupt
	                                except:
        	                                print 'Error when creating'+filedir+filename
						failcount += 1
                	                        continue
					if self.downloadfile(f , file , webDownload.learnurl+filelinklist[ct])== 1:
						filecount += 1
					else:
						failcount += 1
					
 
			except KeyboardInterrupt:
				raise KeyboardInterrupt
			except:
				traceback.print_exc()
				print 'Error when downloading files'
				failcount += 1
				continue
		print '%d File downloaded, %d failed'%(filecount,failcount)
	def downloadonecoursehomework(self , coursename , courselink):
		# Download homework descriptions (as details_*.txt files) and
		# attached files of one course into <dir>/<course>/Homeworks/.
		filecount = 0
		failcount = 0
                html = self.br.open(webDownload.learnurl+courselink.replace('course_locate' , 'hom_wk_brw')).get_data()
		# Same "an error occurred" sentinel as in downloadonecourse.
		if html.decode('utf-8','ignore').find(u'发生错误了')!= -1:
			return
		soup = BeautifulSoup(html)
		titlelist = {}
		detaillinklist = {}
		tmpcount=0
		# Collect every link to a homework-detail page.
		for tmps in soup.findAll('a'):
			if tmps['href'].find('hom_wk_detail') != -1:
				titlelist[tmpcount] = tmps.string
				detaillinklist[tmpcount] = tmps['href']
				tmpcount += 1
		if len(titlelist) != 0:
 	               filedir = u"" + self.dir.decode('ascii','ignore')+coursename+u'/Homeworks/'
        	       if not os.path.exists(filedir):
#				print 'mkdir'
#				print filedir
                	        os.makedirs(filedir)
 
                for ct in titlelist:
                        html2 = self.br.open(webDownload.homeworkurl+detaillinklist[ct]).get_data().decode('gb2312','ignore')
			#print html2
			# list1 gathers the detail page's visible text, list2
			# the attachment download links.
			list1 = []
			list2 = []
			soup = BeautifulSoup(html2)
			tdlist = soup.findAll(['td','a'])
			tdlist = [t.string.strip() for t in tdlist if not t.string is None]
			#print tdlist
			alist = soup.findAll('a')
			if (not alist is None) and len(alist) > 0:
				for tt in alist:
					#if not  tt.string is None:
					#	tdlist.append(tt.string)
					if tt['href'].find('downloadFile')!= -1:
						list2.append(tt['href'])
			for tmps in tdlist:
				if not tmps is None:
					list1.append(tmps.strip().replace('&nbsp',' ')+'\n')
			filename = filedir+self.makelegalfilename(u'details_'+`ct`+'_'+titlelist[ct]+u'.txt')
#			print 'detail filename'
#			print filename
			# Write the description once; fall back to a title-less
			# name when the full one cannot be created.
			if not os.path.isfile(filename):
				try:
					f= codecs.open(filename , 'w','utf-8')
				except KeyboardInterrupt:
					raise KeyboardInterrupt
				except:
					filename = filedir + u'details_' + `ct` + u'.txt'
					try:
						f = codecs.open(filename , 'w','utf-8')
					except KeyboardInterrupt:
						raise KeyboardInterrupt
					except:
						print 'Error when creating'+ filename
						continue
				f.write("".join(list1))
				f.close()
#			print 'detail:'
#			print "".join(list1)
			# Fetch each attachment, mirroring downloadonecourse.
			for ct2 in list2:
				try:
	                        	file = self.br.open(webDownload.learnurl+ct2)
        	                	filename = file.info()['Content-Disposition'].split(r'"')[1].decode('gb2312','ignore').strip()
                                	if file.info()['content-type'].find('application')!= -1:
                                       		nottext = 1
                                	else:
                                        	nottext = 0
 	                                filename = self.makelegalfilename(filename)
#        	                        print 'homework file'
#                	                print filedir+filename
                        	        if not os.path.isfile(filedir + filename):
                                	        try:
                                        	        if nottext == 1:
                                                	        f = open(filedir + filename , "wb")
	                                                else:
        	                                                f = open(filedir + filename, "w")
                	                        except:
                        	                        print 'Error when creating' + filename
							failcount += 1
                                	                continue
						if self.downloadfile(f , file , webDownload.learnurl+ct2) == 1:
							filecount += 1
						else:
							failcount += 1
				except KeyboardInterrupt:
					raise KeyboardInterrupt
 				except:
					print 'Error when downloading homework file'
					traceback.print_exc()
					failcount += 1
					continue
		print '%d Homework downloaded, %d failed'%(filecount,failcount)

 	def makelegalfilename(self,name):
		# Replace characters that Windows forbids in filenames and cap
		# the length, keeping something extension-like at the end.
		s = name.replace('&nbsp',' ').replace('/','_').replace('\\','_').replace('?','_').replace('*','_').replace(r'"','_').replace(':','_')
		s = s.replace('<','_').replace('>','_').replace('|','_')
		if len(s) > 256:
			s = s[0:200]+'...'+s.split('.')[-1]
		return s
	def downloadfile(self , f , fileres , link):
		# Copy the open HTTP response 'fileres' into file object 'f'.
		# Responses larger than 'precache' bytes are read in chunks of
		# that size.  Returns 1 on success, 0 on any error.
		precache = 5000000
		try:
			le = int(fileres.info()['content-length'])

			if le < precache:
				f.write(fileres.read())
				f.close()
			else:
				for i in range(0,int(le/precache)+1):
					f.write(fileres.read(precache))
				f.close()
			fileres.close()
		except KeyboardInterrupt:
			raise KeyboardInterrupt
		except:

			traceback.print_exc()
			return 0
		else:
			return 1
if __name__=='__main__':
	try:
		web = webDownload()
		dir = raw_input('input dir to save files:')
		if not dir.endswith('/'):
			dir = dir + '/'
		user = raw_input('input username(like 20060xxxxx):')
		passwd = getpass.getpass('input password:')
		web.setting(dir)
		web.downloadall(user,passwd) 
	except:
		print 'something goes wrong'
		traceback.print_exc()
	raw_input('Download Finish. Press ENTER to exit')
