# -*- coding: utf-8 -*-


__author__ = 'Fonny'

import os
from urllib2 import unquote
import re as regex
from requests import request
from bs4 import BeautifulSoup as btfs
import chardet


# strEncoding = ''

def main():
	"""Crawl the 52pojie download index and dump every item to a text file.

	Removes any previous output file first so repeated runs do not
	append duplicate entries, then delegates to getUrlPageItems.
	"""
	output_path = r'_site_items.txt'

	# Start from a clean log file on every run.
	if os.path.exists(output_path):
		os.remove(output_path)

	return getUrlPageItems(r'http://down.52pojie.cn/', output_path)


def logToFile(aFile, aStr, aEncoding):
	"""Append aStr to aFile, encoded with aEncoding.

	aStr may be a text string (written as-is) or a dict (written as one
	'key:value' line per entry followed by a blank separator line).
	Any other type is rejected with a console message and nothing is
	written. Returns None.
	"""
	if isinstance(aStr, dict):
		# One 'key:value' line per entry, then a blank separator line.
		# join() avoids the quadratic += string building of the original.
		text = u''.join(u'%s:%s\n' % (k, v) for k, v in aStr.items()) + u'\n'
	elif isinstance(aStr, (str, type(u''))):
		# BUG FIX: the original only accepted `str`, so the unicode
		# literal passed by getUrlPageItems on a layout change fell
		# through to the error branch and was never logged (Python 2).
		text = aStr
	else:
		print(r"Can't write the string")
		return None

	# Append as bytes so the encoding is applied explicitly and
	# identically on py2/py3; `with` guarantees the handle is closed
	# even if write() raises (the original leaked it on exceptions).
	with open(aFile, 'ab') as f:
		f.write(text.encode(aEncoding))
	return None


def getUrlPageItems(aUrl, aOutFile):
	"""Fetch the directory-listing page at aUrl, append every entry to
	aOutFile, and recurse into sub-directories.

	aUrl     -- listing page URL; expected to end with '/' so that
	            relative child hrefs can simply be appended to it.
	aOutFile -- path of the text file entries are appended to.
	Returns None (the result is the side effect on aOutFile).
	"""
	try:
		reobj = request('get', aUrl)
	# BUG FIX: `except Exception, e` is Python-2-only syntax; `as e`
	# works on py2.6+ and py3. `e.message` is deprecated/absent on
	# most exception types, so format the exception itself.
	except Exception as e:
		print('无法获取链接内容\n%s' % (e,))
		return None

	src = reobj.content
	# Detect the page encoding so logged text is re-encoded consistently.
	strEncoding = chardet.detect(src)['encoding']
	srcBtf = btfs(src, "lxml", from_encoding=strEncoding)

	# A href ending in '/' marks a sub-directory we should recurse into.
	dirFlag = regex.compile(r'.*/$', regex.IGNORECASE)

	for link in srcBtf.select('#list > tbody > tr'):

		if len(link.contents) != 3:
			# Unexpected row shape: the site layout changed, so stop
			# instead of scraping garbage.
			print('page has changed style!')
			# BUG FIX: the original referenced the undefined name
			# `outFile` here, raising NameError exactly when the
			# failure should have been logged.
			logToFile(aOutFile, u'page has changed style!', strEncoding)
			return None

		outd = {'name': link.contents[0].a.string.strip(),
			'size': link.contents[1].string.strip(),
			'date': link.contents[2].string.strip(),
			'url': link.contents[0].a['href'].strip(),
			'parenturl': aUrl,
			'suburl': 'None',
			'fullurl': '%s%s' % (aUrl, link.contents[0].a['href'].strip())}

		# Call match() on the compiled pattern directly.
		if dirFlag.match(outd['url']):
			outd['suburl'] = "%s%s" % (aUrl, outd['url'])

		# Skip the parent-directory entry to avoid crawling upwards forever.
		if outd['name'] == r'Parent directory/':
			continue

		logToFile(aOutFile, outd, strEncoding)

		if outd['suburl'] != 'None':
			getUrlPageItems(outd['suburl'], aOutFile)

	return None


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
	main()
