# -*- coding: UTF-8 -*-

import sys
import os
import os.path
import urllib
import urllib.request
import bs4
import argparse
import time

from urllib.request import urlopen
from urllib.parse import urlparse
from bs4 import BeautifulSoup
#from bs4 import SoupStrainer

# NOTE(review): DEFAULT_ENCODING is not referenced anywhere in this file chunk;
# presumably the target site serves gb18030-encoded pages — confirm before removing.
DEFAULT_ENCODING = 'gb18030'
MIN_FILE_SIZE = 2   # minimum acceptable download size in KB; smaller files are treated as failures
MAX_RETRIES = 5     # maximum download attempts per page before giving up

socket_timeout = 20.0  # seconds before urlopen() times out
sleep_timeout = 5.0    # base politeness delay (seconds) between requests; also the backoff unit

def linkpage_parser(url):
    '''Fetch an index page and return the absolute chapter URLs found on it.

    Scans every <li> element for an <a target="_blank"> anchor and keeps
    hrefs containing '.html', resolved against the site root.

    :param url: index-page URL to fetch and parse
    :return: list of absolute URL strings (possibly empty)
    :raises urllib.error.URLError: on network failure or timeout
    '''
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0')
    # 'with' guarantees the socket is released even if read() raises.
    with urllib.request.urlopen(req, timeout=socket_timeout) as response:
        page = response.read()

    # Name the parser explicitly: BeautifulSoup(page) alone emits a warning
    # and may pick different parsers (hence different trees) per machine.
    soup = BeautifulSoup(page, 'html.parser')

    urlList = []
    for eachLiSoup in soup.find_all('li'):
        aSoup = eachLiSoup.find('a', attrs={'target': '_blank'})
        if aSoup is None:
            # An <li> without a matching anchor crashed the original
            # with AttributeError; skip it instead.
            continue
        link = aSoup.get('href')
        # link can be None when the anchor has no href attribute.
        if link and '.html' in link:
            urlList.append(urllib.parse.urljoin('http://www.911bpian.com/', link))

    return urlList


def txtpage_downloader(url, dir):
    '''Download one chapter page into directory *dir*.

    The local file name is the URL with the site's book prefix stripped.
    A readable existing file larger than MIN_FILE_SIZE KB is kept as-is
    (acts as a resume/skip mechanism).

    :param url: page URL under http://www.911bpian.com/html/book/
    :param dir: destination directory (must already exist)
    :return: True if the saved file exceeds MIN_FILE_SIZE KB, else False
    :raises urllib.error.URLError: on network failure or timeout
    '''
    name = url.replace('http://www.911bpian.com/html/book/', '')
    name = os.path.join(dir, name)

    # Skip pages that were already downloaded successfully.
    if os.access(name, os.R_OK) and (os.path.getsize(name) > MIN_FILE_SIZE*1024):
        return True

    # Politeness delay so we don't hammer the server.
    time.sleep(sleep_timeout)
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0')
    with urllib.request.urlopen(req, timeout=socket_timeout) as response:
        page = response.read()

    # 'with' closes the file even if write() raises; the original leaked
    # the handle on error.
    with open(name, 'wb') as f:
        f.write(page)

    # A too-small file is treated as a failed (truncated / error-page) download.
    return os.path.getsize(name) > MIN_FILE_SIZE*1024

def auto_txtpage_downloader(url, dir):
    '''Download *url* via txtpage_downloader with retries and linear backoff.

    Bug fixed: the original only incremented ``retries`` when an exception
    was raised, so a download that repeatedly returned False (file kept
    coming back too small) looped forever with a zero-second sleep.

    :param url: page URL to download
    :param dir: destination directory passed through to txtpage_downloader
    '''
    retries = 0
    while True:
        try:
            if txtpage_downloader(url, dir):
                break
            # A too-small file counts as a failed attempt too.
            retries += 1
        except Exception as e:
            # Catch Exception, not bare except: a bare except also swallows
            # KeyboardInterrupt/SystemExit and made Ctrl-C useless here.
            retries += 1
            print("    Unexpected error:", type(e))

        if retries >= MAX_RETRIES:
            break
        # Linear backoff: wait longer after each failed attempt.
        print('    ===>enlarge sleep time: ' + str(sleep_timeout*retries) + 's')
        time.sleep(sleep_timeout*retries)

if __name__ == '__main__':
    # CLI driver: reads index-page URLs from a link file, parses each for
    # chapter links, and downloads every chapter into the output directory.
    argument_parser = argparse.ArgumentParser(description='text file downloader')
    argument_parser.add_argument('-lf', dest = 'LinkFile', help='The link file contains the start url links to download')
    argument_parser.add_argument('-o', dest = 'DestDir', help='Folder to store the downloaded files')
    argument_parser.add_argument('-rs', dest = 'FileSize', nargs=1, type=int, help='Specify the minimal file size in KB when re-visiting all local files. Re-download if file size is less than this value')

    args = argument_parser.parse_args()

    linkFile = args.LinkFile
    outDir = args.DestDir

    # Allow the user to raise (never lower) the minimum-size threshold.
    if args.FileSize and (args.FileSize[0] > MIN_FILE_SIZE):
        MIN_FILE_SIZE = args.FileSize[0]

    # 'with' closes the link file even if readlines() raises.
    with open(linkFile, 'r') as f:
        lines = f.readlines()

    for eachline in lines:
        link = eachline.rstrip('\n')
        if '.html' in link:
            print (link)
            # Politeness delay before fetching each index page.
            time.sleep(sleep_timeout)
            try:
                urllist = linkpage_parser(link)
                for url in urllist:
                    print ('  ' + url)
                    auto_txtpage_downloader(url, outDir)
            except Exception:
                # One bad index page shouldn't abort the whole run, but a
                # bare except also hid Ctrl-C; catch Exception instead.
                print ("Unexpected error:", sys.exc_info()[0])

