'''
CppReferenceCrawler V1.0
By SkyFire
QQ:1513008876
E-Mail:skyfireitdiy@hotmail.com
'''

import os
import httplib2
import errno
import re

class Downloader:
    """Recursively download the www.cplusplus.com reference pages to disk.

    Pages are fetched over HTTP and saved under the working directory,
    mirroring the site's URL path layout (directory URLs become
    ``<dir>/index.html``).
    """

    # Site root; stripped from URLs to build local paths and prepended to
    # the relative "/reference/..." links found in page bodies.
    BASE_URL = "http://www.cplusplus.com"
    # Give up on a URL after this many failed attempts.
    MAX_RETRIES = 5

    def __init__(self, save_dir='.'):
        """Initialize the downloader.

        :param save_dir: working directory; downloaded pages are stored here.
        """
        # Remember the directory we started in (kept for compatibility;
        # callers may use it to restore the cwd).
        self.__old_path = os.path.curdir
        # URLs already queued/visited, to avoid downloading twice.
        self.__urls = set()
        # httplib2 client with an on-disk cache directory named '.Catch'
        # (name kept from the original code).
        self.__http_client = httplib2.Http('.Catch')
        os.chdir(save_dir)

    def start(self):
        """Start crawling from the site root."""
        self.__download(self.BASE_URL)

    def __remove_prefix(self, url):
        """Strip the site prefix and any leading slash from *url*.

        E.g. 'http://www.cplusplus.com/reference/iostream/' ->
        'reference/iostream/'.  Returns '' for the bare site root.

        :param url: absolute URL starting with BASE_URL
        :return: site-relative path suitable for use as a local file path
        """
        relative = url[len(self.BASE_URL):]
        # Always return a string — the original fell through and returned
        # None when the remainder did not start with '/'.
        return relative[1:] if relative.startswith('/') else relative

    def __mkdir(self, dir_path):
        """Create the directory portion of *dir_path* (recursively).

        :param dir_path: path; if it does not end with '/', everything
            after the last '/' is treated as a file name and dropped.
        """
        if not dir_path.endswith('/'):
            index = dir_path.rfind('/')
            if index == -1:
                # Bare file name at the top level — nothing to create.
                return
            dir_path = dir_path[:index + 1]
        # exist_ok makes the "already exists" case a silent no-op.
        os.makedirs(dir_path, exist_ok=True)

    def __write_file(self, file_path, content):
        """Write *content* to *file_path* as UTF-8 text.

        :param file_path: destination file path
        :param content: decoded page text
        """
        # Context manager guarantees the handle is closed; explicit
        # encoding matches the UTF-8 decode done at fetch time.
        with open(file_path, 'w', encoding='utf-8') as fp:
            fp.write(content)

    def __find_url(self, content):
        """Extract cpp reference URLs from a page body.

        Only main-content links ("/reference/...") are matched; images
        and other assets are ignored.

        :param content: page HTML as text
        :return: list of absolute URLs
        """
        matches = re.findall(r'"(/reference/[0-9a-zA-Z./]*)"', content)
        return [self.BASE_URL + m for m in matches]

    def __fetch(self, url):
        """Fetch *url*, retrying up to MAX_RETRIES times.

        :param url: URL to fetch
        :return: decoded page text, or None if all attempts failed
        """
        for attempt in range(1, self.MAX_RETRIES + 1):
            try:
                response, content = self.__http_client.request(url)
                if response['status'] == '200':
                    return content.decode('utf-8')
                print(url + " download error, retry " + str(attempt))
            except Exception as exc:
                # Narrowed from a bare except: (which also caught
                # KeyboardInterrupt and referenced an unbound 'response').
                print(url + " download exception (" + str(exc)
                      + "), retry " + str(attempt))
        return None

    def __download(self, url):
        """Download *url* and every reachable "/reference/..." page.

        Iterative worklist instead of recursion: a full-site crawl would
        otherwise exceed Python's recursion limit.

        :param url: starting URL
        """
        pending = [url]
        while pending:
            current = pending.pop()
            content = self.__fetch(current)
            if content is None:
                # All retries exhausted — skip this page.
                continue
            new_path = self.__remove_prefix(current)
            if new_path:
                self.__mkdir(new_path)
                if new_path.endswith('/'):
                    # Directory-style URL: store its page as index.html.
                    new_path += 'index.html'
                self.__write_file(new_path, content)
                print(current + " download over!")
            for found in self.__find_url(content):
                if found not in self.__urls:
                    self.__urls.add(found)
                    pending.append(found)


if __name__ == "__main__":
    downloader = Downloader()
    downloader.start()
    print("all www.cplusplus.com website download over!")
    input("press Enter to exit")