#!/usr/bin/env python
'''
简单的网页爬虫
依赖的库retrying、BeautifulSoup、colorama 自行百度安装
pip install retrying
pip install colorama
pip install beautifulsoup4
'''

import urllib.request
import os
import os.path
from retrying import retry
from bs4 import BeautifulSoup
from colorama import init, Back
import SystemPlatformUtil
# from colorama import init, Fore, Back, Style


class Crawler(object):
    """Simple web crawler.

    For each configured target website it fetches the listing page,
    follows every item link, and saves each detail page as a local
    HTML file under ``baseOutputDir``.
    """

    def __init__(self):
        # colorama: automatically reset console colors after each print
        init(autoreset=True)

    def getDefaultOutputDir(self):
        """Return the default output directory ("output" next to this
        file), creating it when missing."""
        defaultOutputDir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "output")
        # exist_ok=True avoids the race between an exists() check and makedirs()
        os.makedirs(defaultOutputDir, exist_ok=True)
        return defaultOutputDir

    @property
    def baseOutputDir(self):
        """Directory the crawl results are written to.

        Falls back to the default output directory when the caller never
        assigned one.
        """
        if not getattr(self, "_baseOutputDir", None):
            self._baseOutputDir = self.getDefaultOutputDir()
        return self._baseOutputDir

    @baseOutputDir.setter
    def baseOutputDir(self, value):
        if not value:
            raise ValueError("输出目录不能为空!")
        self._baseOutputDir = value

    def clearOldSavedContent(self):
        """Delete previously crawled files from the output directory."""
        os.makedirs(self.baseOutputDir, exist_ok=True)
        for name in os.listdir(self.baseOutputDir):
            path = os.path.join(self.baseOutputDir, name)
            # saveContent() only ever writes plain files; skip anything else
            # so os.remove() cannot fail on a directory entry
            if os.path.isfile(path):
                os.remove(path)
        print("旧的爬取输出文件已清除！\n")

    def appendDomainForUrl(self, domain, relativeUrl):
        """Return an absolute URL: *relativeUrl* prefixed with *domain*
        unless it already is absolute (starts with "http").

        Stripping the joining slashes on both sides avoids the doubled
        "//" the naive concatenation produced when both carried one.
        """
        if relativeUrl.startswith("http"):
            return relativeUrl
        return domain.rstrip("/") + "/" + relativeUrl.lstrip("/")

    def saveContent(self, title, url, content, baseOutputDir):
        """Write one detail page to <baseOutputDir>/<title>.html.

        The file starts with a small header (title + source link); the
        whole file is gbk-encoded and un-encodable characters are dropped.
        """
        # strip characters the current OS forbids in file names
        title = SystemPlatformUtil.replaceDisabledCharInWindows(title)
        # TODO filtering for linux and mac
        os.makedirs(baseOutputDir, exist_ok=True)
        savePath = os.path.join(baseOutputDir, title + ".html")
        header = ("title:" + title + "<br>" + "url:<a href=\"" +
                  url + "\">" + url + "</a><br><br>")
        # "with" guarantees the handle is closed even if a write fails
        with open(savePath, 'wb') as outputFile:
            outputFile.write(header.encode('gbk', 'ignore'))
            outputFile.write(str(content).encode('gbk', 'ignore'))
        print(title + "---->保存成功！")

    @retry(stop_max_attempt_number=3)
    def _fetchPage(self, url, charset):
        """Fetch *url* and decode the response body with *charset*.

        Exceptions propagate so @retry can actually re-run the request.
        (Previously the try/except lived *inside* the decorated method
        and swallowed every error, so @retry never retried at all.)
        """
        request = urllib.request.Request(url)
        return urllib.request.urlopen(
            request, timeout=3).read().decode(charset)

    def parseContent(self, url, targetWebsite):
        """Fetch and parse one detail page.

        Returns a ``(content, title)`` tuple, or ``(None, None)`` when
        the page could not be fetched after three attempts.
        """
        try:
            data = self._fetchPage(url, targetWebsite.charset)
        except Exception:
            print(Back.RED + url + "----->重试次数超过3次仍然请求超时，跳过!")
            return (None, None)
        soup = BeautifulSoup(data, 'lxml')
        # rewrite relative hrefs/srcs so the saved page still resolves
        self.relativeToAbsolute(targetWebsite.domain, soup)
        content = soup.select(targetWebsite.contentSelectorSyntax)
        title = self.getTitleFromSoup(soup)
        return (content, title)

    def relativeToAbsolute(self, domain, soup):
        """Rewrite relative <a href> and <img src> values in *soup*
        to absolute URLs under *domain* (mutates the soup in place)."""
        for tagName, attr in (('a', 'href'), ('img', 'src')):
            for node in soup.find_all(tagName):
                value = node.get(attr)
                if value and not value.startswith("http"):
                    node[attr] = self.appendDomainForUrl(domain, value)

    @property
    def targetWebsiteList(self):
        """Websites queued for crawling (lazily created list)."""
        if not hasattr(self, "_targetWebsiteList"):
            self._targetWebsiteList = []
        return self._targetWebsiteList

    def addTargetWebsite(self, targetWebsite):
        """Queue *targetWebsite* for crawling; returns self for chaining."""
        # go through the property so the lazy-init logic lives in one place
        self.targetWebsiteList.append(targetWebsite)
        return self

    def getTitleFromSoup(self, soup):
        """Return the page's <title> text, or "" when the tag is missing
        or empty."""
        titleTag = soup.find("title")
        # find() returns None on pages without a <title>, and .string can
        # itself be None — both previously crashed or leaked None upward
        return (titleTag.string if titleTag else None) or ""

    def parseList(self, targetWebsite, isTest):
        """Fetch the listing page of *targetWebsite* and return the tags
        matched by its list selector.

        Returns an empty list when the request fails, so callers can
        iterate the result unconditionally.
        """
        request = urllib.request.Request(targetWebsite.url)
        try:
            response = urllib.request.urlopen(request, timeout=5)
            data = response.read().decode(targetWebsite.charset)
            if isTest:
                print("data:" + str(data))
        except Exception as e:
            print("网络请求超时!请重试!")
            print("type error: " + str(e))
            # bail out here: "data" is unbound at this point, and the
            # original fell through to BeautifulSoup(data, ...) and died
            # with a NameError instead of failing gracefully
            return []
        soup = BeautifulSoup(data, 'lxml')
        # rewrite relative links so the listing entries carry usable URLs
        self.relativeToAbsolute(targetWebsite.domain, soup)
        print(Back.GREEN + "\n爬取目标---->" + targetWebsite.name +
              "(" + targetWebsite.url + ")\n")
        return soup.select(targetWebsite.listSelectorSyntax)

    def testParseList(self, targetWebsite):
        """Debug helper: print title and url of every listing entry."""
        for entry in self.parseList(targetWebsite, True):
            print("title:" + str(entry.string) + ",url:" + str(entry['href']))

    def testParseContent(self, hrefUrl, targetWebsite):
        """Debug helper: print the parsed content of one detail page."""
        (content, title) = self.parseContent(hrefUrl, targetWebsite)
        print("content:" + str(content))

    def crawled(self, targetWebsite):
        """Crawl one website: parse its listing page, then fetch and
        save every linked detail page."""
        for entry in self.parseList(targetWebsite, False):
            hrefUrl = entry['href']
            (content, title) = self.parseContent(hrefUrl, targetWebsite)
            if not content or not title:
                continue  # fetch failed or page was empty; skip it
            self.saveContent(title, hrefUrl, content, self.baseOutputDir)
        print(Back.GREEN + "\n" + targetWebsite.name +
              "(" + targetWebsite.url + ")" + "---->爬取完毕！")
        print("----------------------------------------------------------------------------------------------------")

    def start(self):
        """Crawl every queued website in order."""
        for targetWebsite in self.targetWebsiteList:
            self.crawled(targetWebsite)
