#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from requests.exceptions import SSLError
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup
from crawler import config
import os.path
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
from logs.Logger import Logger
import file_md5.FilePrint as file_print
import time
import sys
import urllib3

# Suppress the InsecureRequestWarning triggered by verify=False requests.
urllib3.disable_warnings()
# Route all HTTP(S) traffic through the local proxy.
# (The original set HTTPS_PROXY twice; the duplicate assignment is removed.)
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'

# Root directory that downloaded galleries are written under.
path = config.generalConfig.weird_output_path
# url = "http://1024.sdccbly.pw/pw/html_data/106/1905/4095962.html"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Log initialisation: fixed local log file, 'info' level and above.
log = Logger("D:/workspace/logs/crawler.log", level='info')

def file_extension(path):
    """Return the extension of *path*, dot included (empty string if none)."""
    _, ext = os.path.splitext(path)
    return ext

def mkdir(path):
    """Create directory *path* (with parents) if it does not already exist.

    Returns True when the directory was created, False when it already
    existed.

    Fixes: the original was half-converted Python 2 — `print` sat alone on
    its own line (a no-op name expression in Python 3) with the message
    string on the next line as a discarded expression, so nothing was ever
    printed.  Restored as real print() calls.  The redundant in-function
    `import os` is dropped (the module-level `import os.path` already
    provides `os`).
    """
    # Trim surrounding whitespace and a trailing backslash so
    # "dir\\" and "dir" refer to the same directory.
    path = path.strip().rstrip("\\")

    if os.path.exists(path):
        # Directory already present: report and do nothing.
        print(path + ' 目录已存在')
        return False

    os.makedirs(path)
    print(path + ' 创建成功')
    return True


def path_manage(path):
    """Return the part of *path* before the first '|' separator.

    Fixes: when no '|' was present, `find` returned -1 and `path[:-1]`
    silently dropped the last character; the whole string is now returned
    unchanged in that case.
    """
    pos = path.find('|')
    return path if pos == -1 else path[:pos]


def date_manage(tmp_date):
    """Extract the date part from a '发表于: <date> <time>' string.

    Strips the '发表于: ' prefix and returns everything before the first
    space.  Fixes: when no space remained, `find` returned -1 and the
    slice dropped the last character; the full remainder is now returned.
    """
    tmp_date = tmp_date.replace('发表于: ', '')
    pos = tmp_date.find(" ")
    return tmp_date if pos == -1 else tmp_date[:pos]


def path_clean(path):
    """Strip characters that are illegal in Windows file/directory names.

    Generalized: the original removed only '?', '<', '>' and ':'; the full
    Windows-invalid set (\\ / : * ? " < > |) is now removed in one
    `str.translate` pass, so mkdir() cannot fail on titles containing the
    remaining characters.
    """
    return path.translate(str.maketrans('', '', '\\/:*?"<>|'))

def getContext(url, timeout=30):
    """Fetch *url* and return it parsed as a BeautifulSoup document.

    The response is force-decoded as UTF-8 and parsed with lxml.  TLS
    verification is disabled (verify=False) to match the proxied setup.

    Fixes: the original request had no timeout and could hang forever on
    a dead connection; *timeout* (seconds) is a new parameter defaulting
    to 30 so existing callers are unaffected.
    """
    response = requests.get(url, headers=headers, verify=False, timeout=timeout)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'lxml')


def img_download(url):
    """Download every image linked from the gallery page at *url*.

    Phase 1 fetches the page (retrying on SSL/parse errors, giving up —
    via sys.exit() — after more than 3 retries) and collects the src of
    every <img> inside the page's content <pre>.  Phase 2 downloads each
    image to <path>/<cleaned title>/NNN.<ext>, fingerprints it, checks
    the fingerprint against the database, and deletes the file if a
    duplicate is found.

    NOTE(review): when a duplicate is detected the file is removed but
    db_session.add_image_info(img_info) still runs afterwards (the
    `continue` re-tests the while condition, which is already "stop") —
    confirm recording duplicates is intentional.
    """
    db_session = DBOperator()
    download_links = []
    run_flag = 'run'
    retry_times = 0
    soup = ''
    title = ''
    # Phase 1: fetch and parse the page, with a bounded retry loop.
    while run_flag == "run":
        if retry_times > 3:
            log.logger.warn("Repeet 3 times, Exit!")
            sys.exit()
        try:
            soup = getContext(url)
            title = path_manage(soup.title.text)
            img_content = soup.find('div', {'class': 'content-section'})
            pre = img_content.find('pre')
            # date = date_manage(date.text)
            imgs = pre.findAll("img")
            for pic_tag in imgs:
                pic_link = pic_tag.get('src')
                download_links.append(pic_link)
            run_flag = "stop"
        except SSLError:
            retry_times = retry_times + 1
            time.sleep(5)
            print("SSLError")
            continue
        except AttributeError:
            # Raised when the expected div/pre structure is missing
            # (find() returned None above).
            retry_times = retry_times + 1
            time.sleep(5)
            print("AttributeError")
            continue
    i = 1
    size = len(download_links)
    # Phase 2: download each image; per-file retry on transient HTTP errors.
    for item in download_links:
        # Resume support: to restart after a failure at image N, uncomment
        # the block below and set the threshold to N+1 to skip what was
        # already downloaded.
        # if i < 151:
        #     i = i + 1
        #     print(i)
        #     continue
        run_flag = 'run'
        while run_flag == "run":
            try:
                res = requests.get(item)
                print(str(i) + "/" + str(size) + ":" + item)
                title = path_clean(title)
                tmp_dir = path + title + "/"
                print(tmp_dir)
                mkdir(tmp_dir)
                download_file_name = tmp_dir + str(i).zfill(3) + file_extension(item)
                with open(download_file_name, 'wb') as f:
                    f.write(res.content)
                    f.close()

                    # Duplicate check: fingerprint the downloaded file
                    # and look the print up in the database.
                    img_info = ImageInfo()
                    img_info.url = item
                    img_info.image_print = file_print.get_img_print(download_file_name)
                    d_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    img_info.crawled_time = d_time
                    img_info.parent_url = url
                    img_info.index = i
                    run_flag = "stop"
                    # If the fingerprint already exists, delete the file.
                    time.sleep(1)
                    result = db_session.check_image_info(img_info)
                    if len(result) > 0:
                        log.logger.warn(
                            img_info.parent_url + " with image_print: " + str(img_info.image_print) + " have duplicated!")
                        log.logger.warn("File Name is " + download_file_name)
                        print("File Name is " + download_file_name)
                        os.remove(download_file_name)
                        # i = i + 1
                        continue
                # time.sleep(3)
            except SSLError:
                time.sleep(2)
                print("SSLError Exception Envolved!")
                log.logger.warn("HTTP Exception Envolved!")
                run_flag = "run"
                continue
            except ConnectionError:
                time.sleep(2)
                print("ConnectionError Exception Envolved!")
                log.logger.warn("HTTP Exception Envolved!")
                run_flag = "run"
                continue
            except ChunkedEncodingError:
                time.sleep(2)
                print("ChunkedEncodingError Envolved!")
                log.logger.warn("ChunkedEncodingError Envolved!")
                run_flag = "run"
                continue
        db_session.add_image_info(img_info)
        i = i + 1


def read_urls(file_path='D:/data/cool.txt'):
    """Read whitespace-separated URLs from *file_path* and return them.

    Fixes: the file handle was never closed (resource leak) — now a
    context manager.  Generalized: the hard-coded path is a parameter
    defaulting to the original location, so existing callers are
    unaffected.  readlines+join+split is collapsed to read().split().
    """
    with open(file_path) as f:
        urls = f.read().split()
    # Report how many URLs were loaded, matching the original output.
    print(len(urls))
    return urls


def main():
    """Drive the crawl: load the URL list, then download each page's images."""
    for target in read_urls():
        print(target)
        img_download(target)
    print('下载完成')


if __name__ == "__main__":
    main()
