#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from crawler import config
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
from logs.Logger import Logger
import time
import sys
from Common import img_download
from requests.exceptions import SSLError
import urllib3

# Suppress the InsecureRequestWarning spam caused by verify=False requests below.
urllib3.disable_warnings()

# Output directory for downloaded images, taken from project config.
path = config.generalConfig.weird_output_path
# Desktop-browser User-Agent so the target site serves normal pages.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Logging initialisation (file path is Windows-specific — TODO confirm on deploy host).
log = Logger("D:/workspace/logs/crawler.log", level='info')

def path_manage(path):
    """Return the portion of *path* before the first ``'|'`` separator.

    Fix: the original used ``path.find('|')`` directly as a slice bound, so
    when no ``'|'`` was present ``find`` returned -1 and the last character
    was silently dropped. ``partition`` returns the whole string in that case.
    """
    head, _, _ = path.partition('|')
    return head


def date_manage(tmp_date):
    """Strip the '发表于: ' ("posted at") prefix and return the date part
    (everything before the first space, i.e. without the time-of-day).

    Fix: the original sliced with ``tmp_date.find(" ")``, so when no space
    remained ``find`` returned -1 and the last character was silently
    dropped. ``partition`` returns the whole string in that case.
    """
    tmp_date = tmp_date.replace('发表于: ', '')
    return tmp_date.partition(' ')[0]


def path_clean(path):
    """Remove every character that is illegal in a Windows file name.

    Strips ``? < > : ' * " / |`` in a single C-level pass via
    :meth:`str.translate` instead of nine chained ``replace`` calls.
    """
    illegal = '?<>:\'*"/|'
    return path.translate(str.maketrans('', '', illegal))


def getContext(url):
    """Fetch *url* (TLS verification disabled) and parse it with lxml.

    Forces UTF-8 decoding of the response body before handing it to
    BeautifulSoup; returns the parsed document.
    """
    response = requests.get(url, headers=headers, verify=False)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'lxml')


def img_download_caller(url, offset=0, abbr_url=''):
    """Scrape one forum-post page and download every image it embeds.

    Fetches *url* (retrying up to 3 extra times on transient errors),
    extracts the post title, publish date and all ``.f14 img`` sources,
    then delegates the actual downloading to ``img_download``.

    :param url: full URL of the post page.
    :param offset: index to resume from after a prior failure (passed through).
    :param abbr_url: abbreviated/parent identifier (passed through).

    Fixes vs. original: removed the unused ``db_session = DBOperator()``
    (an unused local that may open a needless DB session), removed the
    unused ``soup``/``run_flag`` scaffolding, collapsed four duplicated
    except blocks into one tuple catch, and corrected the "Repeet" typo
    in the give-up log message.
    """
    download_links = []
    retry_times = 0
    title = ''
    date = ''
    while True:
        if retry_times > 3:
            log.logger.warn("Retried 3 times, Exit!")
            sys.exit()
        try:
            soup = getContext(url)
            # Post title and date live in fixed spans on this forum skin.
            title = soup.find('span', {'id': 'subject_tpc'}).text
            date_el = soup.find('span', {'class': 'fl gray'})
            date = date_manage(date_el.text)

            for pic_tag in soup.select(".f14 img"):
                download_links.append(pic_tag.get('src'))
            break
        except (AttributeError, ConnectionResetError, ConnectionError, SSLError) as exc:
            # AttributeError: expected span missing (page not fully served);
            # the rest are transient network failures — back off and retry.
            retry_times += 1
            log.logger.warn(type(exc).__name__ + " involved！")
            time.sleep(3)

    title = path_clean(title)
    img_download(download_links, title, date, url, log, offset, abbr_url)


def read_urls(path='D:/data/urls.txt'):
    """Read whitespace-separated URLs from *path* and return them as a list.

    Fixes vs. original: the file handle was opened and never closed
    (resource leak) — now managed with ``with``; the hard-coded path is
    now a backward-compatible default parameter. Prints the URL count
    as before.

    :param path: text file containing URLs separated by any whitespace.
    :return: list of URL strings.
    """
    with open(path, encoding='utf-8') as f:
        urls = f.read().split()
    print(len(urls))
    return urls

def get_abbr_parent_url(url):
    """Extract the id embedded in the query string of *url*.

    Skips the ``'?'`` plus the next four characters (presumably a
    ``tid=``-style key — TODO confirm against caller URLs) and returns
    everything up to the first ``'&'``. Mirrors the original slicing
    exactly, including its -1-index behaviour when markers are absent.
    """
    start = url.find('?') + 5
    end = url.find('&')
    return url[start:end]

def main():
    """Crawl every URL listed in the input file, skipping already-crawled ones.

    For each URL: look up its (abbreviated) parent in the DB; if a record
    exists, log and skip, otherwise hand it to ``img_download_caller``.
    """
    for url in read_urls():
        print(url)
        # offset: only needed to resume from the index that errored out.
        offset = 0
        db_session = DBOperator()
        # Query by the abbreviated id when the URL carries a query string,
        # otherwise by the raw URL.
        lookup_key = get_abbr_parent_url(url) if url.find('?') > 0 else url
        parent_urls = db_session.check_abbr_parent_url(lookup_key)
        if len(parent_urls) > 0:
            log.logger.warn(url + "..... has crawled!")
            continue
        img_download_caller(url, offset, get_abbr_parent_url(url))
    print('下载完成')


# Script entry point: crawl everything listed in the URL file.
if __name__ == "__main__":
    main()
