from os import makedirs

import requests
import re
from bs4 import BeautifulSoup
from contextlib import closing

from os.path import exists
import sys

sys.setrecursionlimit(1000000)
server = "http://www.735357.com"


def req_get_bf(url):
    return BeautifulSoup(requests.get(url, timeout=180).text, 'html.parser')


def get_detail_pic(temp_bf, url_detail, img_list):
    if temp_bf:
        bf = temp_bf
    else:
        bf = req_get_bf(url_detail)
    content_pic_bf = bf.find_all('div', 'content-pic')
    img_bf = content_pic_bf[0].find_all('img')
    for img_tag in img_bf:
        pic_src = img_tag.get('src')
        img_list.append(pic_src)
        print(len(img_list), '获取图片下载地址：', pic_src)

    temp_server = get_temp_server(url_detail)

    if img_bf:
        next_page_bf = bf.find_all('a', 'page-ch')
        if next_page_bf and next_page_bf[-1].has_attr('href'):
            next_page_url = temp_server + next_page_bf[-1].get('href')
            if next_page_bf[-1].text == 'ÏÂÒ»Ò³':
                get_detail_pic(None, next_page_url, img_list)


def get_temp_server(url_detail):
    temp_server = url_detail[:url_detail.rfind('/') + 1]
    return temp_server


def download(photo_url, dir_name, filename):
    jpg_filename = dir_name + '/%d.jpg' % filename
    if not exists(jpg_filename):
        with closing(requests.get(url=photo_url, stream=True, timeout=90)) as r:
            with open(jpg_filename, 'ab+') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        f.flush()


def start_down_pic_by_cc(temp_bf, url, detail_name):
    images = []
    name = 'images/' + detail_name
    name = name.strip()
    if not exists(name):
        makedirs(name)
    else:
        return

    get_detail_pic(temp_bf, url, images)
    for i in range(len(images)):
        print('下载第{}张图...'.format(str(i + 1)))
        download(images[i], name, i + 1)


def just_url(temp_url, detail_name):

    temp_bf = req_get_bf(temp_url)
    detail_pic_bf = temp_bf.find_all('div', 'content-pic')
    if detail_pic_bf:
        try:
            start_down_pic_by_cc(temp_bf, temp_url, detail_name)
        except requests.exceptions.RequestException:
            pass
        except OSError:
            pass
    else:
        list_bf = temp_bf.find_all('dl', "list-left public-box")
        if list_bf:
            href_bf = list_bf[0].find_all(href=re.compile('.*html$'))
        else:
            href_bf = temp_bf.find_all(href=re.compile('.*html$'))
        for href_tag in href_bf:
            name_for_this = href_tag.parent.text.encode('latin1').decode('gbk')
            print("-----------------", name_for_this, "-------------------")
            href_url = href_tag.get('href')
            if not href_url.startswith(server):
                href_url = server + href_url
            try:
                if temp_url != href_url:
                    print(href_url)
                    just_url(href_url, name_for_this)
            except requests.exceptions.RequestException:
                return
            except OSError:
                pass

        if list_bf:
            temp_server = get_temp_server(temp_url)
            next_page_bf = temp_bf.find_all('a', 'page-ch')
            if next_page_bf and next_page_bf[-1].has_attr('href'):
                next_page_url = temp_server + next_page_bf[-1].get('href')
                if next_page_bf[-1].text == 'ÏÂÒ»Ò³':
                    just_url(next_page_url, temp_url[temp_url.rfind('/') + 1: temp_url.rfind('.')])


if __name__ == '__main__':
    html = "http://www.735357.com"
    just_url(html, 'home')
