#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
import os.path

# Root directory where downloaded albums are stored.
path = "E:/data/BeautyLeg/"
# url = "http://1024.sdccbly.pw/pw/html_data/106/1905/4095962.html"
# Browser-like User-Agent so the site serves normal pages instead of blocking the bot.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

# Index URL of the gallery's listing pages.
base_url = "https://www.meitulu.com/t/beautyleg/"
# NOTE(review): intended number of listing pages, but get_page_urls
# hard-codes 25 and never reads this — confirm before relying on it.
length = 25


def get_page_urls(base_url, pages=25):
    """Build the list of listing-page URLs for a gallery index.

    The first listing page lives at ``base_url`` itself; subsequent pages
    live at ``base_url + "<n>.html"`` for n = 2..pages.

    Args:
        base_url: Index URL ending with "/" (e.g. ".../t/beautyleg/").
        pages: Total number of listing pages to generate (default 25,
            matching the previously hard-coded value).

    Returns:
        List of page URLs: the bare index first, then the numbered pages.
    """
    page_urls = [base_url]
    # Page 1 has no numeric suffix on the site; numbering starts at 2.
    page_urls.extend(base_url + str(n) + ".html" for n in range(2, pages + 1))
    return page_urls


def get_item_urls(page_url):
    """Scrape one listing page and return the album (item) URLs it links to.

    Every URL found is also appended to E:\\data\\beautyleg.txt so the
    crawl can later be resumed by read_urls().

    Args:
        page_url: URL of a listing page produced by get_page_urls().

    Returns:
        List of album URLs found on the page.
    """
    response = requests.get(page_url, headers=headers)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'lxml')
    item_elements = soup.find('ul', {'class': 'img'})
    item_urls = []
    for a_item in item_elements.select("a"):
        # Thumbnail links carry no class attribute and wrap an <img>;
        # this filters out the class-tagged title/text links.
        if a_item.get('class') is None and len(a_item.select('img')) > 0:
            item_urls.append(a_item.get('href'))
    # Fix: the original opened this file and never closed it (handle leak);
    # a context manager closes it deterministically.
    with open('E:\\data\\beautyleg.txt', 'a') as f1:
        for item_url in item_urls:
            f1.write(item_url + "\n")
    return item_urls


def get_img_list(item_url):
    """Collect all image URLs of an album across its paginated pages.

    The album at ``item_url`` is page 1; page n lives at
    ``<item_url minus .html>_<n>.html``. The pager's second-to-last link
    holds the last page number.

    Args:
        item_url: URL of the album's first page.

    Returns:
        List of image ``src`` URLs in page order.
    """
    def _page_imgs(page_soup):
        # One page's image sources, in document order.
        # Fix: the original passed the SET {'class', 'content'} instead of
        # an attrs dict and only matched via bs4's lenient filter handling.
        content = page_soup.find('div', {'class': 'content'})
        return [img.get('src') for img in content.find_all('img')]

    response = requests.get(item_url, headers=headers)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'lxml')
    pages_elements = soup.find('div', {'id': 'pages'})
    # The last pager entry is the "next" arrow; the one before it is the
    # final page number.
    last_page = int(pages_elements.find_all('a')[-2].text)

    img_list = _page_imgs(soup)
    item_url_prefix = item_url[:item_url.rfind(".")]
    for page_no in range(2, last_page + 1):
        page_url = item_url_prefix + "_" + str(page_no) + ".html"
        response = requests.get(page_url, headers=headers)
        response.encoding = 'utf-8'
        page_soup = BeautifulSoup(response.text, 'lxml')
        img_list.extend(_page_imgs(page_soup))
    return img_list


def get_title(item_url):
    """Fetch an album page and return its title from the breadcrumb block.

    Args:
        item_url: URL of the album's first page.

    Returns:
        The album title text from the <h1> inside div.weizhi
        (also printed as progress feedback).
    """
    response = requests.get(item_url, headers=headers)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'lxml')
    title_element = soup.find('div', {'class': 'weizhi'})
    # Fix: look up the <h1> once instead of twice.
    title = title_element.find('h1').text
    print(title)
    return title


def img_download(links, title):
    """Download every image in *links* into <path>/<title>/.

    Files are named 001.<ext>, 002.<ext>, ..., preserving each source
    URL's file extension.

    Args:
        links: Iterable of image URLs.
        title: Album title, used as the target directory name.
    """
    # Hoisted out of the loop: the target directory is loop-invariant,
    # so build and create it once instead of once per image.
    tmp_dir = path + title + "/"
    print(tmp_dir)
    mkdir(tmp_dir)
    for i, item in enumerate(links, start=1):
        # NOTE(review): unlike the page fetches, this request sends no
        # headers — confirm the image host accepts the default User-Agent.
        res = requests.get(item)
        print(item)
        with open(tmp_dir + str(i).zfill(3) + file_extension(item), 'wb') as f:
            f.write(res.content)


def mkdir(path):
    """Create directory ``path`` (including parents) if it does not exist.

    Args:
        path: Directory path; surrounding whitespace and a trailing
            backslash are stripped before checking.

    Returns:
        True if the directory was created, False if it already existed.
    """
    import os

    # Normalize: strip surrounding whitespace and a trailing backslash.
    path = path.strip()
    path = path.rstrip("\\")

    if not os.path.exists(path):
        os.makedirs(path)
        # Fix: the original had a bare ``print`` on its own line (a
        # Python 2 leftover), so these messages were never printed —
        # the string expression on the next line was a no-op.
        print(path + ' 创建成功')
        return True
    else:
        print(path + ' 目录已存在')
        return False


def file_extension(path):
    """Return the extension of *path*, dot included (empty if none)."""
    _, ext = os.path.splitext(path)
    return ext

def read_urls(filename='E:/data/beautyleg.txt'):
    """Read the crawl's saved album URLs from a text file.

    URLs are whitespace-separated (one per line as written by
    get_item_urls); blank lines are ignored.

    Args:
        filename: Path of the URL list file (defaults to the crawl's
            output file, preserving the previous hard-coded behavior).

    Returns:
        List of URL strings; the count is printed as progress feedback.
    """
    # Fix: the original opened the file and never closed it (handle leak);
    # read-then-split is equivalent to joining readlines() and splitting.
    with open(filename) as file:
        urls = file.read().split()
    print(len(urls))
    return urls


if __name__ == "__main__":
    # get_title('https://www.meitulu.com/item/21018.html')
    # Step 1 (run once): collect the album URL list from every listing
    # page and append it to E:\data\beautyleg.txt.
    # pages = get_page_urls("https://www.meitulu.com/t/beautyleg/")
    # for page in pages:
    #     get_item_urls(page)
    # print("ok")

    # Earlier line-by-line variant of the download loop, kept for reference:
    # f = open("E:\\data\\beautyleg.txt")
    # # read the file with readline()
    # line = f.readline()  # reads one line per call
    #
    # while line:
    #     print(line, end='')  # end='' suppresses the extra newline
    #     img_urls = get_img_list(line)
    #     # print(line)  # would print with a trailing newline
    #     title = get_title(line)
    #
    #     line = f.readline()
    #     img_download(img_urls, title)
    # f.close()

    # Single-album smoke test:
    # imgs = get_img_list("https://www.meitulu.com/item/21018.html")
    # title = get_title("https://www.meitulu.com/item/21018.html")
    # img_download(imgs, title)

    # Step 2: download every album previously saved by get_item_urls().
    urls = read_urls()
    for url in urls:
        # img_download(url)
        print(url)
        title = get_title(url)
        links = get_img_list(url)
        img_download(links, title)
    print('下载完成')
