#!/usr/bin/python3
# coding: utf-8
import os
import time

from bs4 import BeautifulSoup
from urllib import request
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.service import Service

url = 'http://www.1kkk.com/manhua18071/'
charset = 'utf-8'
#driverPath = '/root/Downloads/geckodriver'
driverPath = 'E:/geckodriver.exe'
# Fetch the series index page and collect the chapter links
html = request.urlopen(url).read().decode(charset)
soup = BeautifulSoup(html, 'lxml')
chapter_list = soup.find("ul", {"class": "view-win-list detail-list-select", "id": "detail-list-select-2"})
chapter_urls = []
for li in chapter_list.find_all("li"):
    chapter_urls.append("http://www.1kkk.com" + li.a.get('href'))

# Selenium 4 passes the driver path via a Service object
browser = webdriver.Firefox(service=Service(driverPath))
# Maximize the window so screenshots capture as much of the page as possible
browser.maximize_window()
os.makedirs('comics', exist_ok=True)  # make sure the output directory exists
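# The reader page loads its image dynamically; an implicit wait gives
# elements time to appear before lookups fail. 10 seconds is an arbitrary
# guess, not a value tuned for this site.
browser.implicitly_wait(10)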
for index, chapter in enumerate(chapter_urls):
    browser.get(chapter)
    imgList = []
    try:
        # Click through the chapter: clicking the image advances to the
        # next page, and the click past the last page raises an exception,
        # which ends the loop
        while True:
            soup = BeautifulSoup(browser.page_source, 'lxml')
            imgSrc = soup.find(id='cp_image').get('src')
            print(imgSrc)
            imgList.append(imgSrc)
            browser.find_element(By.ID, 'cp_image').click()
            time.sleep(5)
    except Exception:
        print('Finished collecting image links for this chapter!')
    # Save each page of this chapter as a screenshot
    page = 0
    savePath = 'comics/volume' + str(index + 1) + '_'
    # Load each image URL in the browser and screenshot it
    for img in imgList:
        print(img)
        browser.get(img)
        page += 1
        path = savePath + str(page) + '.png'
        browser.save_screenshot(path)
        print("{} saved".format(path))
        time.sleep(1)
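
# Optional alternative: fetch the image bytes directly instead of taking
# screenshots, which are limited to the window resolution. This is an
# untested sketch; the Referer and User-Agent headers are assumptions,
# since many manga CDNs reject hotlinked requests without them.
def download_image(img_src, path, referer='http://www.1kkk.com/'):
    req = request.Request(img_src, headers={'Referer': referer,
                                            'User-Agent': 'Mozilla/5.0'})
    with request.urlopen(req) as resp, open(path, 'wb') as f:
        f.write(resp.read())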
