import urllib2
import os
import sys
import time
from selenium.webdriver.support.select import Select

from selenium import webdriver

# Single shared headless-browser instance used by every function below.
browser = webdriver.PhantomJS()

# Root of the comic site; all comic/chapter URLs are built relative to it.
base_url = 'http://www.66mh.cc'
# Desktop Safari User-Agent so the image server does not reject the
# raw urllib2 requests as a non-browser client.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38'}


def save_img(file_path, image_url):
    """Download the image at image_url and write it to file_path.

    Sends the module-level browser-like headers with the request.
    Propagates urllib2.URLError/HTTPError on network failure.
    """
    request1 = urllib2.Request(url=image_url, headers=headers)
    img_data = urllib2.urlopen(request1).read()

    # 'wb' is required: writing image bytes in text mode ('w') corrupts
    # the data on platforms that translate newlines (e.g. Windows).
    # The with-statement guarantees the file is closed even on error.
    with open(file_path, 'wb') as new_file:
        new_file.write(img_data)


def get_img_url(url):
    try:
        browser.get(url)
        img = browser.find_element_by_id('qTcms_pic')
        return img.get_attribute('src')
    finally:
        print 'save image finish'


def save_chapter(folder, chapter_url):
    print 'saving chapter:' + str(folder) + ' url=' + chapter_url
    if not os.path.exists(str(folder)):
        os.makedirs(str(folder))

    browser.get(chapter_url)
    browser.implicitly_wait(2)
    select = Select(browser.find_element_by_id('qTcms_select_i'))
    options = select.options

    index = 0

    for o in options:
        temp_url = chapter_url + '?p=' + str(index + 1)
        print 'retrieving url:' + temp_url
        browser.get(temp_url)
        time.sleep(2)
        select = Select(browser.find_element_by_id('qTcms_select_i'))

        print 'saving img'
        save_img(file_path='{0}/{1}.jpg'.format(folder, index + 1), image_url=get_img_url(temp_url))
        time.sleep(2)
        index += 1

        print 'next page'

    # for i in range(1, pic_count):
    #     url = url + '?p=' + str(i)
    #     image_url = get_img_url(url)
    #     save_img(str(i) + '.jpg', image_url)


def main():
    if len(sys.argv) < 2:
        print 'No specefied page.'
        return

    url = base_url + '/comic/' + sys.argv[1] + '.html'
    print 'retrieving page:' + url
    browser.get(url)
    div = browser.find_element_by_id('play_0')
    ul = div.find_element_by_tag_name('ul')
    li = ul.find_elements_by_tag_name('li')
    li.reverse()

    index = 1

    chapter_urls = []

    for l in li:
        chapter_urls.append(l.find_element_by_tag_name('a').get_attribute('href'))

    for chapter_url in chapter_urls:
        save_chapter(folder=index, chapter_url=chapter_url)
        index += 1


# Run the scraper only when executed directly, not when imported.
if __name__ == '__main__':
    main()




