# coding=utf-8

from bs4 import BeautifulSoup
import urllib2
import time
import tarfile
import os
import sys
# Py2-only hack: reload(sys) restores setdefaultencoding (deleted by site.py
# at startup) so implicit str<->unicode conversions use UTF-8, not ASCII.
reload(sys)
sys.setdefaultencoding("utf-8")

# Usage: argv[1] = date path segment (presumably MMDD -- TODO confirm),
#        argv[2] = starting page id, argv[3] = output directory.
# All three are required, hence len(sys.argv) must be >= 4.
if len(sys.argv) < 4:
    print "Please specify page!"
    exit()

# base_url = 'http://yxpjw.me/luyilu/2017/0304/'
base_url = 'http://yxpjw.me/luyilu/2017/' + sys.argv[1] + '/'
# Spoofed desktop-Firefox User-Agent so the site serves normal pages.
headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}


def retrieve_all_imgs(soup):
    """Save every gallery image found in *soup*.

    Gallery images are <img> tags with empty class/title attributes AND a
    non-None ``alt`` attribute; everything else is skipped.
    """
    imgs = soup.find_all('img', {'class': '', 'title': ''})

    # Bug fix: the original called imgs.remove(img) while iterating imgs,
    # which skips the element following each removal, so some alt-less
    # tags slipped through the filter. Build a new filtered list instead.
    wanted = [img for img in imgs if img.get('alt') is not None]

    for img in wanted:
        save_img(img.get('src'))


def retrieve_page_soup(request_url):
    """Fetch *request_url* with the module-level spoofed headers and
    return the page parsed into a BeautifulSoup tree."""
    req = urllib2.Request(url=request_url, headers=headers)
    page_html = urllib2.urlopen(req).read()
    return BeautifulSoup(page_html, 'html.parser')


def save_img(image_url):
    print 'saving img:' + image_url
    file_name = image_url.split('/')[-1]

    request1 = urllib2.Request(url=image_url, headers=headers)
    img_data = urllib2.urlopen(request1).read()

    new_file = open(sys.argv[3] + '/' + file_name, 'w')
    new_file.write(img_data)
    new_file.close()


def main():

    url = base_url + sys.argv[2] + '.html'

    print 'The url to retrieve is: ' + url

    soup = retrieve_page_soup(url)

    while True:
        print 'url=' + url
        retrieve_all_imgs(soup)

        if soup.find('li', {'class': 'next-page'}):
            url = base_url + soup.find('li', {'class': 'next-page'}).a.get('href')
            soup = retrieve_page_soup(url)
            time.sleep(5)
        else:
            break

    if not os.path.exists(sys.argv[3]):
        os.makedirs(sys.argv[3])

    tar = tarfile.open(sys.argv[3] + '.tar.gz', 'w:gz')
    for f in os.listdir(sys.argv[3]):
        tar.add(sys.argv[3] + '/' + f)

    tar.close()
    os.remove(sys.argv[3] + '.tar.gz')


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
