#!/usr/bin/python
# author Leon.K <mashanhu@gmail.com>

import re
import urllib, urllib2, cookielib

# Log in to Xiaonei (Renren).
# Note: fill in your own email and password below.
def login_xn():
    """Log in to Renren (Xiaonei) and return a cookie-carrying opener.

    Replace the placeholder credentials below with your own
    email address and password before running.
    """
    jar = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    credentials = urllib.urlencode({
        'email': 'your_own_email_address',
        'password': 'your_own_password',
    })
    opener.open('http://www.renren.com/PLogin.do?domain=renren.com', credentials)
    return opener

# Return the image URLs or the photo-page URLs found in the page at `url`.
# The regexes are crude (certainly not the best, but they work);
# you could write better ones.
def get_url(url,type):
    if type == "image1":
	pattern = re.compile("http:\\\/\\\/\w{3,6}\.xnimg\.cn\\\/\w{0,6}\W{0,2}\d{8}\\\/\d{2}\W{0,2}\d{2}\\\/\w{0,2}large_\w{4}_\w{10,16}.jpg")
    elif type == "image2":
	pattern = re.compile("http:\\\/\\\/\w{3,6}\.xnimg\.cn\\\/\w{0,6}\W{0,2}\w{0,6}\W{0,2}\d{8}\\\/\d{2}\W{0,2}\d{2}\\\/\w{0,2}large_\w{0,8}_{0,1}\w{0,16}.jpg")
    elif type == "image3":
	pattern = re.compile("http:\\\/\\\/\w{3,6}\.img\.xiaonei\.com\\\/\w{0,6}\W{0,2}\d{8}\\\/\d{2}\W{0,2}\d{2}\\\/\w{0,2}large_\w{0,8}_{0,1}\w{0,16}.jpg")
    elif type == "image4":
	pattern = re.compile("http:\\\/\\\/\w{3,6}\.xiaonei\.com\\\/\w{0,6}\W{0,2}\w{0,6}\W{0,2}\d{8}\\\/\d{2}\W{0,2}\d{2}\\\/\w{0,2}large_\w{0,8}_{0,1}\w{0,16}.jpg")
    elif type == "page":
	pattern = re.compile("http://photo.renren.com/photo/\d{9}/photo-\d{8,10}")
    else:
	pattern = re.compile("")
    opener = login_xn()
    try:
	fp = opener.open(url)
    except:
        print 'get url exception'
        return 0
    
    s = fp.read()
    fp.close()

    urls = pattern.findall(s)
    if len(urls) == 0 and type == "image1":
	return get_url(url, "image2")
    elif len(urls) == 0 and type == "image2":
	return get_url(url, "image3")
    elif len(urls) == 0 and type == "image3":
	return get_url(url, "image4")
    else:
        return urls

# Parse the album and write the real download URLs into a file.
# url: album address
# file_name: the file you want the URLs saved into
# page: how many pages the album has
# For now, the program cannot download the files all by itself;
# if you implement that, please contact me :)
def make_file(url, file_name, page):
    file = open(file_name, "w")
    file.write("right click on the space , and choose down them all with your favourite download tools.</br>")
    for index in range(0, int(page)):
	l_url = url + "?curpage="
	l_url += str(index)

	page_list = get_url(l_url, "page")
        for page_url in page_list:
	    print page_url
	    image_list = get_url(page_url, "image1")
	    print image_list
	    flag = 1
	    for image_url in image_list:
		if check_repeat(flag) == 1:
		    content = image_url.replace("\/","/")
		    print content
		    file.write('<a href="' + content + '">xiaonei_image_file.jpg</a></br>')
		else:
		    continue
		flag += 1
    file.close()

# A trivial helper (piece of cake): 1 keeps the match, 0 skips it.
def check_repeat(flag):
    """Return 1 for odd counters, 0 for even ones (keep every other match)."""
    return flag & 1

# Interactive console entry point.
def console(file_name):
    """Prompt for the album URL and page count, then write the link file."""
    album = raw_input("type the album site : ")
    total = raw_input("type the total pages : ")
    make_file(album, file_name, total)

console("c:\\xn_img_list.html")
