﻿#!/usr/bin/env python
# -*- coding:utf-8 -*- 
import re
import urllib2
import xmlrpclib
import base64
import cookielib
import chardet
import os
import urllib

def get_luobo():
    lst_img=[]
    cj=cookielib.LWPCookieJar()
    opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    headers={
'User-Agent':'Mozilla/5.0 Firefox/3.6.8',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language':'en-us,en;q=0.5',
'Accept-Encoding':'deflate',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
'Cache-Control':'no-cache',}
    print 'start request url'
    #req=urllib2.Request(url,headers=headers)
    #html=opener.open(req).read()
    #print len(html),'read html ok'
    for i in xrange(1,13):
        print '-------------------------------------------'
        print 'start find page %s ' %(i)
        url='http://luo.bo/category/photos/page/%s/' %(i)
        req=urllib2.Request(url,headers=headers)
        html=opener.open(req).read()
        lst_url_imgpage=re.findall('<a href=\"(http://luo.bo/[\s\S]*?/)\" title=\"([\s\S]*?)" ><img[\s\S]*?</a>',html)
        
        for i in lst_url_imgpage:
            print '-------------------------------\n',i[0]
            lst_img.append(i[0])
    lst_img_last=[]
    for i in lst_img:
        print '|||||||||||||||||||||||||||||||||||||||||||||\n','start find right url'
        #url=re.findall('http://luo.bo/([\s\S]*?)/\"',i)
        #print url[0]
        #right_url=''.join(['http://luobo/',url[0],'/'])
        if len(i)<=22:
            lst_img_last.append(i)
        else:
            pass
    print lst_img_last
    #f=open(r'H:\eclipseworpplace\pack\src\luobo.txt','w')
    #f.write(lst_img_last)
    #f.close
    return lst_img_last

if __name__ == '__main__':
    # Scrape the URL list and persist it for later reuse.
    import pickle
    li = get_luobo()
    # 'wb', not 'w': pickle streams are binary; text mode can corrupt the
    # data on Windows.  'with' guarantees the handle is closed on error.
    with open(r'list.pic', 'wb') as f:
        pickle.dump(li, f)
