#!/usr/bin/env python
# -*- coding:utf-8 -*- 
import urllib2
import re
import pickle
from find_qq_pic import*
def get_blog_url(lst_url_tag):
	lst_url=[]
	for i in lst_url_tag:
		flag=1
		while flag:
	#url="http://www.lureyou.com/html/ytag/xiezhen/page/%s" % flag
			url="%s/page/%s" % (i[0],flag)
			print url
			print '----------------------------------'
			try:
				body=urllib2.urlopen(url).read()
				li=re.findall('<div class=\"boxcaption\"><h3><a href=\"([\s\S]*?)\"',body)
				if li:
					for j in li:
						lst_url.append(j)
						print li
					flag+=1
				else:
					flag=0
			except:
				flag=0
	f=open('www.lureyou.com.pic','w')
	pickle.dump(lst_url,f)
	f.close()

def get_blog_tag():
        lst_url=[]
        #for i in xrange(1,8):
        url="http://www.lureyou.com/html/ytag/megan-fox" 
        print url
        print '----------------------------------'
        body=urllib2.urlopen(url).read()
        li=re.findall('<a href=\'(http\://www.lureyou.com/html/ytag/[\s\S]*?)\'[\s\S]*?>([\s\S]*?)</a>',body)
        for i in li:
                lst_url.append((i[0],i[1]))
        print li
        f=open('www.lureyou.com.tag.pic','w')
        pickle.dump(lst_url,f)
        f.close()
        return lst_url
def find_blog(url):
    print 'start'
    cj=cookielib.LWPCookieJar()
    opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    headers={
'User-Agent':'Mozilla/5.0 Firefox/3.6.8',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language':'en-us,en;q=0.5',
'Accept-Encoding':'deflate',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
'Cache-Control':'no-cache',}
    print 'start request url'
    req=urllib2.Request(url,headers=headers)
    html=opener.open(req).read()
    print len(html),'read html ok'
    title=re.findall('<title>([\s\S]*?)</title>',html)
    title=title[0]
    title=title.split('|')[0]
    #title=re.sub('腾讯网','',title)
    lst_img_url=re.findall('<p style=\"text-align\: center\;\"><img src=\"([\s\S]*?)\"',html)
    catalog=re.findall('rel\=\"category tag\">([\s\S]*?)</a>',html)[0]
    tag=re.findall('rel\=\"tag\">([\s\S]*?)</a>',html)[0]
    lst_img=[]
    for i in lst_img_url:
        lst_img.append(('','',i))
    return lst_img,title,catalog,tag
def save_pic():
	"""Scrape all tag links, then harvest every article URL per tag."""
	tags=get_blog_tag()
	get_blog_url(tags)

if __name__=='__main__':
	#get_blog_url()
	#save_pic()
	f=open('www.lureyou.com.pic')
	lst_url=pickle.load(f)
	for i in range(500):
		del lst_url[0]
	num=0
	for i in lst_url:
		print i 
		lst_img,title,catalog,tag=find_blog(i)
		#tag=u'正妹'#raw_input('请输入标签：')
		#catalog=u'美女'#raw_input('请输入目录：')
		name=''#raw_input('请输入名称：')
		num+=1
		try:
			data_site(lst_img,title,name,tag,catalog)
		except:
			pass
		print '$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$'
		print 'process %s work well' % i
		print '==========================================='
		print num
