#!/usr/bin/env python
# -*- coding:utf-8 -*- 
import re
import urllib2
import xmlrpclib
import base64
import cookielib
import chardet
import os
import urllib
# Candidate tag / catalog labels (unicode strings).
# NOTE(review): in this file lst_tag is referenced only from commented-out
# code near the bottom and lst_catalog is never referenced — verify they
# are not imported elsewhere before removing.
lst_tag=[u'写真',u'搞笑',u'恶搞',u'爱情',u'囧',u'雷人',u'美女',u'有趣',u'牛人',u'大学',u'广告',u'天雷',u'社会',u'动物',u'可爱',u'杯具']
lst_catalog=[u'科技',u'娱乐',u'互联网',u'美女',u'写真',u'社会',u'热点',u'XX门',u'民生',u'google']
def code_data(tx):
    """Return *tx* encoded as UTF-8 bytes.

    First tries a straight UTF-8 encode (works for unicode text).  If
    that fails -- e.g. a byte string whose implicit ASCII decode blows
    up -- chardet guesses the encoding and the text is transcoded to
    UTF-8, silently dropping undecodable bytes.
    """
    try:
        return tx.encode('utf-8')
    except UnicodeError:
        # Narrowed from a bare `except:`: only encoding failures should
        # trigger the chardet fallback, not unrelated errors.
        guess = chardet.detect(tx)
        return tx.decode(guess['encoding'], 'ignore').encode('utf-8')

def find_luobo(url):
    print 'start'
    cj=cookielib.LWPCookieJar()
    opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    headers={
'User-Agent':'Mozilla/5.0 Firefox/3.6.8',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language':'en-us,en;q=0.5',
'Accept-Encoding':'deflate',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
'Cache-Control':'no-cache',}
    print 'start request url'
    req=urllib2.Request(url,headers=headers)
    html=opener.open(req).read()
    print len(html),'read html ok'
    title=re.findall('<title>([\s\S]*?)</title>',html)
    title=title[0]
    title=title.split('-')[0]
    lst_img_url=re.findall('<img src=\"(http://cdn.dulei.si/files[\s\S]*?)\"',html)
    try:
        del lst_img_url[-1]
        del lst_img_url[-1]
    except:
        pass
    lst_img=[]
    for i in lst_img_url:
        lst_img.append(('','',i))
    return lst_img,title



def find_qq(url):
    print 'start'
    cj=cookielib.LWPCookieJar()
    opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    headers={
'User-Agent':'Mozilla/5.0 Firefox/3.6.8',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language':'en-us,en;q=0.5',
'Accept-Encoding':'deflate',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
'Cache-Control':'no-cache',}
    print 'start request url'
    req=urllib2.Request(url,headers=headers)
    html=opener.open(req).read()
    print len(html),'read html ok'
    title=re.findall('<title>([\s\S]*?)</title>',html)
    title=title[0]
    title=re.sub('腾讯网','',title)
    lst_img=re.findall('showtxt\:\'([\s\S]*?)\'\,smallpic\:\'([\s\S]*?)\'\,\'bigpic\'\:\'([\s\S]*?)\'',html)
    return lst_img,title
def data_site(lst_img,title,name,tag,catalog):
    '''Upload a group of images and register them as a post group.

    lst_img: list of (caption, small-image URL, big-image URL or local
    path) tuples; the small-image slot may be empty.  Each image is
    base64-encoded and pushed to the a-ablum XML-RPC album service,
    then one group entry plus one post per image are created through
    post_rpc().
    '''
    lst_url=[]
    # Remote images need an HTTP opener with browser-like headers;
    # local files are read directly inside the loop below.
    if 'http://' in lst_img[0][2]:
        cj=cookielib.LWPCookieJar()
        opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        headers={
'User-Agent':'Mozilla/5.0 Firefox/3.6.8',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language':'en-us,en;q=0.5',
'Accept-Encoding':'deflate',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
'Cache-Control':'no-cache',}
        print 'start request url'
    else:
        pass
    title=base64.b64encode(title)
    for i in lst_img:
        print 'start conntion to rpc'
        s=xmlrpclib.ServerProxy('http://a-ablum.appspot.com/rpc')
        if 'http://' in lst_img[0][2]:
            # Remote image: download it through the opener built above.
            req=urllib2.Request(i[2],headers=headers)
            blob=base64.b64encode(opener.open(req).read())
        else:
            # Local image: read the file from disk.
            print i
            pic=open(i[2]).read()
            blob=base64.b64encode(pic)
        # NOTE(review): re-runs code_data on the already-base64-encoded
        # title every iteration -- looks redundant; confirm before changing.
        title = code_data(title)
        # NOTE(review): the 'name' argument is discarded here and always
        # sent empty -- confirm whether that is intentional.
        name=''
        name=base64.b64encode(name)
        summary=base64.b64encode(i[0])
        img_url=s.saveimg(name,blob,summary,title,tag,catalog)
        lst_url.append((img_url,i[0],i[1]))# i[1] is the small-image URL; may be left empty
        print img_url
    # Fetch the current post-group id; retry once on an empty response.
    nowid=urllib2.urlopen('http://dongfengkan.appspot.com/getnowid').read()
    title=base64.b64decode(title)
    print nowid
    if nowid:
        pass
    else:
        nowid=urllib2.urlopen('http://dongfengkan.appspot.com/getnowid').read()
        print nowid
    nowid=int(nowid)+1
    data_post={'id_group':code_data(str(nowid)),'title':code_data(title),'name':'','tag':code_data(tag),'catalog':code_data(catalog)}
    # Create the post group (directory) entry.
    post_rpc(id_group=code_data(str(nowid)),title=code_data(title),name=code_data(title),tag=code_data(tag),catalog=code_data(catalog))
    for i in lst_url:
        # NOTE(review): data_post is built but never sent (the urllib2
        # POST was removed in favor of post_rpc) -- confirm it is dead.
        data_post={'id_group':int(int(nowid)+1),'title':title,'name':'','tag':tag,'catalog':catalog,'photo_descrip':i[1],'photo_url':i[0]}
        print str(nowid)
        print i[0]
        print 'atart add %s gruop picture' %(str(i[0]))
        # Add each uploaded photo as a post in the group.
        post_rpc(id_group=code_data(str(nowid)),title=title,name=code_data(title),tag=tag,catalog=catalog,photo_descrip=i[1],photo_url=i[0])
        print 'add some photos'
def post_rpc(**args):
    g_photo_descrip   =code_data(args.get('photo_descrip') or 'no')
    print g_photo_descrip
    print '---------------------------------'
    g_photo_url       =code_data(args.get('photo_url') or r'http://s-album.appspot.com/img/88f5db500777fad5cc4770da32c0657f') 
    print g_photo_url
    print '---------------------------------'    
    g_name            =code_data(args.get('name') or 'noname') 
    print g_name
    print '---------------------------------'    
    g_title           =code_data(args.get('title') or 'notitle') 
    print g_title
    print '---------------------------------'            
        #pub_data        =db.DateTimeProperty(auto_now_add=True)
    g_tag             =code_data(args.get('tag') or u'notag') 
    print g_tag
    print '---------------------------------'    
    g_catalog         =code_data(args.get('catalog') or u'nocatalog') 
    print g_catalog
    print '---------------------------------'    
    g_name_group      =code_data(args.get('name_group') or u'test')
    print g_name_group
    print '---------------------------------'    
    g_id_group        =code_data(args.get('id_group') or u'0001')
    print g_id_group
    print '---------------------------------'    
    photo_descrip   =base64.b64encode(g_photo_descrip)
    photo_url       =base64.b64encode(g_photo_url)
    print '----------------------------------------------'
    print g_name #base64.b64decode(base64.b64decode(base64.b64decode(g_name)))
    print '++++++++++++++++++++++++++++++++++++++++++++++'
    name            =base64.b64encode(g_name) ###base64.b64decode(base64.b64decode(g_name))    #base64.b64encode(g_name)
    title           =base64.b64encode(g_title)
            
        #pub_data        =db.DateTimeProperty(auto_now_add=True)
    tag             =base64.b64encode(g_tag)
    catalog         =base64.b64encode(g_catalog)
    #name_group      =base64.b64encode(g_name_group)
    id_group        =base64.b64encode(g_id_group)
    s=xmlrpclib.ServerProxy('http://dongfengkan.appspot.com/rpc')
    s.savepost(name,title,photo_descrip,photo_url,tag,catalog,id_group)
    
def readfilepic(url_file):
    """Build an image list from a local directory.

    url_file: directory path; any falsy value ('' or None) means the
    current working directory.  (The original crashed with a NameError
    on None because its final `else: pass` left the listing unbound.)

    Returns (lst_img, group_name): lst_img entries are
    (file name without extension, '', full path) and group_name is the
    directory's base name.
    """
    if not url_file:
        url_file = os.getcwd()
    lst_img = []
    for entry in os.listdir(url_file):
        full_path = os.path.join(url_file, entry)
        lst_img.append((entry.split('.')[0], '', full_path))
        print(full_path)
    print(lst_img)
    return lst_img, os.path.split(url_file)[-1]
def fenxi_url(url_file):
    """Dispatch on url_file: scrape a known site or read a local dir.

    qq.com URLs go to find_qq, luo.bo URLs to find_luobo, anything
    without 'http://' is treated as a local directory.  Returns
    (lst_img, title).

    Raises ValueError for an http URL on an unsupported site -- the
    original fell through and died with an UnboundLocalError instead.
    """
    if 'http://' in url_file:
        if 'qq.com' in url_file:
            lst_img, title = find_qq(url_file)
        elif 'luo.bo' in url_file:
            print('use luobo')
            lst_img, title = find_luobo(url_file)
        else:
            raise ValueError('unsupported site: %s' % url_file)
    else:
        lst_img, title = readfilepic(url_file)
    return lst_img, title
    
if __name__ == '__main__':
    import pickle
    # Load the list of source URLs / local paths produced earlier.
    # 'rb' + `with` fixes the unclosed text-mode handle of the original.
    with open('list.pic', 'rb') as f:
        lst = pickle.load(f)
    # Skip the first 332 entries (already processed in earlier runs);
    # replaces the O(n^2) `for ...: del lst[0]` loop.
    del lst[:332]
    num = 0
    for i in lst:
        print(i)
        lst_img, title = fenxi_url(i)
        tag = u'搞笑'
        catalog = u'搞笑'
        name = ''
        num += 1
        try:
            data_site(lst_img, title, name, tag, catalog)
        except Exception as e:
            # Best-effort: keep going, but report why this item failed
            # (the original silently swallowed every error).
            print(e)
        print('===================================')
        print(num)
    print('save ok ')
