﻿#coding=utf-8

import urllib,urllib2,cookielib,re,datetime,socket
import EncodingConvert as ec
import time, threading, traceback, os, sys, uuid
from ripper.handler import socks

'''
    Discuz forum crawler with attachment download
    (Discuz论坛爬虫,带附件下载)
    Original: http://www.colordancer.net/blog/?p=325
    Modified by chris
    2010-09-18
'''

gheaders = None

SITE_DOMAIN = ''
SITE_HEADER = SITE_DOMAIN + '/forum/'
rootpath = '.\\downloads\\'
ZXBASE = 'e:/zx2/'
i_lock = threading.Lock()
WORK_INTERVAL = 1

# timeout in seconds
timeout = 60
#socket.setdefaulttimeout(timeout)



def clear_url(torrentUrl):
    '''Normalize a scraped torrent URL.

    Drops spaces and '@' characters, then truncates at the first
    '<' or quote so trailing markup is discarded.'''
    cleaned = torrentUrl.replace(' ', '').replace('@', '')
    for stop in ('<', '\'', '"'):
        cleaned = cleaned.split(stop)[0]
    return cleaned

#  Return the regex matches found in a text: None / single match / list
def getRegFromText(pt, ct):
    '''Run re.findall(pt, ct) and unwrap the result.

    Returns None when nothing matched, the single match when there is
    exactly one, otherwise the full list of matches.'''
    matches = re.findall(pt, ct)
    if not matches:
        return None
    return matches[0] if len(matches) == 1 else matches

def getPageHtml(uri, retry=5):
    #i_lock.acquire()
    #time.sleep(WORK_INTERVAL)
    try:
        return _getPageHtml(uri)
    except Exception, ex:
        print 'error getting', uri
        print ex
        if retry > 0:
            print 'retry'
            time.sleep(8-retry)
            return getPageHtml(uri, retry-1)
        else :
            print 'already retry %d times, game over.' % retry
    finally:
        pass
        #i_lock.release()
        
def _getPageHtml(uri):
    print 'fetching ', uri
    req = None
    if gheaders != None:
        req = urllib2.Request(url=uri, headers=gheaders)
    else:
        req = urllib2.Request(uri)
    ct = urllib2.urlopen(req).read()
    ct = ec.zh2utf8(ct)[1]
    return ct
    #return urllib.urlopen(uri).read()

# Download a forum attachment (with retries)
def getAttachment(uri, fname, retry=15):
    print 'downloading attach to ', fname
    try:
        #name2 = os.path.join(os.path.dirname(sys.argv[0]), fname)
        name2 = fname
        __getAttachment(uri, name2)
        # 切割压缩图片文件
#        if isImageFile(name2):
#            createThumbnail(os.path.join(ZXBASE, name2))
        return fname
    except Exception, ex:
        traceback.print_exc(file=sys.stdout)
        print 'error getting attache', uri
        if retry > 0:
            print 'retry'
            time.sleep(26-retry)
            return getAttachment(uri, fname, retry-1)
        else :
            print 'attachment already retry %d times, game over.' % retry
    finally:
        pass
        
def __getAttachment(uri, fname):
    print 'fetching ', uri
    req = None
    if gheaders != None:
        req = urllib2.Request(url=uri, headers=gheaders)
    else:
        req = urllib2.Request(uri)
    r = urllib2.urlopen(req)
    fp = open(os.path.join(ZXBASE, fname), 'wb')
    fp.write(r.read())
    fp.close()
    return fname
    
def login():
    '''Log in to the forum.

    Set up the cookie jar, scrape the formhash token from the login
    page, then POST the credentials.  On success the global gheaders
    is populated and reused by every subsequent request. '''
  
    global gheaders
    
    if gheaders != None:
        print 'already login.'
        return
  
    # scrape the hidden formhash token required by the Discuz login form
    print 'prepare to login',SITE_HEADER,'...'
    pattern = re.compile("<input type=\"hidden\" name=\"formhash\" value=\"\w*\" \/>")
    content = getPageHtml(SITE_HEADER + 'logging.php?action=login')
    formhash = pattern.findall(content)
    if (len(formhash) > 0):
        formhash = formhash[0]
        # slice the token value out of the matched <input> tag
        formhash = formhash[-12:-4]

        # install a cookie-aware opener so the session cookie persists
        # across all later urllib2 calls
        cookieJar = cookielib.CookieJar()
        cookie_support= urllib2.HTTPCookieProcessor(cookieJar)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        
        # POST the login form
        # NOTE(review): credentials are hard-coded in source — consider
        # moving them to a config file or environment variables.
        postdata=urllib.urlencode({
             'loginfield':'username',
             'username':'outlookxx123',
             'password':'bjitsm123456',
             'referer': SITE_DOMAIN,
             'formhash':formhash,
             'questionid':'0',
             'answer':''
        })
        
        gheaders = {
             'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
             'referer':SITE_DOMAIN
        }
        print 'login.. '
        req = urllib2.Request(
              url = SITE_HEADER + 'logging.php?action=login&loginsubmit=yes&inajax=1',
              data = postdata,
              headers = gheaders
        )
        result = urllib2.urlopen(req).read()
        #f = open('logs/last_login_result.html', 'w')
        #f.write(result)
        #f.close()
        print 'login compelete. '
  
def getPages():
    '''Fetch the seed thread page and return every <link> element that
    points at viewthread.php.'''
    html = getPageHtml(SITE_HEADER + 'thread-3350257-1-1.html')
    return re.findall("<link>.*viewthread.php.*<\/link>", html)

def tgetLinks(url):
    '''Convenience wrapper: fetch url and hand the HTML to getLinks.'''
    return getLinks(getPageHtml(url))
    
def getLinks(pageContent):
    #遍历页面
    count = 1
    names = []
    #for url in urls:
        #url = url[6:-7]
        #print "parsing" + url
        #pageContent = getPageHtml(url)
        #print pageContent
    pattern = re.compile('<a href=\"attachment\.php\?aid=.*>.*<\/a>')
    anchors = pattern.findall(pageContent)
    #遍历下载节点
    for anchor in anchors:
        name = ''
        try:
            #print anchor
            linkPattern = re.compile('\"attachment\.php\?aid=[a-zA-Z0-9\%&;=\?-_\B]*\"')
            link = linkPattern.findall(anchor)
            link = SITE_HEADER + link[0][1:-1]
            #print 'attach link',link
            namePattern = re.compile('>[^<>].*[^<>]<\/')
            name = namePattern.findall(anchor)                                  
            name = name[0][1:-2]
            name = stripInvalChar(name)
            
            date = GetDateString()
            time = GetTimeString()
#            name = rootpath + date + "\\" + time + "_" + getId() + '_' + name    
            #print "download " + link
            #print "to" + name
            count = count + 1
            download(link,name)
        except Exception, ex:
            traceback.print_exc(file=sys.stdout)
        if name != '':
            names.append(name)
    return names
            
def stripInvalChar(name):
    '''Replace parentheses with spaces so the name is a safe file name.'''
    return name.replace('(', ' ').replace(')', ' ')
            
def download(url, filename):
    '''Ensure this month's download folder exists, then fetch the
    attachment at url into filename.'''
    MakeDateFolder(rootpath)
    getAttachment(url, filename)
  
def GetTimeString():
    '''Current local time formatted as HH_MM_SS.'''
    from datetime import datetime
    return datetime.today().strftime("%H_%M_%S")

def GetDateString():
    '''Today's date formatted as YYYY_MM (one download folder per month).'''
    from datetime import date
    return date.today().strftime("%Y_%m")

def MakeDateFolder( inFolderName ):
    '''Create this month's subfolder under ZXBASE/inFolderName.

    Does nothing when inFolderName itself is not an existing directory.
    Fix: the original built the path by hand and inserted a doubled
    backslash separator (inFolderName + '\\\\' + date); os.path.join
    produces a clean, portable path instead.
    '''
    if not os.path.isdir( inFolderName ):
        return
    newFolderName = os.path.join(ZXBASE, inFolderName, GetDateString())
    if not os.path.isdir( newFolderName ):
        os.mkdir( newFolderName )
            
def isImageFile(fname):
    '''True when fname ends with an image extension (.jpg/.jpeg/.png/.bmp),
    case-insensitively.

    Fix: the original used a substring test ('.jpg' in fname), which
    wrongly flagged names like "photo.jpg.torrent"; the extension itself
    must now match.
    '''
    return fname.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))

# Generate a unique id used to name downloaded image and text files
def getId():
    '''Unique id: hex of the current timestamp digits plus a UUID1.'''
    stamp = str(time.time()).replace('.', '')
    return '%s-%s' % (hex(int(stamp)), str(uuid.uuid1()))

def enable_proxy():
    '''Route all new sockets through the local SOCKS5 proxy at 127.0.0.1:7070
    by monkeypatching socket.socket with the socks wrapper.'''
    print 'enabling socket proxy:' , '127.0.0.1:7070'
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 7070)
    # global monkeypatch: affects every subsequent socket in the process
    socket.socket = socks.socksocket
#            socket.create_connection = _create_connection

def disable_proxy():
    '''Clear the default SOCKS proxy.

    NOTE(review): this does not restore socket.socket after
    enable_proxy() replaced it — verify whether that matters.'''
    socks.setdefaultproxy()

if __name__ == '__main__':            
    # Log in first so gheaders carries the session, then crawl one
    # thread page and download all of its attachments.
    login()
    #getAttachment(SITE_HEADER + 'attachment.php?aid=1388706&noupdate=yes', 'attText1.gif')
    #getLinks([SITE_HEADER + 'thread-3269681-1-5.html', ])
    #getLinks(open('raw/attachTorrent.html').read())
    tgetLinks('http://64.120.179.165/forum/thread-3384142-1-1.html')
    #pagelinks = getPages()
    #attlinks = getLinks([SITE_HEADER + 'thread-3350257-1-1.html'])



