# -*- coding: utf-8 -*-

import re
__author__="Poplar"
__date__ ="$2010-2-20 2:16:49$"

import urllib2
import urllib
import os.path
import types
from BeautifulSoup import BeautifulSoup

def decode(html):
    """Return the page markup unchanged.

    Charset conversion (euc-jp -> utf-8) was disabled at some point;
    the hook is kept so it can be re-enabled in one place.
    """
    # Originally: html.decode('euc-jp').encode('utf-8')
    return html

def findFolderName(soup):
    """Derive a download-folder name from the page <title>.

    Takes the text after the last ':' in the title string.
    """
    title_text = soup.find('title').string
    segments = title_text.split(':')
    return segments[-1]

def buildfileName(filePath, fileName):
    """Join a directory path and a file name into a single path.

    Bug fix: the original did ``filePath.join(fileName)``, which is
    ``str.join`` -- it interleaves *filePath* between the CHARACTERS of
    *fileName* (e.g. 'd/'.join('ab') == 'ad/b') instead of concatenating
    the two path segments.  ``os.path.join`` is the intended operation.
    """
    return os.path.join(filePath, fileName)

def writeLog(filePath, result):
    """Best-effort: write *result* to '<filePath>log.txt'.

    Failures are swallowed on purpose -- logging must never abort a
    download run (matches the original's intent).

    Bug fix: the original only closed the file in the ``except`` branch,
    leaking the handle on every successful write, and raised NameError
    in that branch when ``open`` itself failed (``op`` was unbound).
    """
    try:
        op = open(filePath + 'log.txt', "wb")
    except EnvironmentError:
        return  # cannot open the log file; give up silently
    try:
        op.write(result)
    except EnvironmentError:
        pass  # write failure is also non-fatal
    finally:
        op.close()

def download(url, downloadFolder):
    result = url + "\n"
    alldone = 1;
    try:
        page = urllib2.urlopen(url)
    except:
        msg = '[ failed, maybe is 404 error ]'
        return msg
    else:
        result += '[ success ]\n'
        print '[ success ]'
        html = decode(page.read())
        soup = BeautifulSoup(html)
        name = findFolderName(soup)
        rp = soup.find('div', attrs={"class":"main"}).a.string
        links = soup.findAll('a', href=re.compile('^http://image\.blog\.livedoor\.jp/zamen1.+?\.jpg$'))
        if(len(links) == 0): return '[ null ]'
#        filePath = downloadFolder + url.split('/')[-1]+"(" + str(len(links)) + "P)" + "/"
        filePath = downloadFolder + name +"(" + str(len(links)) + "P)" + "/"
        if not os.path.isdir(filePath):
            os.mkdir(filePath)
        if str(type(rp)) == "<class 'BeautifulSoup.NavigableString'>":
            filePath += re.sub('[\*:/\\\?"<>\|]', ' ', rp) + '/'
            if not os.path.isdir(filePath):
                os.mkdir(filePath)
        time = 0
        for a in links:
            if year == 2007 and month == 1:
                continue
            url = a['href']
            try:
                time += 1
                fileName = '%03d.jpg' %(time) #url.split('/')[-1]
                urllib.urlretrieve(url, filePath + fileName)
                if(os.path.getsize(filePath + fileName) < 8000):
                    raise 
                msg = url + " to " + fileName + " ok!\n"
                print msg
                result += msg
            except:
                msg = url+" not ok!\n"
                print msg
                result += msg
                alldone = 0
        if(alldone) :
            msg = '[ all done ]'
        else :
            msg = '[ something wrang ]'
        print msg
        result += msg
        page.close

        writeLog(filePath, result)
        return msg

# Archive period being processed.  Read by download() (the 2007-01 skip)
# and downloadOnePage() (log-file naming); rebound by the loops in the
# __main__ block below.
year = 2008
month = 4
# Cache of already-visited article URLs.  Currently unused -- the code
# that filled and checked it is commented out in the __main__ block.
baseUrls = {}

def downloadOnePage(url, downloadFolder):
    result = ""
    msg = url + '\t'
    print msg
    result += msg
    result += download(url, downloadFolder) + "\n"
    writeLog(downloadFolder + '%03d-%02d-'%(year, month), result)

if __name__ == "__main__":
    basePath = 'e:/image/blog.livedoor.jp/zamen2/'
    for year in range(2008, 2009):
        for month in range(11, 13):
            beginurl = 'http://blog.livedoor.jp/zamen1/archives/%d-%02d.html' %(year, month)
#    beginurl = 'http://blog.livedoor.jp/zamen1/archives/%d-%02d.html' %(year, month)
            print beginurl
            try:
                page = urllib2.urlopen(beginurl)
            except:
                msg = '[ failed, maybe is 404 error ]'
                print msg
            else:
                msg = '[ success ]'
                print msg
                html = decode(page.read())
                print 'html is ok'
                downloadPath = basePath + '%03d-%02d/'%(year, month)
                if not os.path.isdir(downloadPath):
                    os.mkdir(downloadPath)
#                suburls = re.findall(re.compile('http://blog\.livedoor\.jp/zamen1/archives/\d+?\.html'), html)
                soup = BeautifulSoup(html)
                div = soup.find('div', attrs={'class':autopagerize_page_element})
                scripts = div.findChildren('script')
                for script in scripts:
                    url = re.findall(re.compile('http://blog\.livedoor\.jp/zamen1/archives/\d+?\.html'), script.string)[0]
#                    urls = {}
    #                for url in suburls:
    #                    urls[url] = url
    #                for url in urls.keys():
    #                    if not baseUrls.has_key(url):
                    downloadOnePage(url, downloadPath)
#                    baseUrls[url] = url
                page.close
                
#                
                
                    
