﻿#!/usr/bin/perl

#Function : 批量从豆瓣相册下载图片
#Version  : 3.0
#Date     : 2011-04-07 14:05
#Encoding : UTF-8
#About    : NOT PERFECT
use strict;
use warnings;
use URI;
use Web::Scraper;
use Encode;
use LWP::Simple;
use HTML::TreeBuilder;
use File::Basename;
use utf8;
use Win32::FindFile qw( CreateDirectory );

#豆瓣相册的地址    
my $URL         = "http://www.douban.com/photos/album/46286584/";
#文件下载到的文件夹的地址
my $FILEDIR     = "I:/IMAGE/";

#判断相册的页数
# Determine the paging of the album.
# Scrapes the ".paginator" element of the album page ($URL), collects
# every pagination link, and returns the largest "?start=N" offset found.
# Returns 0 when the album has a single page (no paginator present).
sub vailpage {
    my %link;

    my $worker = scraper {
        process ".paginator", "link[]" => "HTML";
    };

    my $result = $worker->scrape( URI->new($URL) );

    for my $links ( @{ $result->{link} || [] } ) {
        my $root = HTML::TreeBuilder->new_from_content($links);

        # Collect every pagination href; the hash deduplicates repeats.
        foreach my $node ( $root->find_by_tag_name('a') ) {
            $link{ $node->attr('href') }++;
        }
        $root->delete;    # free the parse tree explicitly
    }

    # FIX: parse the "?start=N" query parameter with a regex instead of
    # slicing the URL at a fixed column (the old substr($link, 51) broke
    # whenever the album id had a different length). Also hoisted out of
    # the link loop — it only needs to run once over the collected keys.
    my $max_start = 0;
    foreach my $href ( sort keys %link ) {
        if ( $href =~ /[?&]start=(\d+)/ ) {
            $max_start = $1 if $1 > $max_start;
        }
    }
    return $max_start;
}

#Function : 以相册名称新建文件夹
sub makedir {
		my (%link, $worker, $result, $root);
    
    $worker = scraper {
        process "/html/body/div[2]/div[2]/div/div[2]/h1", "link[]" => "TEXT";
    };
    
    $result = $worker->scrape( URI->new($URL) );
    
    for my $links ( @{ $result->{link} } ) {
			
			CreateDirectory($FILEDIR.$links);
    		my $dirtemp = $FILEDIR.encode("euc-cn",$links);
    		
    		#除去文件路径后面的空格
    		$dirtemp=~s/ +$//;
    		return $dirtemp."/";
    }
}

#主函数
#param : $URL 传入参数 : 链接地址
# Main worker.
# Scrapes every photo thumbnail on the album page referenced by the
# global $URL, follows each photo's detail page, and downloads the
# full-size image into $FILEDIR. Dies on the first failed download.
sub main {
    my $worker = scraper {
        process ".photolst_photo", "image[]" => "HTML";
    };

    my $result = $worker->scrape( URI->new($URL) );

    for my $image ( @{ $result->{image} } ) {
        my $root = HTML::TreeBuilder->new_from_content($image);
        my ( %img, %pic_id );

        # Collect thumbnail image URLs from this album-page fragment.
        foreach my $node ( $root->find_by_tag_name('img') ) {
            $img{ $node->attr('src') }++;
        }

        # Derive photo IDs from the thumbnail file names.
        foreach my $pic ( sort keys %img ) {
            $pic_id{ basename( $pic, ".jpg" ) }++;
        }

        foreach my $id ( sort keys %pic_id ) {
            # NOTE(review): fixed-position slice of the ID (chars 1..9);
            # fragile, but kept for compatibility with the thumbnail
            # naming scheme the rest of the script assumes.
            my $id_single  = substr $id, 1, 9;
            my $url_single = "http://www.douban.com/photos/photo/$id_single/";

            my $worker_single = scraper {
                process ".mainphoto", "image_single[]" => "HTML";
            };

            my $result_single = $worker_single->scrape( URI->new($url_single) );
            for my $image_single ( @{ $result_single->{image_single} } ) {
                my $root_single = HTML::TreeBuilder->new_from_content($image_single);
                my %img_single;

                # FIX: scan the detail-page tree ($root_single) just built
                # above — the original mistakenly scanned the thumbnail
                # tree ($root), so the full-size image URL was never read.
                foreach my $node ( $root_single->find_by_tag_name('img') ) {
                    $img_single{ $node->attr('src') }++;
                }

                print $URL . "\n";
                # Download each full-size image.
                foreach my $picmin ( sort keys %img_single ) {
                    my $filedir = $FILEDIR . encode( "utf8", basename($picmin) );
                    my $pic_url = "http://img3.douban.com/view/photo/photo/public/"
                                . basename($picmin);
                    my $status  = getstore( $pic_url, $filedir );

                    die "Error $status on $picmin" unless is_success($status);
                    print $filedir . "\n";
                    print $pic_url . "\n";
                }
                $root_single->delete;    # free the detail-page parse tree
            }
        }
        $root->delete;    # free the album-page parse tree
    }
}

# --- Script entry point -------------------------------------------------
# Build the target directory from the album title, work out the last
# paging offset, then download every page of the album.
# (Plain sub calls replace the &sub ampersand syntax: &sub bypasses
# prototypes and, without parens, would reuse the current @_.)
$FILEDIR = makedir();
my $lovelink = vailpage();

if ( $lovelink == 0 ) {
    # Single-page album: download it directly.
    main();
}
else {
    my $base_url = $URL;

    # Douban shows 18 photos per page: walk start = 0, 18, 36, ... up to
    # the largest offset reported by vailpage(), rewriting $URL for each
    # page before downloading it.
    for ( my $i = 0; $i <= $lovelink; $i += 18 ) {
        $URL = $base_url . "?start=$i";
        main();
    }
}