package com.legleg.run.crawler.parser;

import com.legleg.data.Album;
import com.legleg.data.Feed;
import com.legleg.util.BaseUtil;
import com.legleg.util.HtmlParserUtil;
import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * Created with IntelliJ IDEA.
 * User: Administrator
 * Date: 2012-12-28
 * Time: 1:25 PM
 * Parser for www.umei.cc
 */
/**
 * Crawler parser for albums hosted on www.umei.cc.
 *
 * <p>Album pages are served in GBK encoding. Multi-page albums follow the URL
 * convention {@code <name>.htm}, {@code <name>_2.htm}, {@code <name>_3.htm}, ...
 * — presumably site-wide; confirm against live pages if the site changes.
 */
public class UMeiParser implements BaseParser {

    /** Site identifier stamped on every crawled album. */
    private static final String SITE_NAME = "umei.cc";

    /** Character encoding used by umei.cc pages. */
    private static final String PAGE_CHARSET = "GBK";

    /**
     * Extracts album links from a feed page and stamps each album with the
     * originating feed id and the site name.
     *
     * @param feed the feed whose url is fetched and parsed
     * @return the albums found; may be {@code null} or empty, mirroring
     *         whatever {@code HtmlParserUtil.parseLinks} returns
     */
    @Override
    public List<Album> getAlbumUrlList(Feed feed) {
        String content = HtmlParserUtil.getHtml(feed.getFeedurl(), null);
        List<Album> albums = HtmlParserUtil.getInstance()
                .parseLinks(content, "//div[@id='msy']//div[@class='t']/a[last()]");
        if (albums != null) {
            for (Album album : albums) {
                album.setCrawledfeed(feed.getId());
                album.setCrawledsite(SITE_NAME);
            }
        }
        return albums;
    }

    /**
     * Fetches every page of an album and fills in its title, category, tags,
     * picture count and picture urls.
     *
     * @param album the album to enrich; must carry a non-null page url
     * @return the enriched album, or {@code null} when {@code album} or its
     *         page url is {@code null}
     */
    @Override
    public Album getAlbumDetail(Album album) {
        if (album == null || album.getPageurl() == null) {
            return null;
        }
        String content = BaseUtil.getUrlString(album.getPageurl(), PAGE_CHARSET);
        int pagenum = getMaxPage(content);
        String title = HtmlParserUtil.getInstance()
                .parseTextByTag(content, "//div[@class='pageheader entrypage']/h2");
        String category = HtmlParserUtil.getInstance()
                .parseTextByTag(content, "//div[@class='pageheader entrypage']/p[@class='make_time']/a");
        List<String> tags = HtmlParserUtil.getInstance()
                .parseTextListByXpath(content, "//div[@class='box tags']/ul/li");

        List<String> allPics = new ArrayList<String>();
        List<String> firstPagePics = getPicUrls(content);
        if (firstPagePics != null) {
            allPics.addAll(firstPagePics);
        }
        // Subsequent pages follow the "<name>_<i>.htm" naming convention.
        for (int i = 2; i <= pagenum; i++) {
            String pageUrl = album.getPageurl().replace(".htm", "_" + i + ".htm");
            String pageContent = BaseUtil.getUrlString(pageUrl, PAGE_CHARSET);
            List<String> pics = getPicUrls(pageContent);
            if (pics != null) {
                allPics.addAll(pics);
            }
        }

        album.setTitle(StringUtils.trimToEmpty(title));
        album.setCategory(StringUtils.trimToEmpty(category));
        // Guard against a null tag list: the original called tags.iterator()
        // unconditionally and would have thrown NPE on a tagless album page.
        album.setTags(tags == null
                ? ""
                : StringUtils.trimToEmpty(StringUtils.join(tags.iterator(), ",")));
        album.setPicnum(allPics.size());
        album.setPicurls(allPics);
        return album;
    }

    /**
     * Extracts the picture urls embedded in one album page.
     *
     * @param content raw html of an album page
     * @return the {@code src} attributes of images inside the zoom box, as
     *         returned by the parser util (may be {@code null})
     */
    private List<String> getPicUrls(String content) {
        return HtmlParserUtil.getInstance().parseAttrListByXpath(
                content, "//div[@id='IMG_Zoom']/div[@class='img_box']//img", "src");
    }

    /**
     * Determines the number of pages in an album from its pagination links.
     *
     * <p>Non-numeric anchors ("next", "last", ...) are skipped. The largest
     * number seen wins; the original kept the last parseable one, which made
     * the result depend on link order.
     *
     * @param content raw html of the first album page
     * @return the highest page number found, or 1 when none is present
     */
    private int getMaxPage(String content) {
        List<String> pages = HtmlParserUtil.getInstance()
                .parseTextListByXpath(content, "//div[@id='pagination']/div[@class='pages']/a");
        int max = 1;
        if (pages != null) { // parser util may yield null for a missing pagination block
            for (String page : pages) {
                try {
                    max = Math.max(max, Integer.parseInt(StringUtils.trimToEmpty(page)));
                } catch (NumberFormatException ignored) {
                    // Pagination also contains non-numeric anchors; skip them.
                }
            }
        }
        return max;
    }

    public static void main(String[] args) {
        // Intentionally empty: exercise this parser through the crawler
        // framework rather than an ad-hoc harness.
    }
}
