﻿#!/usr/bin/env python
#coding: utf-8

import sys
import os
import math
import urllib

from lib.core.log import logger as log
from lib.core.cache import serialize,deserialize
from lib.core.threadpool import *
from lib.models.column import Column
from lib.models.request import Request
from lib.parse.http import *
from lib.parse.xmlproc import *

# Py2-only hack: force UTF-8 as the process-wide default codec so the many
# implicit str<->unicode conversions below (title.decode('utf-8') mixed into
# '%s' formatting, path building) do not raise UnicodeDecodeError.
# NOTE(review): setdefaultencoding is hidden by site.py on purpose and this
# reload() trick is discouraged — it masks encoding bugs globally.
reload(sys)
sys.setdefaultencoding('utf-8')

class Crawl():
    _colList = {}
    _xml = None
    _http = None
    _enableMultithreading = False
    _cahceDic = None

    def __init__(self, columnList, savePath, limit, threadCount, Multithreading=False):
        self._colList = columnList
        self._savePath = savePath
        self._http = HttpParse()
        self._limit = limit
        self._threadCount = threadCount
        self._enableMultithreading = Multithreading
        self._cahceDic = deserialize()
        
    # start
    def run(self):
        #print self._cahceDic
        #return 1
        if self._colList!=None and len(self._colList) > 0:           
            self._threadPool = ThreadPool(self._threadCount)
            print self._enableMultithreading
            #return 1
            if self._enableMultithreading:#多线程
                for col in self._colList:
                    self._threadPool.add_job( self._crawlReady,self._colList[col],1)
                    time.sleep(0.2)
                self._threadPool.wait_for_complete()
            else:
                for col in self._colList:
                    self._crawlReady(self._colList[col],None)
            serialize(self._cahceDic)
        else:
            log.warning("Not found column list.")
			
	# Ready
    def _crawlReady(self, column, args):
        try:
            if args != None:
                col = column[0]
            else:
                col = column
            url = "%sindex.html" % col.url
            log.info("Current column:[%s], url:[%s]" % (col.name, col.url))
            #得到当栏目记录集总素
            col.page_record_total = self._http.getInnerText(Request(col.url, 5), col.page_total_regex)
            if col.page_record_total:
                #栏目总页数
                col.page_total = int(math.ceil(int(col.page_record_total) / int(col.page_size)))
                log.info("Page total:%s, Page size:%s, Record total:%s." % (col.page_total, col.page_size, col.page_record_total))            
                for currPage in range(1, col.page_total + 1): ###循环总页数，批量采集(regex/item@name=list)###
                    log.info(("Current page:%s, Path:%s" % (currPage, url)))
                    #图片ul标签所在的div块   
                    imgDiv = self._http.getInnerText(Request(url, 5), col.regex["list"][0])
                    log.debug(imgDiv)
                    url = "%s%s" % (col.url, col.page_file.replace("\d+", str(currPage)))                
                    if imgDiv:
                        #匹配出当前ul中li中的所有a标签
                        imgList = col.regex["list"][1].findall(imgDiv)
                        #每篇文章的美女图片单独存放一个文件夹。todo: 使用文章标题创建文件夹
                        dirnum = 0 
                        for href,title in imgList: ###当前页中的所有文章(regex/item@name=show)###	
                            dirnum += 1
                            if not href.startswith('http://'):
                                href = "%s%s" % (col.domain, href)
                            log.info("title:%s, href:%s" % (title.decode('utf-8'),href))
                            #开始序列化字符串记录
                            if self._cahceDic:
                                #如果命中cache，那么就停止当前栏目图片抓取
                                cachecol = self._cahceDic.get(col.name)
                                if cachecol!=None and title.decode('utf-8') == cachecol:
                                    log.info("Found cache, exit current column(%s) crawl." % col.name)
                                    return 1
                            if dirnum == 1:
                                log.info("cache column:{'%s':'%s'}" % (col.name, title.decode('utf-8')))
                                self._cahceDic[col.name] =  title.decode('utf-8')
                            #保存路径./pic/jiangyan/20140917/page-001/1/xxx.jpg
                            col.save_path = '%s/%s/%s/%s/' % (self._savePath, \
                                                              col.name,\
                                                              ("page-%s" % str(currPage).zfill(3)), \
                                                              ("%s.%s" % (str(dirnum).zfill(2), title.decode('utf-8'))))
                            log.info("Save path:%s" % col.save_path)
                            if not os.path.exists(col.save_path):
                                os.makedirs(col.save_path)				
                            html = self._http.getHtml(Request(href, 5))
                            #找到具体某一文章图片分页div块
                            imgDiv = col.regex["show"][0].findall(html)
                            if imgDiv:
                                imgCount = int(imgDiv[0])
                                log.info("Found %s pictures." % imgCount)
                                #使用最后一页的文章，所有图片地址都在js中封装
                                detailUrl = href.replace(".html","-%s.html" % imgCount)
                                html = self._http.getHtml(Request(detailUrl, 5))
                                #匹配图片的js代码块
                                imgDiv = col.regex["detail"][0].search(html)
                                #log.debug(imgDiv)
                                if imgDiv:
                                    imgList = col.regex["detail"][1].findall(imgDiv.group())
                                    log.info(imgList)
                                    imgId = 0
                                    tp = ThreadPool(self._threadCount)                                
                                    for img in imgList:					
                                        if self._limit > 0 and self._limit == imgId:				
                                            break
                                        imgId += 1
                                        log.debug("%s" % img)
                                        log.info("[%s]Save file success. %s%s, thread id:%s" % (imgId,col.save_path,os.path.basename(img),threading.currentThread().getName()))
                                        if self._enableMultithreading:
                                            tp.add_job( self._getDownImg,imgId,img.replace("big","pic"),("%s%s" % (col.save_path,os.path.basename(img))))
                                        else:
                                            urllib.urlretrieve(img.replace("big","pic"),("%s%s" % (col.save_path,os.path.basename(img))))
                                    if self._enableMultithreading:
                                        tp.wait_for_complete()
        except NameError,Msg:
            log.error("%s,%s" % (NameError,Msg))
            raise
            
    #下载图片       
    def _getDownImg(self,url,args):
        try:
            urllib.urlretrieve(url[1],url[2])
        except NameError,Msg:
            log.error("%s%s" % (NameError,Msg))	