#!/usr/local/bin/python
#coding=utf-8
import sys, time
from threading import Thread

sys.path.append('/harvester/ripper/source/src/')
#sys.path.append('C:\\Documents and Settings\\Administrator\\My Documents\\Aptana Studio 3 Workspace\\Ripper\\src')

'''
Created on 2011-8-29

@author: chris
'''

from ripper.core.DataTypes import createImage, createFile
from ripper.core.Item import get_item_by_name
from ripper.core.Exceptions import RipperException, DownloadException
from ripper.core.Storage import db
from ripper.core import RequestDispatcher
from ripper.handler.HttpHandler import HttpHandler
from ripper.core.DataObj import DataObj

# ----------     CONSTANTS

THREAD_INTERVAL = 1.5 # 多线程读取内容的间隔

class Engine(object):
    """Scraping engine.

    Walks an item template's index (listing) pages, extracts one object per
    list entry, resolves every configured property from the object's detail
    page, persists results into the template's MongoDB collection and,
    optionally, downloads binary attachments (image/file typed properties).
    """

    def __init__(self, needDownload=True):
        '''
        Constructor.

        needDownload -- when True, downloadable fields (image/file types)
                        are fetched right after an object is parsed.
        '''
        self.item = None           # active Item template, set by load_item_info()
        self.datas = None          # kept for backward compatibility (not used here)
        self.MAX_INDEX_PARSE = 5   # max index objects yielded per batch to the consumer
        self.needDownload = needDownload

    # Load the item template and build an HTTP handler rooted at its resource URL.
    def load_item_info(self, itemName):
        self.item = get_item_by_name(itemName)
        self.httpHandler = HttpHandler(self.item.get_property('resource')['root'])

    # Collect data according to the item template.
    def process_item(self, item=None, threaded=True):
        # BUGFIX: the original raised whenever an item WAS passed in; the
        # intent is to fall back to the preloaded item only when none is given.
        if item is None:
            item = self.item
        if item is None:
            raise RipperException('Item未加载')

        # Parse the index pages and process each yielded batch of objects.
        for keyProp, otherProps, properties, savedObjList, targetCollection in self.process_index(item):
            self.process_item_selected(keyProp, otherProps, properties, savedObjList, targetCollection, threaded)

    # Resolve the detail-page properties for one batch of objects.
    def process_item_selected(self, keyProp, otherProps, properties, savedObjList, targetCollection, threaded):
        for obj in savedObjList:
            parser = self.item.get_parser()
            if threaded == False:  # single-threaded: parse inline
                self.process_property(parser, keyProp, otherProps, properties, obj, targetCollection)
            else:  # multi-threaded: hand the work to the dispatcher pool
                time.sleep(THREAD_INTERVAL)  # throttle submissions to the pool
                RequestDispatcher.request_work((self.process_property,
                                             [parser, keyProp, otherProps,
                                              properties, obj, targetCollection]))

    # Return the key (unique identifier) property, or None if none is marked.
    def __findKeyProp(self, properties):
        keyProp = None
        for p in properties:
            if p.is_key():
                keyProp = p
        return keyProp

    # Return the properties that are parsed together with the index list.
    def __findListProps(self, properties):
        return [p for p in properties if p.is_in_form_list()]

    # Raw property definitions whose type requires a binary download.
    def __downloadableProps(self):
        props = self.item.get_properties(raw=True)
        return [p for p in props if p['type'] in ['image', 'image[]', 'file', 'file[]']]

    # Re-parse a single stored object identified by its key value.
    def parse_obj(self, keyValue, itemName):
        item = get_item_by_name(itemName)
        properties = item.get_properties()
        parser = item.get_parser()  # list parser
        keyProp = self.__findKeyProp(properties)
        otherProps = self.__findListProps(properties)
        targetCollection = item.get_property('collection')

        obj = db.query_one(targetCollection, {keyProp.name : keyValue})
        self.process_property(parser, keyProp, otherProps, properties, obj, targetCollection)

    # Parse the index (listing) pages; generator yielding batches of stored
    # objects as (keyProp, otherProps, properties, objBatch, targetCollection).
    def process_index(self, item):
        pages = item.get_pages()
        properties = item.get_properties()
        parser = item.get_parser()  # list parser

        # Locate the index page definition.
        pageIndex = None
        for p in pages:
            if p.is_index():
                pageIndex = p

        keyProp = self.__findKeyProp(properties)
        otherProps = self.__findListProps(properties)

        # Completeness-check properties (there may be several).
        ccProp = [p for p in properties if p.is_check()]

        targetCollection = item.get_property('collection')

        savedObjList = []
        counter = 0

        # Pagination bounds from the template.
        pageCtrl = item.get_property('pageControl')
        pageStart = int(pageCtrl['start'])
        pageEnd = int(pageCtrl['end'])
        # NOTE(review): 'end' is treated inclusively only for 0/1-page spans;
        # preserved as-is so existing template configs keep their behavior.
        if pageEnd - pageStart == 1:
            pageEnd = pageEnd + 1
        elif pageStart == pageEnd:
            pageEnd = pageStart + 1
        orginalUrl = pageIndex.url

        # Crawl page by page.
        for pageNum in range(pageStart, pageEnd):
            pageIndex.url = parser.goto_page(orginalUrl, pageNum)
            for obj in parser.parse_obj_list(pageIndex, keyProp, otherProps, item):

                o = None

                # Already stored?
                if self.__checkObjExists(targetCollection, obj, keyProp):
                    # Skip it when it was completely collected.
                    q = self.__checkObjComplete(targetCollection, obj, keyProp, ccProp)
                    if q is None:  # fully collected
                        continue
                    o = q['_id']
                else:
                    o = db.save_item_data(targetCollection, obj)

                # Normalize to a full document before batching.
                # (The original re-scanned the whole batch on every append: O(n^2).)
                if not isinstance(o, dict):
                    o = db.query_one(targetCollection, {'_id' : o})
                savedObjList.append(o)

                counter = counter + 1
                if counter == self.MAX_INDEX_PARSE:
                    counter = 0
                    batch = savedObjList
                    savedObjList = []
                    yield keyProp, otherProps, properties, batch, targetCollection

            if savedObjList != []:
                yield keyProp, otherProps, properties, savedObjList, targetCollection
                # BUGFIX: reset the batch; the original kept it, so leftover
                # objects were yielded (and re-processed) after every
                # following page.
                savedObjList = []

    # Resolve every remaining property of one object from its detail page,
    # persisting after each property so a crash loses at most one field.
    def process_property(self, parser, keyProp, otherProps, properties, obj, targetCollection):

        # Detail-page entry URL: the explicit 'entry' property when declared,
        # otherwise the key property's value itself.
        detailUrl = ''
        entry = self.__getEntryProperty(properties, obj)
        if entry is None:
            detailUrl = obj[keyProp.name]
        else:
            detailUrl = entry

        for p in properties:
            # Key and list-parsed properties were already filled in.
            if p in [keyProp, ] or p in otherProps : continue
            func = getattr(parser, 'get_' + p.name)
            obj[p.name] = func(detailUrl, p, obj)
            if obj[p.name] is None : continue

            # Single image.
            if p.type in ['image', ]:
                if p.has_fake():
                    obj[p.name] = createImage(obj[p.name], int(p['fakeLength']))
                else:
                    obj[p.name] = createImage(obj[p.name])

            # Image list.
            if p.type in ['image[]', ]:
                imgs = obj[p.name]
                obj[p.name] = []
                for img in imgs:
                    # BUGFIX: the fake-length variant was built from
                    # obj[p.name] (the freshly-emptied result list) instead
                    # of the source image url.
                    if p.has_fake():
                        bimg = createImage(img, int(p['fakeLength']))
                    else:
                        bimg = createImage(img)
                    obj[p.name].append(bimg)

            # Single file.
            if p.type in ['file', ]:
                obj[p.name] = createFile(obj[p.name])

            # File list.
            if p.type in ['file[]', ]:
                files = obj[p.name]
                obj[p.name] = []
                for f in files:
                    obj[p.name].append(createFile(f))

            # Numeric types: best-effort conversion, keep the raw value on failure.
            if p.type in ['float', 'int', 'double']:
                try:
                    obj[p.name] = float(obj[p.name])
                except Exception:
                    pass

            # Checkpoint progress after every resolved property.
            db.save_item_data(targetCollection, obj)

        # Fetch attachments for the completed object when requested.
        if self.needDownload:
            self.download_content(obj)

        return

    # Read the entry property value; returns None when no property is
    # marked as entry (caller then falls back to the key property).
    def __getEntryProperty(self, props, obj):
        for p in props:
            if p.is_entry():
                return obj[p.name]
        return None

    # True when an object with the same key value is already stored.
    def __checkObjExists(self, targetCollection, obj, keyProp):
        pname = keyProp.name
        return db.db[targetCollection].find({pname : obj[pname]}).count() > 0

    # Completeness check: return None when the stored document has every
    # check property present and non-None; otherwise return the document.
    # Raises RipperException when no check properties are configured.
    def __checkObjComplete(self, targetCollection, obj, keyProp, ccProp):
        if ccProp == []:
            raise RipperException('缺少完整性检查字段')
        pname = keyProp.name
        q = db.db[targetCollection].find_one({pname : obj[pname]})
        if 'collectDate' not in q:  # was dict.has_key (removed in Py3)
            return q
        for c in ccProp:
            if c.name not in q or q[c.name] is None:
                return q
        # Fully collected; still (re)queue attachment downloads when requested.
        if self.needDownload:
            self.download_content(q)
        return None

    # Queue downloads for every downloadable property of every stored record,
    # sorted by _id in the given direction (-1 = newest first).
    def download_property(self, direction=-1):
        propsNeedDownload = self.__downloadableProps()

        objs = db.db[self.item.get_property('collection')].find().sort('_id', direction)
        for obj in objs:
            for prop in propsNeedDownload:
                data = DataObj(obj, prop, self.item, self.httpHandler)
                RequestDispatcher.request_work( (data.download, []) )

    # Queue downloads for the downloadable properties of a single object.
    def download_content(self, obj):
        for prop in self.__downloadableProps():
            data = DataObj(obj, prop, self.item, self.httpHandler)
            RequestDispatcher.request_work( (data.download, []) )
                
          
# 收集内容                
# Collect content for the named item template (optionally downloading
# attachment fields while parsing).
def get_content(itemName, needDownload):
    crawler = Engine(needDownload)
    crawler.load_item_info(itemName)
    crawler.process_item()
    
# 根据收集的内容下载文件
# Download the attachment files of previously collected content,
# iterating the stored records sorted by _id in the given direction.
def get_files(itemName, direction):
    sys.stdout.write('direction %s\n' % (direction,))
    downloader = Engine()
    downloader.load_item_info(itemName)
    downloader.download_property(direction=int(direction))
    
# test
# Debug helper: re-parse a single stored object identified by its key value.
def get_obj( keyValue, itemName):
    eng = Engine()
    eng.load_item_info(itemName)
    eng.parse_obj(keyValue, itemName)
    
if __name__ == '__main__':
    
    '''
        Engine.py aisex_game 1
    '''
    # Usage: Engine.py <itemName[,itemName,...]> [jobType] [direction] [anything]
    #   jobType 1 = collect content; any other value = download attachments.
    #   A fourth argument (whatever its value) disables in-parse downloading.
    
    THREAD_NUM = 50      # size of the dispatcher's worker thread pool
    jobType = 1          # 1: collect content; otherwise: download files
    itemName = 'aisex_asia'
    isBatch = 0          # NOTE(review): assigned but never read below
    items = []           # filled when itemName is a comma-separated batch
    direction = -1       # _id sort direction for download jobs (-1 = newest first)
    needDownload = True  # whether downloadable fields are fetched while parsing
    
    if len(sys.argv) > 1:
        itemName = sys.argv[1]
    if len(sys.argv) > 2:
        jobType = int(sys.argv[2])
    if len(sys.argv) > 3:
        direction = int(sys.argv[3])
    if len(sys.argv) > 4:
        # presence of a 4th argument disables downloading, regardless of value
        needDownload = False
    
    # Batch mode: a comma-separated item list runs each item in its own thread.
    if ',' in itemName :
        items = itemName.split(',')
        
            
    print 'loading ', itemName    , '.....'
    # NOTE(review): each pass of this loop starts a NEW thread pool without
    # stopping the previous one, then sleeps 3000s — confirm this is intended.
    while True:
        
        RequestDispatcher.make_and_start_thread_pool(number_of_threads_in_pool=THREAD_NUM)
        
#        get_obj('http://www.aisex.com/bt/htm_data/5/1109/484254.html', itemName)
        
        if jobType == 1:
            print 'loading Content'
            if items != []:
                # one collector thread per item in the batch
                for item in items:
                    print 'start ', item
                    Thread(target=get_content, args=(item, needDownload)).start()
            else:
                get_content(itemName, needDownload)
        else:
            print 'loading Attachments'
            if items != []:
                # one downloader thread per item in the batch
                for item in items:
                    print 'start file ', item
                    Thread(target=get_files, args=(item,direction)).start()
            else:
                get_files(itemName, direction)
            

#        Thread(target=get_content, args=('aisex_asia',)).start()
#        Thread(target=get_content, args=('aisex_euro',)).start()
#        Thread(target=get_content, args=('aisex_hentai',)).start()
#        Thread(target=get_content, args=('aisex_game',)).start()
#        
#        RequestDispatcher.show_all_results()    
        time.sleep(3000)
#        RequestDispatcher.stop_and_free_thread_pool()
        
        
        
        
        