# -*- coding: utf-8 -*- 
import urllib2
import cookielib
import time

from m_main import ScrapyEngine2,Scheduler,Task
from m_database import MyDatabaseTmall
from m_product import Product
from m_spider import Spider
from m_downloader import Downloader,NetReturn

if __name__=='__main__':
    # This job is far too large to ever finish in a single pass, so run it in
    # a perpetual loop, re-seeding the task list on every iteration.
    while True:

        task_list = []
        customers = []
        # NOTE(review): the disabled query below returns (userid, usercode,
        # fullname) 3-tuples, but the loop expects bare usercode strings --
        # unpack the rows (e.g. take row[1]) before re-enabling it.
        '''
        db=MyDatabaseTmall()
        mytuple=db.fetchdata('select userid,usercode,fullname from customer where usetag is null and usercode is not null order by rand() limit 200')
        print 'ok'
        '''
        # Hard-coded single test account until the DB branch is re-enabled.
        customers.append('kaixinjiuhao')
        for usercode in customers:
            task = Task()
            task.taskname = 'customer_qq_weibo'
            task.to_downloader = 'Downloader_QQ_WeiBo'
            task.to_spider = 'Spiders_Find_QQ_WeiBo_customer'
            task.url = 'http://t.qq.com/' + usercode
            task_list.append(task)
        scrapy_engine = ScrapyEngine2(task_list)
        # Pause between passes: without this the loop busy-spins against the
        # site/DB whenever a pass finishes quickly. (`time` was imported at
        # the top of the file but never used -- this is the evident intent.)
        time.sleep(5)
