# -*- coding: utf-8 -*- 
import urllib2
import cookielib
import time
from BeautifulSoup import BeautifulSoup,SoupStrainer

from m_main import Scheduler,Task
from m_database import MyDatabaseTmall
from m_product import Customer_QQ_WeiBo
from m_spider import Spider
from m_downloader import Downloader,NetReturn

class ScrapyEngine2(object):
    def __init__(self,task_list):
        mydatabase=MyDatabaseTmall()
        scheduler=Scheduler()#队列
        
        for t in task_list:
            scheduler.add_pre(t)
            
        while scheduler.count>0:
            curtask=scheduler.use()#从队列中取得任务
            mydownloader=eval(curtask.to_downloader)(curtask)#取得下载件组
            for j in mydownloader.netreturns:#遍列下载件组
                myspider=eval(j.task.to_spider)(j)#处理一个下载件，取得爬虫
                if myspider!=None:#如果存在爬虫结果，执行
                    if len(myspider.tasks)!=0:#将爬虫对象中的任务放到任务队列中去
                        scheduler.addlist(myspider.tasks)
                    if len(myspider.products)!=0:#将爬虫对象中的产品组放到清洗件中去
                        for curproduct in myspider.products:
                            sql=curproduct.getInsertSQL()
                            print sql
                            '''
                            try:
                                mydatabase.runsqlsolo(sql)
                            except:
                                time.sleep(30)
                                mydatabase.runsqlsolo(sql)
                            '''


class Downloader_QQ_WeiBo(Downloader):
    """Fetch a QQ WeiBo page for a task, carrying session cookies.

    On a network failure the request is retried exactly once after a short
    pause; the page body is wrapped in a NetReturn and appended to
    self.netreturns for the engine to hand to a spider.
    """
    def __init__(self, task):
        Downloader.__init__(self)
        self.__go(task)

    def __go(self, task):
        """Download task.url and record the result on self.netreturns."""
        netreturn = NetReturn()
        netreturn.task = task
        # Cookie-aware opener: QQ WeiBo requires session cookies.
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(task.url)
        if task.header is not None:
            for name, value in task.header.items():
                req.add_header(name, value)
        try:
            contents = opener.open(req).read()
        except (urllib2.URLError, IOError):
            # Narrowed from a bare except: only retry on network-level
            # errors (URLError subclasses IOError in Python 2), and only once.
            time.sleep(3)
            contents = opener.open(req).read()
        netreturn.contents = contents
        self.netreturns.append(netreturn)

class Spiders_Find_QQ_WeiBo_customer(Spider):
    """Extract one Customer_QQ_WeiBo record from a downloaded profile page.

    Parses only the 'LUI_wide' div of the page (via SoupStrainer), fills the
    customer's name, usercode, address, constellation and sex, then appends
    the record to self.products for the engine to persist.
    """
    def __init__(self,netreturn):
        Spider.__init__(self)
        contents=netreturn.contents
        print contents  # NOTE(review): debug dump of the raw page body
        
        customer=Customer_QQ_WeiBo()
        customer._insertupdate='update'  # flag record as an UPDATE rather than an INSERT
        
        # Restrict parsing to the main profile container so BeautifulSoup
        # only builds a tree for the part of the page we actually read.
        links=SoupStrainer('div',{'id':'LUI_wide'})
        soup = BeautifulSoup(contents,links)
        
        # Assumes the t.qq.com profile-page markup -- brittle if the site
        # changes; any find() miss raises AttributeError on .contents below.
        customer.fullname=soup.find('a',{'class':'text_user'}).contents[0]
        customer.usercode=netreturn.task.addin_data['usercode']
        customer.address=soup.find('a',{'boss':'btnApolloCity'}).contents[0]
        customer.Constellation=soup.find('a',{'boss':'btnApolloStar'}).contents[0]
        # The 'ico_male' icon class marks a male profile; anything else is
        # recorded as female.
        customer.sex=('男' if soup.find('span',{'class':'info'}).i['class']=='ico_male' else '女')
        customer.usetag='y'
        self.products.append(customer)
        
        
        
        
        
        
        
if __name__=='__main__':
    # Seed the engine with a single profile-scrape task.
    # BUG FIX: 'x' was used below without ever being defined (NameError);
    # the target usercode is now declared explicitly.
    usercode = 'example_user'  # TODO: set the real QQ WeiBo usercode to scrape
    ilist = []

    mytask = Task()
    mytask.taskname = 'comidity_keede'
    # BUG FIX: was 'Downloader_Common', a class neither defined nor imported
    # in this module; Downloader_QQ_WeiBo is the matching downloader here.
    mytask.to_downloader = 'Downloader_QQ_WeiBo'
    mytask.to_spider = 'Spiders_Find_QQ_WeiBo_customer'
    mytask.url = 'http://t.qq.com/' + usercode
    mytask.addin_data = {'usercode': usercode}
    ilist.append(mytask)
    scrapyEngine = ScrapyEngine2(ilist)
