# -*- coding: utf-8 -*- 

import urllib2
import cookielib
from BeautifulSoup import BeautifulSoup,SoupStrainer
import time
import pyodbc
import re

import sys
default_encoding = 'utf-8'

# Python 2 hack: setdefaultencoding() is deleted from sys at interpreter
# startup; reload(sys) restores it so the process-wide default encoding
# can be forced to utf-8.
# NOTE(review): this is widely discouraged -- it can mask real
# encode/decode bugs elsewhere in the program.
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)

RUNDATE=''# run date (YYYY-MM-DD), assigned once in __main__


def scrapyEngine(url_tuple):
    """Crawl engine: seed the scheduler with the initial tasks, then
    repeatedly download, parse, and enqueue follow-up tasks until the
    queue is empty.

    url_tuple -- iterable of (url, taskname, cate) 3-tuples.
    """
    #mydatabase=MyDatabaseTmall()
    scheduler=Scheduler()# task queue

    # Seed the queue with the initial search-page tasks.
    for t in url_tuple:
        task=Task()
        task.getfromtuple(t)
        scheduler.add_pre(task)

    while scheduler.count>0:
        curtask=scheduler.use()# pop the next task from the queue
        mydownloader=downloader_middlewares(curtask)# download; yields page objects
        for j in mydownloader.netreturns:# walk every downloaded page
            myspider=spiders_middlewares(j)# parse one page with the matching spider
            if myspider is not None:# spider produced a result
                if myspider.tasks:# push follow-up tasks back onto the queue
                    scheduler.addlist(myspider.tasks)
                if myspider.products:# hand scraped products to persistence
                    for curproduct in myspider.products:
                        sql=curproduct.getInsertSQL()
                        print(sql)
                        #mydatabase.runsqlsolo(sql)
            #break
        
class Scheduler(object):
    """Task queue for the crawl engine.

    ``count`` mirrors ``len(self.tasks)`` and is read directly by
    ``scrapyEngine`` as its loop condition, so it is kept in sync here.
    """
    def __init__(self):
        self.tasks=[]
        self.count=0

    def add(self,task):
        """Append a task at the back of the queue."""
        self.tasks.append(task)
        self.count+=1
        print('add '+task.url)

    def add_pre(self,task):
        """Push a task at the front of the queue (depth-first order).

        Uses insert(0, ...) instead of rebuilding the whole list with
        ``[task] + self.tasks`` as the old version did.
        """
        self.tasks.insert(0,task)
        self.count+=1
        print('add '+task.url)

    def addlist(self,tasklist):
        """Push every task in ``tasklist`` at the front of the queue."""
        for i in tasklist:
            self.add_pre(i)

    def use(self):
        """Pop and return the task at the front, or None when empty."""
        if self.count>0:
            curtask=self.tasks.pop(0)
            self.count-=1
            print('use '+curtask.url)
            return curtask
        return None
        
class Task(object):
    """One unit of work in the crawl queue."""
    def __init__(self,taskname=None,url=None,cate=None,icookie=None,header_dict=None,id=None):
        # Which downloader/spider pair handles this task.
        self.taskname=taskname
        self.url=url
        self.cate=cate
        # Cookie jar to reuse, if any (loaded by the downloader).
        self.cookie=icookie
        # Extra HTTP headers, if any (loaded by the downloader).
        self.header=header_dict
        # Auxiliary parameter consulted while running the task.
        self.id=id

    def getfromtuple(self,ituple):
        """Populate url, taskname and cate from a (url, name, cate) tuple."""
        self.url,self.taskname,self.cate=ituple[0],ituple[1],ituple[2]
            
        
####################################################
def downloader_middlewares(task):
    """Downloader middleware: look up the downloader class registered for
    the task's name, run it, and return the downloader instance."""
    refDict={'tmall_search_product':Downloader_tmall_search_product,
             'tmall_ID':Downloader_tmall_ID,
             'tmall_ID_judge':Downloader_tmall_ID_judge,
             }
    print(task.url)
    print(task.cate)
    downloader_cls=refDict.get(task.taskname)
    return downloader_cls(task)

def spiders_middlewares(netreturn):
    """Spider middleware: look up the spider class registered for the
    downloaded page's name, run it, and return the spider instance."""
    refDict={'tmall_search_product':Spiders_tmall_search_product,
             'tmall_ID':Spiders_tmall_ID,
             'tmall_ID_judge':Spiders_tmall_ID_judge
             }
    spider_cls=refDict.get(netreturn.name)
    return spider_cls(netreturn)

############################################
class NetReturn(object):
    """One downloaded page plus the context needed to parse it."""
    def __init__(self):
        # name: selects which spider interprets this page.
        # task: the originating Task (carries taskname/cate/id).
        # contents: the raw page body.
        # cookie: cookie jar handed on to follow-up tasks.
        self.name=self.task=self.contents=self.cookie=None
        
class Downloader(object):
    """Base class for downloaders; collects the NetReturn pages produced
    by one download run."""
    def __init__(self):
        self.netreturns=list()
        
class Downloader_tmall_search_product(Downloader):
    """Fetch one Tmall product-search page (task.cate is needed
    downstream by the spider)."""
    def __init__(self,task):
        Downloader.__init__(self)
        self.__go(task)

    def __go(self,task):
        # Reuse the task's cookie jar when present, else start fresh.
        jar=task.cookie if task.cookie is not None else cookielib.CookieJar()
        opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        raw=opener.open(urllib2.Request(task.url)).read()

        page=NetReturn()
        page.task=task
        page.name='tmall_search_product'
        # Tmall serves GBK; normalise to utf-8 for the spiders.
        page.contents=raw.decode('gbk').encode('utf-8')
        page.cookie=jar
        self.netreturns.append(page)

class Downloader_tmall_ID(Downloader):
    """Fetch a Tmall item-detail (ID) page; task.url and task.cate/id
    are needed downstream.

    On a network failure, or when the body looks truncated, the url is
    appended to Errorlog.txt and no NetReturn is produced.
    """
    def __init__(self,task):
        Downloader.__init__(self)
        self.__go(task)

    def __go(self,task):
        netreturn_main=NetReturn()
        netreturn_main.task=task
        netreturn_main.name='tmall_ID'
        if task.cookie==None:
            cj = cookielib.CookieJar()
        else:
            cj=task.cookie
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        # The old version wrapped this in ``while True`` although every
        # branch broke out on the first pass; a single attempt is kept.
        try:
            res=opener.open(urllib2.Request(task.url))
        except IOError:
            self.__logerror(task.url)
            return
        contents=res.read().decode('gbk').encode('utf-8')
        # Truncated/blocked responses lack even a 'g'; cause unknown,
        # seems related to network speed (original author's note).
        if 'g' in contents:
            netreturn_main.contents=contents
            # Item id is the last 11 characters of the detail url.
            netreturn_main.id=task.url[-11:]
            self.netreturns.append(netreturn_main)
        else:
            self.__logerror(task.url)

    def __logerror(self,url):
        """Append a failing url to Errorlog.txt.

        Fixes the original leak: ``f.close`` (without parentheses) never
        actually closed the file handle.
        """
        with open('Errorlog.txt','a') as f:
            f.write(url)
            
class Downloader_tmall_ID_judge(Downloader):
    """Fetch a Tmall rating/review (judge) page, sending the extra
    headers carried on the task."""
    def __init__(self,task):
        Downloader.__init__(self)
        self.__go(task)

    def __go(self,task):
        jar=task.cookie if task.cookie is not None else cookielib.CookieJar()
        opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        req=urllib2.Request(task.url)
        # The rate endpoint requires the referer (and any other headers)
        # stored on the task.
        for name,value in task.header.items():
            req.add_header(name, value)
        raw=opener.open(req).read()

        page=NetReturn()
        page.task=task
        page.name='tmall_ID_judge'
        page.contents=raw.decode('gbk').encode('utf-8')
        page.cookie=jar
        self.netreturns.append(page)
        
class Spider(object):
    """Base class for spiders; collects what a page parse produced."""
    def __init__(self):
        self.products=list()# product records scraped from the page
        self.tasks=list()# follow-up tasks discovered on the page
        
class Spiders_tmall_search_product(Spider):# parse a Tmall product-search page
    def __init__(self,netreturn):
        """Parse one search-result page: queue the next page (if any) and
        one 'tmall_ID' task per product found on this page."""
        Spider.__init__(self)
        global RUNDATE# used to record the task date
        
        # Tmall pages are GBK-encoded; BeautifulSoup defaults to utf8,
        # so the downloader already transcoded ``contents`` to utf-8.
        contents=netreturn.contents
        
        try:
            links2=SoupStrainer('div',{'class':'ui-page-wrap'})
            soup2 = BeautifulSoup(contents,links2)
            # Next-page href with its trailing 9 characters stripped
            # (presumably a fragment/suffix -- TODO confirm on a live page).
            urlnext=soup2.find('a',{'class':'ui-page-next'})['href'][:-9]
            urlnext='http://list.tmall.com/search_product.htm'+urlnext
            
            curtask=Task()
            curtask.url=urlnext
            curtask.cate=netreturn.task.cate
            curtask.taskname='tmall_search_product'
            curtask.cookie=netreturn.cookie
            self.tasks.append(curtask)
        except Exception,e:
            # No next-page link (e.g. last page): deliberately best-effort.
            pass
        #print contents
        links=SoupStrainer('div',{'class':'view  grid-34  clearfix'})
        soup = BeautifulSoup(contents,links)
        # One 'tmall_ID' detail task per product tile on the page.
        for detailitem in soup.findAll('div',{'class':'product'}):
            curtask2=Task()
            curtask2.taskname='tmall_ID'
            curtask2.url='http://detail.tmall.com/item.htm?id='+detailitem['data-id']
            curtask2.cate=netreturn.task.cate
            curtask2.id=detailitem['data-id']
            self.tasks.append(curtask2)
            
class Spiders_tmall_ID(Spider):# parse a Tmall item-detail (ID) page
    def __init__(self,netreturn):
        """Extract a Commodity from a detail page and queue one
        'tmall_ID_judge' task for its rating/review endpoint."""
        Spider.__init__(self)
        global RUNDATE
        commodity=Commodity()
        commodity.id=netreturn.task.id
        commodity.cate=netreturn.task.cate
        #print 'ok'
        contents=netreturn.contents
        #print contents
        soup=BeautifulSoup(contents)
        #print soup
        #print str(soup.find('div',{'class':'tb-detail-hd'}).h3.contents[0])
        # Product title lives in the detail-header <h3>.
        # NOTE(review): if find() returns None this raises AttributeError;
        # assumes every detail page has this div -- confirm.
        s=str(soup.find('div',{'class':'tb-detail-hd'}).h3.contents[0])#.encode('utf-8')
        
        # Strip newlines, carriage returns and tabs from the title.
        regex = re.compile(r'[\n\r\t]')
        commodity.name=regex.sub('', s)
        commodity.href='http://detail.tmall.com/item.htm?id='+commodity.id
        commodity.shop=soup.find('a',{'class':'slogo-shopname'}).contents[0]
        self.products.append(commodity)
        #print contents
        #x=re.search('"rateConfig":{"itemId":"(.*?),.*?"sellerId":(.*?),"spuId":(.*?)}',contents)
        
        
        
        # Build the review-list url from the ids embedded in the page's
        # rateConfig JSON blob.
        # NOTE(review): re.search may return None on an unexpected page
        # layout, which would raise AttributeError below -- confirm.
        curtask=Task()
        x=re.search('"rateConfig":{"itemId":(.*?),.*?"sellerId":(.*?),"spuId":(.*?)}',contents)
        ratelink='http://rate.tmall.com/list_detail_rate.htm?itemId='
        ratelink+=x.group(1)+'&spuId='+x.group(3)+'&sellerId='+x.group(2)
        ratelink+='&order=1&append=0&content=1&tagId=&posi='+'&callback=jsonp4629'+'&currentPage=1'
        curtask.taskname='tmall_ID_judge'
        curtask.url=ratelink
        curtask.cookie=netreturn.cookie
        # The review endpoint requires the detail page as referer.
        curtask.header={'referer':commodity.href}
        curtask.cate=netreturn.task.cate
        self.tasks.append(curtask)
        
class Spiders_tmall_ID_judge(Spider):
    """Parse a rating/review page. Currently a stub: it collects no
    products and queues no follow-up tasks."""
    def __init__(self,netreturn):
        Spider.__init__(self)
        #print netreturn.contents
###############################################
class Product(object):
    """Base class for scraped records; subclasses declare their fields
    as instance attributes and this class turns them into INSERT SQL."""
    def __init__(self):
        pass

    def getInsertSQL(self):
        """Build an INSERT statement from every non-None attribute.

        The table name is the subclass name.  Fixes two defects of the
        old version: the computed table name was never inserted into
        the statement (it produced ``insert into col,...`` with no
        table or opening paren), and embedded single quotes broke the
        SQL -- they are now doubled per the SQL standard.  Also drops
        the leftover debug ``print``.

        NOTE: values are still inlined rather than parameterized; this
        is only suitable for trusted scraped data.
        """
        tablename=self.__class__.__name__
        columns=[]
        values=[]
        for field,value in self.__dict__.items():
            if value is not None:
                columns.append(field)
                # Double any single quote so the literal stays valid SQL.
                values.append("'"+value.replace("'","''")+"'")
        return 'insert into '+tablename+'('+','.join(columns)+') values('+','.join(values)+')'

class Commodity(Product):
    """A scraped Tmall commodity record (one row per product)."""
    def __init__(self):
        Product.__init__(self)
        # Every field defaults to None; getInsertSQL skips None values.
        for field in ('id','fromwhere','shop','name','href','cate','brand'):
            setattr(self,field,None)
            
class Customer(Product):
    """A scraped customer profile record."""
    def __init__(self):
        # Call the base initializer for consistency with Commodity
        # (Product.__init__ is a no-op, so behavior is unchanged).
        Product.__init__(self)
        self.fullname=None# full user name
        self.sex=None
        self.medal=None# medals/badges
        self.userid=None
        self.homelink=None
        self.province=None
        self.city=None
        self.email=None
        self.qq=None
        
class CustomerBuyRecord(Product):
    """A customer purchase record (kept for reference only).

    fullname: full user name; guessname: abbreviated name;
    vip: membership level; credit: credit rating.
    """
    def __init__(self):
        for field in ('fullname','guessname','vip','credit',
                      'buycomidityid','buycomidityfullname',
                      'buytime','buyprice'):
            setattr(self,field,None)
        
class CustomerJudge(Product):
    """A filtered set of potentially useful reviewer names.

    fullname: full user name; guessname: abbreviated name;
    vip: membership level; credit: credit rating.
    """
    def __init__(self):
        for field in ('fullname','guessname','vip','credit',
                      'buycomidityid','buycomidityfullname',
                      'buycomidityjudge','buycomidityjudgetime'):
            setattr(self,field,None)
        

        
        
        
########################################
class MyDatabaseTmall(object):
    """Thin pyodbc wrapper around the local tmall_home2 SQL Server DB.

    SQL text is utf-8 inside the program and transcoded to gbk at the
    database boundary.
    """
    def __init__(self):
        self.conn=pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=tmall_home2;UID=sa;PWD=08034615;')
        self.cursor=self.conn.cursor()
        self.sqllist=[]
        self.sqlcount=0

    def addsql(self,sql):
        """Queue a statement for later batch execution."""
        self.sqllist.append(sql)
        self.sqlcount+=1

    def runsqllist(self):
        """Execute every queued statement, committing each one."""
        for statement in self.sqllist:
            self.runsqlsolo(statement)

    def runsqlsolo(self,sql):
        """Execute a single utf-8 statement (re-encoded to gbk) and commit."""
        self.cursor.execute(sql.decode('utf-8').encode('gbk'))
        self.conn.commit()

    def fetchdata(self,sql):
        """Run a query and return all result rows."""
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def getfielddata(self,fieldname_tuple,table):
        """SELECT the given fields from ``table`` and return all rows."""
        sql='select '+','.join(fieldname_tuple)+' from '+table
        return self.fetchdata(sql.decode('utf-8').encode('gbk'))
        
##################################################


if __name__=='__main__':
    # All seed urls share the same query string apart from the cat id.
    _URL='http://list.tmall.com/search_product.htm?cat=%s&style=g&search_condition=7&from=sn_1_cat&active=1&industryCatId=50074933'
    # (category id, human-readable category label) seed pairs.
    _CATES=(('50074934','隐形眼镜'),
            ('50074935','护理液'),
            ('50074936','彩色隐形眼镜'),
            ('50106126','隐形眼镜伴侣'))
    ituple=tuple((_URL % code,'tmall_search_product',label) for code,label in _CATES)
    if RUNDATE=='':
        # Record the run date once, as YYYY-MM-DD.
        RUNDATE=time.strftime('%Y-%m-%d', time.localtime())
        print(RUNDATE)
    scrapyEngine(ituple)