# -*- coding: utf-8 -*- 

'''Scrape Tmall search-result listings and store per-product daily sale data
(id, name, shop, price, sales count, category) into the `tmall` MySQL database.'''

import cookielib
import re
import time
import urllib2
from collections import deque

import MySQLdb
from BeautifulSoup import BeautifulSoup, SoupStrainer


#在win7系统下需要把这一段注释掉的代码启用
import sys
default_encoding = 'utf-8'

if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)


RUNDATE=''#运行日期


def scrapyEngine(task_list):
    mydatabase=MyDatabaseTmall()
    scheduler=Scheduler()#队列
    
    for t in task_list:
        scheduler.add_pre(t)
        
    while scheduler.count>0:
        curtask=scheduler.use()#从队列中取得任务
        mydownloader=eval(curtask.to_downloader)(curtask)#取得下载件组
        for j in mydownloader.netreturns:#遍列下载件组
            myspider=eval(j.name)(j)#处理一个下载件，取得爬虫
            if myspider!=None:#如果存在爬虫结果，执行
                if len(myspider.tasks)!=0:#将爬虫对象中的任务放到任务队列中去
                    scheduler.addlist(myspider.tasks)
                if len(myspider.products)!=0:#将爬虫对象中的产品组放到清洗件中去
                    for curproduct in myspider.products:
                        sql=curproduct.getInsertSQL()
                        #print sql
                        #try:
                        mydatabase.runsqlsolo(sql)
                        #except:
                            #time.sleep(30)
                            #mydatabase.runsqlsolo(sql)
            #break
class Scheduler(object):#队列
    def __init__(self):
        self.tasks=[]
        self.count=0
    def add(self,task):
        self.tasks.append(task)
        self.count+=1
        #print 'add '+task.url
    def add_pre(self,task):
        t=[task]
        self.tasks=t+self.tasks
        self.count+=1
        #print 'add '+task.url
    def addlist(self,tasklist):
        for i in tasklist:
            if i.ordertag==0:
                self.add_pre(i)
            else:
                self.add(i)        
    def use(self):
        if self.count>0:
            curtask=self.tasks[0]
            self.tasks.pop(0)
            self.count-=1
            print 'use '+curtask.url
            return curtask
        else:
            return None
        
class Task(object):#队列中任务
    def __init__(self,to_downloader=None,url=None,customername=None,icookie=None,header_dict=None):
        self.to_downloader=to_downloader
        self.url=url
        self.cookie=icookie#如果存在下载类需要加载
        self.header=header_dict#如果存在下载类需要加载\
        self.ordertag=None#0加在最前面,1加在最后面
        self.refdict=None
        
    def getfromtuple(self,ituple):
        self.to_downloader=ituple[0]
        self.url=ituple[1]
            
        

############################################
class NetReturn(object):#下载后返回的页面,一个页面一个对象
    def __init__(self):
        self.to_spider=None#用来判断由哪一个爬虫来解释
        self.task=None#来自哪一个任务,其中按fromwhere,cate,id,sku的层次记录
        self.contents=None#返回的页面
        self.cookie=None#返回需要下一次任务使用的cookie
        
class Downloader(object):#下载类基类
    def __init__(self):
        self.netreturns=[]#用来记录返回的下载件组
        
class Downloader_1(Downloader):
    '''使用cookie，但是结果不传递cookie'''
    def __init__(self,task):
        Downloader.__init__(self)
        self.__go(task)
        
    def __go(self,task):
        netreturn=NetReturn()
        netreturn.task=task
        netreturn.name='Spiders_Findgoods'
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        try:
            contents=opener.open(urllib2.Request(task.url)).read()
            print len(contents)
        except:
            time.sleep(3)
            contents=opener.open(urllib2.Request(task.url)).read()
        netreturn.contents=contents.decode('gbk').encode('utf-8')
        self.netreturns.append(netreturn)

class Spider(object):#爬虫类基类
    def __init__(self):
        self.products=[]#返回后的产品组
        self.tasks=[]#返回后的任务组
        
class Spiders_Findgoods(Spider):
    def __init__(self,netreturn):
        Spider.__init__(self)
        global RUNDATE#用来记录任务的时间
        contents=netreturn.contents
        #print contents

        soup_detail =BeautifulSoup(contents)
        self.__getgoodsitems(soup_detail,netreturn.task.refdict['cate'])
        #try to find next page,if not exist,return None!
        try:
            urlnext=soup_detail.find('a',{'class':'ui-page-next'})['href'][:-9]
            urlnext='http://list.tmall.com/search_product.htm'+urlnext
            ctask=Task()
            ctask.to_downloader='Downloader_1'
            ctask.url=urlnext
            ctask.refdict={'cate':netreturn.task.refdict['cate']}
            self.tasks.append(ctask)
        except:
            urlnext=None
            
            
        
        
    def __getgoodsitems(self,soup,cate): 
        for detailitem in soup.find('div',{'class':'view grid-nosku '}).findAll('div',{'class':'product'}):
            #print 'ok'
            everydaySaleData=EverydaySaleData()
            everydaySaleData._insertupdate='insert'
            everydaySaleData.id=id=detailitem['data-id']
            everydaySaleData.name=detailitem.find('p',{'class':'productTitle'}).a['title']
            everydaySaleData.cate=cate
            everydaySaleData.href='http://detail.tmall.com/item.htm?id='+everydaySaleData.id
            everydaySaleData.shop=detailitem.find('p',{'class':'productStatus'}).find('span',{'data-icon':'small'})['data-nick']
            everydaySaleData.rundate=RUNDATE
            everydaySaleData.price=detailitem.find('p',{'class':'productPrice '}).em['title']
            everydaySaleData.salenum=detailitem.find('p',{'class':'productStatus'}).span.em.contents[0][:-1]

            self.products.append(everydaySaleData)
            
     

        
###############################################
class Product(object):
    def __init__(self):
        self._insertupdate=None
        self._keyfield=None
        
    def getInsertSQL(self):#生成插入到数据库的SQL
        tablename=self.__class__.__name__
        idict=self.__dict__
        if self._insertupdate=='insert':
            sql_1='insert ignore into '+tablename+'('
            sql_2=') values('
            for i,j in idict.items():
                if i[0]!='_':
                    if j!=None:
                        sql_1+=i+','
                        sql_2+="'"+j+"',"
            sql=sql_1[:-1]+sql_2[:-1]+')'
            return sql
        elif self._insertupdate=='update':
            sql_1='update '+tablename+' set '
            sql_2=' where '+self._keyfield+'="'+self.__dict__[self._keyfield]+'"'
            for i,j in idict.items():
                if i[0]!='_':
                    if j!=None:
                        sql_1+=i+'='+"'"+j+"',"
            sql=sql_1[:-1]+sql_2
            return sql
        
class Commodity(Product):
    def __init__(self):
        Product.__init__(self)
        self.id=None
        self.fromwhere=None
        self.shop=None
        self.name=None
        self.href=None
        self.cate=None
        self.brand=None
            
class EverydaySaleData(Product):
    def __init__(self):
        self.id=''
        self.rundate=''
        self.price=''
        self.salenum=''
        self.name=''
        self.href=''
        self.shop=''
        self.cate=''
        

        
        


        
########################################
class MyDatabaseTmall(object):
    def __init__(self):
        self.conn=conn=conn=MySQLdb.connect(host='115.238.55.68',user='root',passwd='123,abc',db='tmall',port=3306,charset='utf8' )
        self.cursor = conn.cursor()
        self.sqllist=[]
        self.sqlcount=0
        
    def addsql(self,sql):
        self.sqllist.append(sql)
        self.sqlcount+=1
    
    def runsqllist(self):
        for sql in self.sqllist:
            self.runsqlsolo(sql)
        
    def runsqlsolo(self,sql):
        print sql
        self.cursor.execute(sql)#.decode('utf-8').encode('gbk'))
        self.conn.commit()
        
    def fetchdata(self,sql):
        #print sql
        self.cursor.execute(sql)
        ituple=self.cursor.fetchall()
        return ituple
    
    def getfielddata(self,fieldname_tuple,table):
        sql='select '
        for i in fieldname_tuple:
            sql+=i+','
        #print sql
        sql=sql[:-1]+' from '+table
        #print sql
        ituple=self.fetchdata(sql)#.decode('utf-8').encode('gbk'))
        return ituple
        
##################################################


if __name__=='__main__':
    url1='http://list.tmall.com/search_product.htm?cat=53360011&q=%D2%FE%D0%CE%D1%DB%BE%B5&sort=s&style=g&search_condition=7&from=sn_1_cat-qp&active=1&industryCatId=50074933'
    url2='http://list.tmall.com/search_product.htm?cat=53374011&q=%D2%FE%D0%CE%D1%DB%BE%B5&sort=s&style=g&search_condition=23&from=sn_1_rightnav&active=1&industryCatId=50074933'
    url3='http://list.tmall.com/search_product.htm?cat=53350014&q=%D2%FE%D0%CE%D1%DB%BE%B5&sort=s&style=g&search_condition=23&from=sn_1_rightnav&active=1&industryCatId=50074933'
    url4='http://list.tmall.com/search_product.htm?cat=53288023&q=%D2%FE%D0%CE%D1%DB%BE%B5&sort=s&style=g&search_condition=23&from=sn_1_rightnav&active=1&industryCatId=50074933'
    
    ilist=[]
    for x in ((url1,'隐形眼镜'),(url2,'彩色隐形眼镜'),(url3,'护理液'),(url4,'隐形眼镜伴侣')):
        task=Task()
        task.to_downloader='Downloader_1'
        task.url=x[0]
        task.refdict={'cate':x[1]}
        ilist.append(task)
    if RUNDATE=='':
        RUNDATE=time.strftime('%Y-%m-%d', time.localtime())
        print RUNDATE
    scrapyEngine(ilist)
    #except:
        #time.sleep(60)
        #continue