# -*- coding: utf-8 -*- 

'''Fetch user information from http://www.131458.com/tblist-1-4.html'''

import cookielib
import re
import time
import urllib2
from collections import deque

import MySQLdb
from BeautifulSoup import BeautifulSoup,SoupStrainer

'''
#On Windows 7, enable this commented-out section
import sys
default_encoding = 'utf-8'

if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)
'''

RUNDATE=''#run date of this job, set at startup (YYYY-MM-DD)


def scrapyEngine(url_tuple):
    """Drive the scrape loop.

    Seeds the scheduler from (taskname, url) tuples, then repeatedly:
    pop a task, download its page(s), parse them with the matching spider,
    re-queue any follow-up tasks and emit the scraped products' SQL.

    url_tuple -- iterable of (taskname, url) seed pairs.
    """
    mydatabase = MyDatabaseTmall()
    scheduler = Scheduler()  # FIFO task queue

    for t in url_tuple:
        task = Task()
        task.getfromtuple(t)
        scheduler.add_pre(task)

    while scheduler.count > 0:
        curtask = scheduler.use()  # pop the next pending task
        mydownloader = downloader_middlewares(curtask)  # fetch page(s) for it
        for j in mydownloader.netreturns:  # walk the downloaded pages
            myspider = spiders_middlewares(j)  # parse one page into a spider result
            if myspider is not None:
                # push the spider's follow-up tasks back onto the queue
                if myspider.tasks:
                    scheduler.addlist(myspider.tasks)
                # hand scraped products to the persistence layer
                # (insert currently disabled; SQL is only printed)
                for curproduct in myspider.products:
                    sql = curproduct.getInsertSQL()
                    print(sql)
                    # mydatabase.runsqlsolo(sql)
        
class Scheduler(object):
    """FIFO task queue.

    ``count`` always mirrors the number of pending tasks and is read by the
    engine loop. Internally a deque is used so that popping and pushing at
    the front are O(1) (a plain list made ``pop(0)`` and front-insert O(n)).
    """

    def __init__(self):
        self.tasks = deque()  # pending tasks, front = next to run
        self.count = 0        # number of pending tasks

    def add(self, task):
        """Append a task at the back of the queue."""
        self.tasks.append(task)
        self.count += 1

    def add_pre(self, task):
        """Insert a task at the front of the queue (runs next)."""
        self.tasks.appendleft(task)
        self.count += 1

    def addlist(self, tasklist):
        """Enqueue several tasks: ordertag==0 goes to the front, else the back."""
        for i in tasklist:
            if i.ordertag == 0:
                self.add_pre(i)
            else:
                self.add(i)

    def use(self):
        """Pop and return the next task, or None when the queue is empty."""
        if self.count > 0:
            curtask = self.tasks.popleft()
            self.count -= 1
            print('use ' + curtask.url)
            return curtask
        return None
        
class Task(object):
    """One unit of work on the queue: a named URL plus optional session state.

    ``customername`` is accepted for interface compatibility but not stored.
    """

    def __init__(self, taskname=None, url=None, customername=None,
                 icookie=None, header_dict=None):
        self.taskname = taskname
        self.url = url
        # session state the downloader loads when present
        self.cookie = icookie
        self.header = header_dict
        # queue placement flag: 0 = front of queue, 1 = back of queue
        self.ordertag = None

    def getfromtuple(self, ituple):
        """Populate taskname and url from a (taskname, url, ...) tuple."""
        self.taskname, self.url = ituple[0], ituple[1]
            
        
####################################################
def downloader_middlewares(task):
    """Downloader middleware: pick the downloader class registered for this
    task's name and run it. Register new task-name -> class pairs here."""
    downloader_classes = {
        'findcustomer_131458': Downloader_1,
    }
    chosen = downloader_classes.get(task.taskname)
    return chosen(task)

def spiders_middlewares(netreturn):
    """Spider middleware: pick the spider class registered for this page's
    name and parse the page with it. Register new name -> class pairs here."""
    spider_classes = {
        'findcustomer_131458': Spiders_Findcustomer_131458,
    }
    chosen = spider_classes.get(netreturn.name)
    return chosen(netreturn)

############################################
class NetReturn(object):
    """One downloaded page plus the metadata needed to route and chain it."""

    def __init__(self):
        self.name = None      # selects which spider interprets this page
        self.task = None      # the Task this download came from
        self.contents = None  # raw page body returned by the fetch
        self.cookie = None    # cookie to hand to the next task, if any
        
class Downloader(object):
    """Base class for downloaders; each fetch appends a NetReturn here."""

    def __init__(self):
        # pages produced by this downloader run
        self.netreturns = []
        
class Downloader_1(Downloader):
    """Cookie-aware downloader: fetches task.url once (retrying once on
    failure) and records the page as a NetReturn, passing the cookie jar on."""

    def __init__(self, task):
        Downloader.__init__(self)
        self.__go(task)

    def __go(self, task):
        """Fetch task.url and append the resulting NetReturn."""
        netreturn = NetReturn()
        netreturn.task = task
        netreturn.name = task.taskname
        # reuse the task's cookie jar when one was handed over,
        # otherwise start a fresh session
        if task.cookie is None:
            cj = cookielib.CookieJar()
        else:
            cj = task.cookie
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req = urllib2.Request(task.url)
        req.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6')
        try:
            contents = opener.open(req).read()
        except Exception:
            # best-effort retry once after a short pause; a second
            # failure propagates to the caller
            time.sleep(3)
            contents = opener.open(req).read()
        netreturn.contents = contents  # site serves GBK; decode downstream if needed
        self.netreturns.append(netreturn)

class Spider(object):
    """Base class for spiders; accumulates parse output for the engine."""

    def __init__(self):
        self.products = []  # scraped Product records from this page
        self.tasks = []     # follow-up Task objects to re-queue
        
class Spiders_Findcustomer_131458(Spider):
    def __init__(self,netreturn):
        Spider.__init__(self)
        global RUNDATE#用来记录任务的时间
        
        #天猫的页面编码为GBK，BeautifulSoup默认使用为utf8．
        contents=netreturn.contents
        #print contents
        
        links=SoupStrainer('div',{'class':'mainRate'})
        soup = BeautifulSoup(contents,links)
        for t in soup.find('ul',{'class':'itemRate'}).findAll('li'):
            customer=Customer()
            customer.userid=str(t.find('span',{'class':'id'}).contents[0]).replace('\r\n','').replace(' ','')
            customer.fullname=str(t.find('span',{'class':'nick'}).a.contents[0]).replace('\r\n','').replace(' ','')
            customer.buycredit=str(t.find('span',{'class':'buyer'}).contents[0]).replace('\r\n','').replace(' ','')
            customer.salecredit=str(t.find('span',{'class':'seller'}).contents[0]).replace('\r\n','').replace(' ','')
            #print customer.userid,customer.fullname
            self.products.append(customer)
            
            
        task=Task()
        task.ordertag=1
        task.taskname='findcustomer_131458'
        task.url='http://www.131458.com/tblist-'+str(int(re.search('-(\d+)-',netreturn.task.url).group(1))+1)+'-4.html'
        #task.url='http://www.131458.com/'+soup.find('ul',{'class':'RatePage'}).find('a',{'id':'Next'})['href']
        task.cookie=netreturn.cookie
        self.tasks.append(task)
        
        '''
        task=Task()
        task.ordertag=1
        task.taskname='findcustomer_fansfollows'
        task.url='http://my.taobao.com/'+customer.usercode+'/fans.htm'
        self.tasks.append(task)
        
        task2=Task()
        task2.ordertag=1
        task2.taskname='findcustomer_fansfollows'
        task2.url='http://my.taobao.com/'+customer.usercode+'/follows.htm'
        self.tasks.append(task2)
        '''

        
###############################################
class Product(object):
    """Base class for scraped records.

    Each subclass maps to a DB table of the same name; instance attributes
    are the columns, and only non-None attributes are persisted.
    """

    def __init__(self):
        pass

    def getInsertSQL(self):
        """Build an ``insert ignore`` statement from all non-None attributes.

        Returns the SQL string; the table name is the subclass name.
        SECURITY NOTE: values are inlined into the SQL text. Embedded single
        quotes are doubled (the SQL-standard escape, accepted by MySQL) so a
        quote in the data no longer breaks the statement, but parameterized
        queries (cursor.execute(sql, params)) remain the proper fix.
        """
        tablename = self.__class__.__name__
        columns = []
        values = []
        for name, value in self.__dict__.items():
            if value is not None:
                columns.append(name)
                values.append("'" + value.replace("'", "''") + "'")
        return ('insert ignore into ' + tablename +
                '(' + ','.join(columns) + ') values(' + ','.join(values) + ')')

class Commodity(Product):
    """A scraped commodity record (one row of the ``Commodity`` table)."""

    def __init__(self):
        Product.__init__(self)
        # every column starts unset; only non-None fields get persisted
        for field in ('id', 'fromwhere', 'shop', 'name', 'href', 'cate', 'brand'):
            setattr(self, field, None)
            
class Customer(Product):
    """A scraped buyer record (one row of the ``Customer`` table)."""

    def __init__(self):
        # every column starts unset; only non-None fields get persisted
        for field in ('usercode', 'userid',
                      'fullname',       # full display name
                      'sex', 'born_month', 'born_day', 'postcode',
                      'fans_count', 'follows_count',
                      'salecredit', 'buycredit',
                      'email', 'qq'):
            setattr(self, field, None)
        
class CustomerBuyRecord(Product):
    """A buyer's purchase record (kept for reference only)."""

    def __init__(self):
        # every column starts unset; only non-None fields get persisted
        for field in ('fullname',               # full display name
                      'guessname',              # simplified/guessed name
                      'vip',                    # membership level
                      'credit',                 # credit rating
                      'buycomidity_id', 'buycomidity_fullname',
                      'buytime', 'buyprice'):
            setattr(self, field, None)
        
class CustomerJudgeRecord(Product):
    """A buyer's review record, used to filter for useful user names."""

    def __init__(self):
        # every column starts unset; only non-None fields get persisted
        for field in ('fullname',               # full display name
                      'guessname',              # simplified/guessed name
                      'vip',                    # membership level
                      'credit',                 # credit rating
                      'buycomidity_id', 'buycomidity_fullname',
                      'buycomidity_judge', 'buycomidity_judgetime'):
            setattr(self, field, None)
        

        
        
        
########################################
class MyDatabaseTmall(object):
    """Thin wrapper around a MySQL connection to the ``tmall`` database,
    plus an optional batch list of queued statements.

    SECURITY NOTE: connection credentials are hard-coded below; they should
    be moved to configuration or environment variables.
    """

    def __init__(self):
        self.conn = MySQLdb.connect(host='localhost', user='root',
                                    passwd='08034615', db='tmall',
                                    port=3306, charset='utf8')
        self.cursor = self.conn.cursor()
        self.sqllist = []   # statements queued for runsqllist()
        self.sqlcount = 0   # number of queued statements

    def addsql(self, sql):
        """Queue a statement for later execution by runsqllist()."""
        self.sqllist.append(sql)
        self.sqlcount += 1

    def runsqllist(self):
        """Execute (and commit) every queued statement, in order."""
        for sql in self.sqllist:
            self.runsqlsolo(sql)

    def runsqlsolo(self, sql):
        """Execute one statement and commit immediately."""
        self.cursor.execute(sql)
        self.conn.commit()

    def fetchdata(self, sql):
        """Execute a query and return all rows as a tuple of tuples."""
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def getfielddata(self, fieldname_tuple, table):
        """Select the named fields from ``table`` and return all rows.

        SECURITY NOTE: field and table names are concatenated straight into
        the SQL text; only call this with trusted identifiers.
        """
        sql = 'select ' + ','.join(fieldname_tuple) + ' from ' + table
        return self.fetchdata(sql)
        
##################################################


if __name__=='__main__':
    # seed task: start scraping at page 17075 of the 131458 buyer list
    ituple=(('findcustomer_131458','http://www.131458.com/tblist-17075-4.html'),
            )
    if RUNDATE=='':
        # stamp this run with today's date (YYYY-MM-DD) when not preset
        RUNDATE=time.strftime('%Y-%m-%d', time.localtime())
        print RUNDATE
    scrapyEngine(ituple)