# -*- coding: utf-8 -*- 

'''从天猫的用户关注粉丝关系网中取得用户的信息（用户名　ＩＤ和编码）'''

import urllib2
import cookielib
from BeautifulSoup import BeautifulSoup,SoupStrainer
import time
import MySQLdb
import re


#On Windows 7 the block below (forcing the process default encoding to utf-8) must be enabled
import sys
default_encoding = 'utf-8'

if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)


RUNDATE=''#运行日期


def scrapyEngine(url_tuple):
    """Drive the scrape loop: seed the queue from (taskname, url, ref)
    triples, then repeatedly download, parse, and print the product SQL.

    url_tuple -- iterable of (taskname, url, referer) 3-tuples.
    """
    mydatabase=MyDatabaseTmall()  # NOTE(review): only used by the commented-out insert below
    scheduler=Scheduler()#task queue
    
    for t in url_tuple:
        task=Task()
        task.getfromtuple(t)
        scheduler.add_pre(task)
        
    while scheduler.count>0:
        curtask=scheduler.use()#pop the next task from the queue
        mydownloader=downloader_middlewares(curtask)#pick a downloader and fetch the page(s)
        for j in mydownloader.netreturns:#iterate over the downloaded pages
            myspider=spiders_middlewares(j)#dispatch the page to the matching spider
            if myspider!=None:#spider produced a result: process it
                if len(myspider.tasks)!=0:#push follow-up tasks discovered by the spider
                    scheduler.addlist(myspider.tasks)
                if len(myspider.products)!=0:#hand each scraped product to the persistence step
                    for curproduct in myspider.products:
                        sql=curproduct.getInsertSQL()
                        print sql
                        '''
                        try:
                            mydatabase.runsqlsolo(sql)
                        except:
                            time.sleep(30)
                            mydatabase.runsqlsolo(sql)
                            '''
            #break
class Scheduler(object):
    """FIFO task queue with optional high-priority insertion at the head.

    ``count`` mirrors ``len(self.tasks)`` and is read by callers, so it
    is kept up to date by every mutating method.
    """
    def __init__(self):
        self.tasks = []
        self.count = 0

    def add(self, task):
        """Append *task* at the tail of the queue."""
        self.tasks.append(task)
        self.count += 1

    def add_pre(self, task):
        """Insert *task* at the head of the queue (processed next)."""
        self.tasks.insert(0, task)
        self.count += 1

    def addlist(self, tasklist):
        """Enqueue every task: ordertag 0 goes to the head, others to the tail."""
        for item in tasklist:
            if item.ordertag == 0:
                self.add_pre(item)
            else:
                self.add(item)

    def use(self):
        """Pop and return the head task, or None when the queue is empty."""
        if self.count <= 0:
            return None
        self.count -= 1
        return self.tasks.pop(0)
        
class Task(object):
    """One unit of work for the scheduler: a URL plus request metadata."""
    def __init__(self, taskname=None, url=None, ref=None, icookie=None, header_dict=None):
        self.taskname = taskname    # dispatch key for the middleware tables
        self.url = url              # page to fetch
        self.ref = ref              # Referer header value, if any
        self.cookie = icookie       # cookie jar for the downloader to load, if present
        self.header = header_dict   # extra request headers, if present
        self.ordertag = None        # 0 = enqueue at the head, 1 = at the tail

    def getfromtuple(self, ituple):
        """Populate taskname, url and ref from a (taskname, url, ref) tuple."""
        self.taskname = ituple[0]
        self.url = ituple[1]
        self.ref = ituple[2]
            
        
####################################################
def downloader_middlewares(task):#downloader middleware: pick the downloader class for this task
    """Instantiate and return the downloader registered for task.taskname.

    Raises KeyError with a clear message for an unregistered taskname
    (the original raised a cryptic ``'NoneType' object is not callable``
    TypeError because ``dict.get`` returned None).
    """
    refDict={'findcustomerjudgerecord':Downloader_2,
             }
    downloader_cls=refDict.get(task.taskname)
    if downloader_cls is None:
        raise KeyError('no downloader registered for taskname: %r' % (task.taskname,))
    return downloader_cls(task)

def spiders_middlewares(netreturn):#spider middleware: pick the spider class for this page
    """Instantiate and return the spider registered for netreturn.name.

    Returns None when no spider is registered; the caller (scrapyEngine)
    already guards with ``if myspider!=None``.  The original crashed with
    a TypeError instead -- e.g. for the 'findcustomer' follow-up tasks
    emitted by Spiders_Findcustomer_judgerecord, which has no entry here.
    """
    refDict={'findcustomerjudgerecord':Spiders_Findcustomer_judgerecord,
             }
    spider_cls=refDict.get(netreturn.name)
    if spider_cls is None:
        return None
    return spider_cls(netreturn)

############################################
class NetReturn(object):
    """One downloaded page plus the context needed to dispatch and parse it."""
    def __init__(self):
        self.name = None      # dispatch key deciding which spider parses this page
        self.task = None      # the Task that produced this page
        self.contents = None  # downloaded page body
        self.cookie = None    # cookies to carry into a follow-up request, if any
        
class Downloader(object):
    """Base class for downloaders; accumulates the fetched NetReturn objects."""
    def __init__(self):
        # NetReturn instances produced by the concrete subclass
        self.netreturns = []
        
class Downloader_2(Downloader):
    '''Downloader that sends a Referer header and keeps session cookies.'''
    def __init__(self,task):
        Downloader.__init__(self)
        self.__go(task)
        
    def __go(self,task):
        """Fetch task.url with task.ref as Referer; append one NetReturn."""
        #print 'ok'
        netreturn=NetReturn()
        netreturn.task=task
        netreturn.name=task.taskname
        cj = cookielib.CookieJar()  # fresh jar per request; NOTE(review): task.cookie is never loaded here
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        req=urllib2.Request(task.url)
        req.add_header('referer', task.ref)
        try:
            contents=opener.open(req).read()
        except:  # NOTE(review): bare except -- retries once after 3s, then lets any second error propagate
            time.sleep(3)
            contents=opener.open(req).read()
        # Tmall serves GBK; normalise the body to utf-8 for the spiders
        netreturn.contents=contents.decode('gbk').encode('utf-8')
        self.netreturns.append(netreturn)
        #print netreturn.contents
class Spider(object):
    """Base class for spiders; collects parse results."""
    def __init__(self):
        # Product instances parsed out of the page
        self.products = []
        # follow-up Task instances discovered while parsing
        self.tasks = []
        
class Spiders_Findcustomer_judgerecord(Spider):
    """Parse a Taobao/Tmall user profile page.

    On *.htm pages it builds an UPDATE record for the profile owner; on
    every page it collects the listed fans/followees as INSERT records
    and queues the 'next page' link as a follow-up task.
    """
    def __init__(self,netreturn):
        Spider.__init__(self)
        global RUNDATE#batch run date -- NOTE(review): declared global but never read or written in this method
        contents=netreturn.contents
        print contents
        print 'ok'
        #Tmall pages are GBK-encoded; BeautifulSoup defaults to utf-8 (the downloader already re-encoded the body).
        if netreturn.task.url[-4:]=='.htm':
            # Profile page itself: build an UPDATE record for its owner.
            customer=Customer()
            customer._insertupdate='update'
            customer.fullname=netreturn.task.customername  # NOTE(review): Task defines no 'customername' attribute -- AttributeError unless set elsewhere; verify
            links=SoupStrainer('div',{'class':'hp-user-info'})
            soup = BeautifulSoup(contents,links)
            customer.userid=soup.find('span',{'class':'action sns-widget-friendFollow follow-ui-green '})['data-userid']
            customer.usercode=re.search(r'http://my.taobao.com/(.*?)/',str(soup.find('li',{'class':'atten-item fans'}).a['href'])).group(1)# usercode taken from the my.taobao.com profile link
            try:
                #print soup.find('bd')#.find('img')
                customer.buycredit=re.search(r'_(.*?_.)\.',str(soup.find('div',{'class':'bd'}).find('img')['src'])).group(1)# credit rank parsed from the rank-icon filename
            except:
                pass
            try:
                customer.sex=str(soup.find('div',{'class':'line'}).contents[0]).replace('\t','').replace('\r\n','')
            except:# best effort: the field may be absent from the page
                pass
            try:
                soup2=soup.find('span',{'id':'J_HomePageConstellation'})
                try:
                    customer.born_month=soup2['data-month']
                    customer.born_day=soup2['data-day']
                    try:
                        customer.postcode=soup2.contents[0]# NOTE(review): stores the constellation span's text -- confirm this is really a postcode
                    except:
                        pass
                except:
                    pass
            except:# every lookup in this section is best-effort; missing fields stay None
                pass
            customer.usetag='y'
            links3=SoupStrainer('div',{'class':'fans-wrap'})
            soup3= BeautifulSoup(contents,links3)
            # fan/followee counts appear as "(N)" inside the tab labels
            customer.fans_count=re.search('\((.*?)\)',str(soup3.find('li',{'class':'item s-active '}).a.contents[0])).group(1)
            customer.follows_count=re.search('\((.*?)\)',str(soup3.find('li',{'class':'item '}).a.contents[0])).group(1)
            self.products.append(customer)
        
        # Every page (profile or paginated list): collect the fans listed on it.
        link_detail=SoupStrainer('div',{'class':'fans-list clearfix'})
        soup_detail=BeautifulSoup(contents,link_detail)
        if soup_detail!=None:
            for tt in soup_detail.findAll('div',{'class':'fans-item'}):
                #print tt
                customer2=Customer()
                customer2._insertupdate='insert'
                customer2.userid=re.search(r'userId=(.*?)&',str(tt.find('div',{'class':'bd'}).find('a',{'class':'pic'}).img['src'])).group(1)
                try:
                    customer2.fullname=tt.find('div',{'class':'bd'}).find('a',{'class':'name'}).contents[0]
                except:
                    pass
                
                customer2.usercode=tt.find('div',{'class':'bd'}).find('a',{'class':'pic'})['href'][21:]# drop the fixed URL prefix, keep the usercode tail
                try:
                    customer2.buycredit=re.search(r'_(.*?_.)\.',str(tt.find('div',{'class':'bd'}).find('img',{'class':'rank'})['src'])).group(1)
                except:
                    pass
                #print customer2.id
                
                self.products.append(customer2)
                
        # Queue the next page of the fan list, if a 'next' link exists.
        link_next=SoupStrainer('p',{'class':'pg-list'})
        soup_link=BeautifulSoup(contents,link_next)
        task_next=Task()
        task_next.ordertag=1
        task_next.taskname='findcustomer'# NOTE(review): no middleware entry for 'findcustomer' (only 'findcustomerjudgerecord') -- this follow-up task cannot be dispatched; verify
        try:
            task_next.url=soup_link.find('a',{'class':'next'})['href']
            self.tasks.append(task_next)
        except:# no 'next' link means last page; queue nothing
            pass
        
                
        

        
###############################################
class Product(object):
    """Base class for scraped records; renders itself as a SQL statement.

    Subclasses add one public attribute per table column.  Attributes
    whose names start with '_' are control fields and are never emitted:
      _insertupdate -- 'insert' or 'update'; selects the statement kind
      _keyfield     -- column name used in the WHERE clause for updates
    """
    def __init__(self):
        self._insertupdate=None
        self._keyfield=None

    @staticmethod
    def _escape(value):
        """Escape backslashes and quotes so *value* is a safe MySQL string literal."""
        return value.replace('\\','\\\\').replace("'","\\'").replace('"','\\"')

    def _columns(self):
        """Return [(name, value)] for every public, non-None attribute."""
        return [(name,value) for name,value in self.__dict__.items()
                if name[0]!='_' and value is not None]

    def getInsertSQL(self):
        """Render this record as an INSERT IGNORE or UPDATE statement.

        The table name is the concrete class name.  Values are escaped --
        the original concatenated scraped values raw, so any value
        containing a quote produced broken (and injectable) SQL.

        Raises ValueError if no public attribute is set (the original
        silently returned malformed SQL in that case).
        Returns None when _insertupdate is neither 'insert' nor 'update',
        matching the original fall-through.
        """
        tablename=self.__class__.__name__
        cols=self._columns()
        if not cols:
            raise ValueError('no fields set on %s' % tablename)
        if self._insertupdate=='insert':
            names=','.join(n for n,_ in cols)
            values=','.join("'"+self._escape(v)+"'" for _,v in cols)
            return 'insert ignore into '+tablename+'('+names+') values('+values+')'
        elif self._insertupdate=='update':
            assigns=','.join(n+"='"+self._escape(v)+"'" for n,v in cols)
            where=' where '+self._keyfield+'="'+self._escape(self.__dict__[self._keyfield])+'"'
            return 'update '+tablename+' set '+assigns+where
class Commodity(Product):
    """A scraped commodity (item) record; one attribute per DB column."""
    def __init__(self):
        Product.__init__(self)
        self.id = None         # item id
        self.fromwhere = None  # source site/listing
        self.shop = None
        self.name = None
        self.href = None       # item page URL
        self.cate = None       # category
        self.brand = None
            
class Customer(Product):
    """A scraped Taobao/Tmall user record; one attribute per DB column."""
    def __init__(self):
        # BUG FIX: the original skipped Product.__init__, so _insertupdate
        # was never initialised and getInsertSQL() raised AttributeError
        # unless a caller set it first.
        Product.__init__(self)
        self._keyfield='userid'    # UPDATE statements match on userid
        self.usercode=None
        self.userid=None
        self.fullname=None         # full display name
        self.sex=None
        self.born_month=None
        self.born_day=None
        self.postcode=None         # NOTE(review): filled from the 'constellation' span text by the spider; verify it is a postcode
        self.fans_count=None
        self.follows_count=None
        self.email=None
        self.qq=None
        self.buycredit=None        # buyer credit rank (parsed from the rank-icon filename)
        self.salecredit=None
        self.usetag=None           # set to 'y' once the profile page has been scraped
        
        
class CustomerBuyRecord(Product):#kept for reference only
    """Record of one purchase by a user (not produced by any spider here)."""
    def __init__(self):
        # BUG FIX: the original never called Product.__init__, leaving
        # _insertupdate/_keyfield unset, so getInsertSQL() raised
        # AttributeError on such an instance.
        Product.__init__(self)
        self.fullname=None              # full user name
        self.guessname=None             # shortened / guessed name
        self.vip=None                   # membership level
        self.credit=None                # credit level
        self.buycomidity_id=None
        self.buycomidity_fullname=None
        self.buycomidity_shop=None
        self.buycomidity_sku=None
        self.buycomidity_cate=None
        self.buycomidity_brand=None
        self.buytime=None
        self.buyprice=None
        
class CustomerJudgeRecord(Product):#filters out the subset of useful user names
    """Record of one review/rating by a user, used to pick useful names."""
    def __init__(self):
        # BUG FIX: the original never called Product.__init__, leaving
        # _insertupdate/_keyfield unset, so getInsertSQL() raised
        # AttributeError on such an instance.
        Product.__init__(self)
        self.fullname=None              # full user name
        self.guessname=None             # shortened / guessed name
        self.vip=None                   # membership level
        self.credit=None                # credit level
        self.buycomidity_id=None
        self.buycomidity_fullname=None
        self.buycomidity_shop=None
        self.buycomidity_sku=None
        self.buycomidity_cate=None
        self.buycomidity_brand=None
        self.buytime=None
        self.buyprice=None
        

        
########################################
class MyDatabaseTmall(object):
    def __init__(self):
        self.conn=conn=conn=MySQLdb.connect(host='115.238.55.68',user='root',passwd='123,abc',db='tmall',port=3306,charset='utf8' )
        self.cursor = conn.cursor()
        self.sqllist=[]
        self.sqlcount=0
        
    def addsql(self,sql):
        self.sqllist.append(sql)
        self.sqlcount+=1
    
    def runsqllist(self):
        for sql in self.sqllist:
            self.runsqlsolo(sql)
        
    def runsqlsolo(self,sql):
        print sql
        self.cursor.execute(sql)#.decode('utf-8').encode('gbk'))
        self.conn.commit()
        
    def fetchdata(self,sql):
        #print sql
        self.cursor.execute(sql)
        ituple=self.cursor.fetchall()
        return ituple
    
    def getfielddata(self,fieldname_tuple,table):
        sql='select '
        for i in fieldname_tuple:
            sql+=i+','
        #print sql
        sql=sql[:-1]+' from '+table
        #print sql
        ituple=self.fetchdata(sql)#.decode('utf-8').encode('gbk'))
        return ituple
        
##################################################


if __name__=='__main__':
    # Endless polling loop: pull up to 200 unprocessed users from the DB,
    # build one scrape task per user, and hand the batch to the engine.
    # NOTE(review): the extra indent level exists because the try/except
    # retry wrapper below is commented out.
    while True:
        #try:
            ilist=[]
            xxx=MyDatabaseTmall()
            mytuple=xxx.fetchdata('select userid,usercode from customer where usetag is null and usercode is not null limit 200')
            for x,y in mytuple:
                #print x,y
                # (taskname, rating-page URL built from usercode, referer built from userid)
                ilist.append(('findcustomerjudgerecord','http://rate.taobao.com/member_rate.htm?callback=shop_rate_list&content=1&result=&from=rate&user_id='+y+'&identity=1&rater=3&direction=1&page=1','http://detail.tmall.com/item.htm?id='+x))
            if RUNDATE=='':
                # record the batch start date once (module-level assignment, so no 'global' needed)
                RUNDATE=time.strftime('%Y-%m-%d', time.localtime())
                print RUNDATE
            scrapyEngine(ilist)
        #except:
            #time.sleep(60)
            #continue