#! /usr/bin/python
#coding=utf-8
import hashlib,re,sys
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker
from bs4 import BeautifulSoup
from  datetime  import  *
import gzip,urllib2,random,time
import urllib
import StringIO
#import codecs #处理"°C" 这样的符号用
#look  =  codecs.lookup( " utf-8 " )
# i=look.decode(i)

# Python 2 hack: force the process-wide default encoding to UTF-8 so the
# implicit str<->unicode coercions done by the scraping code below do not
# raise UnicodeDecodeError on non-ASCII page content.
# reload(sys) restores sys.setdefaultencoding, which site.py deletes at
# interpreter startup.
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)
print sys.getdefaultencoding()



class Category(object):
    # Model for the `category` table, wired up via the classic mapper()
    # call in Cat.__init__. The Column assignments below duplicate the
    # Table definition there; classic mapping takes its columns from the
    # Table, so these class attributes serve only as a field reference.
    # `level` is written as a string by getcats(): '1' = top category,
    # '2' = sub category, '3' = bottom category.
    __name__='category'  # NOTE(review): looks like a leftover for `__tablename__`; harmless with classic mapping
    id=Column(Integer, primary_key=True)
    name=Column(CHAR(255))
    url=Column(CHAR(255))
    title=Column(CHAR(255))
    keywords=Column(CHAR(255))
    description=Column(Text)
    desc=Column(Text)
    url_code=Column(Unicode(255))   # upper-cased md5 of `url`, used as a join key
    level=Column(Unicode(255))

class Top(object):
    # Model for the `top` table ("New Arrivals" recommendations),
    # mapped classically in Cat.__init__; populated by Cat.getcat_top().
    # The Column attributes duplicate the Table definition there and are
    # kept only as a field reference.
    __name__='top'  # NOTE(review): looks like a leftover for `__tablename__`; harmless with classic mapping
    id=Column(Integer, primary_key=True)
    name=Column(CHAR(255))
    url_code=Column(Unicode(255))   # upper-cased md5 of `url`
    url=Column(Unicode(255))
    cat_code=Column(Unicode(255))   # upper-cased md5 of the owning category URL
    order=Column(Integer)           # 1-based display position in the recommendation list
    focus_name=Column(Unicode(255)) # e.g. "New Arrivals" (see getcat_top)

class Product(object):
    # Model for the `product` table, mapped classically in Cat.__init__;
    # populated by Cat.products() from category listing pages. The Column
    # attributes duplicate the Table definition there and are kept only
    # as a field reference.
    __name__='product'  # NOTE(review): looks like a leftover for `__tablename__`; harmless with classic mapping
    id=Column(Integer, primary_key=True)
    name=Column(CHAR(255))
    url_code=Column(Unicode(255))   # upper-cased md5 of `url`
    url=Column(Unicode(255))
    image=Column(Unicode(255))      # listing thumbnail src
    price=Column(Unicode(255))      # price text as scraped (currency span)
    cat_code=Column(Unicode(255))   # url_code of the owning Category
    page=Column(Integer)

class ProductExt(object):
    # Model for the `product_ext` table (per-product detail data),
    # mapped classically in Cat.__init__; populated by Cat.product_one().
    # The Column attributes duplicate the Table definition there and are
    # kept only as a field reference.
    __name__='product_ext'  # NOTE(review): looks like a leftover for `__tablename__`; harmless with classic mapping
    id=Column(Integer, primary_key=True)
    product_id=Column(Integer)      # FK-style reference to Product.id (no constraint declared)
    images=Column(Text)             # comma-joined jqimg URLs from the detail page
    summary=Column(Text)
    description_m=Column(Text)

class Cat:
    def __init__(self):
        #self.engine =create_engine('mysql://root:123456@localhost/focal?charset=utf8',echo=True)
        self.engine =create_engine('mysql://focal:focalweiming@localhost/focal?charset=utf8',echo=True)
        metadata = MetaData()
        category_table=Table('category',metadata,
            Column('id',Integer, primary_key=True),
            Column('name',CHAR(255)),
            Column('url',CHAR(255)),
            Column('title',CHAR(255)),
            Column('keywords',CHAR(255)),
            Column('description',Text),
            Column('desc',Text),
            Column('url_code',Unicode(255)),
            Column('level',Unicode(255)),
        )
        top_table=Table('top',metadata,
            Column('id',Integer, primary_key=True),
            Column('name',CHAR(255)),
            Column('url_code',Unicode(255)),
            Column('url',Unicode(255)),
            Column('cat_code',Unicode(255)),
            Column('order',Integer),
            Column('focus_name',Unicode(255)),
        )
        product_table=Table('product',metadata,
            Column('id',Integer, primary_key=True),
            Column('name',CHAR(255)),
            Column('url_code',Unicode(255)),
            Column('url',Unicode(255)),
            Column('cat_code',Unicode(255)),
            Column('image',Unicode(255)),
            Column('price',Unicode(255)),
            Column('page',Integer),
        )
        productext_table=Table('product_ext',metadata,
            Column('id',Integer, primary_key=True),
            Column('product_id',Integer),
            Column('images',Text),
            Column('summary',Text),
            Column('description_m',Text),
        )
        metadata.create_all(self.engine)
        mapper(Category,category_table)
        mapper(Top,top_table)
        mapper(Product,product_table)
        mapper(ProductExt,productext_table)
        Session = sessionmaker()
        Session.configure(bind=self.engine)
        self.db = Session()

    def getcat_top(self,url):
        html=self.get_gzip(url=None)
        soup = BeautifulSoup(html)
        NewArrivals=soup.find(name="ul",attrs={"class":"channelRecommend_products clearfix"})
        product_list=NewArrivals.find_all(name="li")
        i=1
        for pone in product_list:
            topm=Top()
            topm.url=pone.p.a.get('href').strip()
            #topm.img= pone.p.a.img.get('src').strip()
            #price=pone.find(name="span",attrs={"class":"f18 mr5 "})
            #print price.text.strip()
            topm.name=pone.p.a.img.get('alt').strip()
            topm.url_code=hashlib.md5(topm.url).hexdigest().upper()
            topm.cat_code=hashlib.md5('http://www.focalprice.com/apple-accessories/ca-001.html').hexdigest().upper()
            topm.order=i
            topm.focus_name="New Arrivals"
            i=i+1
            self.db.add(topm)
            self.db.flush()
            self.db.commit()
        # #print sub_cat
    def getcats(self):
        url='http://dynamic.focalprice.com/SiteMap'
        print "load SiteMap html in : %s" % (url)
        html=self.get_gzip(url=url)
        #print html
        soup = BeautifulSoup(html)
        print "Find name h4:"
        topcat=soup.find_all(name="h4")
        print topcat
        print "TOP Category start"
        for tcat in topcat:
            catm = Category()
            catm.name=tcat.a.string.strip()
            catm.url=tcat.a.get('href').strip()
            catm.url_code=hashlib.md5(catm.url).hexdigest().upper()
            catm.level='1'
            self.db.add(catm)
            self.db.flush()
            self.db.commit()
            print "TOP Category: %s" % (catm.name)
        print "TOP Category OK!"
        print "Sub Category start"
        print "find_all name=a :"
        towcat=soup.find_all(name="a",attrs={"class":"sub_name"})
        print towcat
        for tcat in towcat:
            catm = Category()
            catm.name=tcat.string.strip()
            catm.url=tcat.get('href').strip()
            catm.url_code=hashlib.md5(catm.url).hexdigest().upper()
            catm.level='2'
            self.db.add(catm)
            self.db.flush()
            self.db.commit()
            print "Sub Category: %s OK!" % (catm.name)
        print "Bottom Category start"
        print "find_all name=a :"
        botcat=soup.find_all(name="a",attrs={"class":"bottom_name"})
        print botcat
        for tcat in botcat:
            catm = Category()
            catm.name=tcat.string.strip()
            catm.url=tcat.get('href').strip()
            catm.url_code=hashlib.md5(catm.url).hexdigest().upper()
            catm.level='3'
            self.db.add(catm)
            self.db.flush()
            self.db.commit()
            print "Bottom category: %s OK! " % (catm.name)

    def products(self):
        print "Select Category level 2 and 3:"
        q=self.db.query(Category).filter(Category.level.in_((2,3)))
        for cat in q:
            ran=random.randint(3,20)
            print "start time :" , datetime.utcfromtimestamp(time.time())
            print "Time sleep %s" % (ran)
            time.sleep(ran)
            print "sleep time :" , datetime.utcfromtimestamp(time.time())
            print "Load product list html in : ",cat.url
            page=1
            pagenum=2
            html=self.get_gzip(url=cat.url)
            #print html
            bf = BeautifulSoup(html)
            while page<=pagenum:
                if(page==1):
                    #获取页面的页码数
                    ran=random.randint(3,20)
                    print "start time :" , datetime.utcfromtimestamp(time.time())
                    print "Time sleep %s" % (ran)
                    time.sleep(ran)
                    print "sleep time :" , datetime.utcfromtimestamp(time.time())
                    print "Show page number ... "
                    pageinfo=bf.find(name="div",attrs={"class":"pages fright"})
                    if pageinfo != None and pageinfo.span != None:
                        pagenum=int(re.findall( r'\d+\Z' ,pageinfo.span.text)[0])
                        print "page max number is :",pagenum
                else:
                    url='-%s.html' % (page)
                    url=cat.url.replace('.html',url)
                    print "Load html in:%s" % (url)
                    html=self.get_gzip(url=url)
                    #print html
                print "Load page in %s , total %s" % (page,pagenum)
                bf = BeautifulSoup(html)
                print "find all name=div :"
                plist=bf.find_all(name="div",attrs={"class":"itembox ml15 mb20"})
                print plist
                print "Show product list ..."
                for pone in plist:
                    product=Product()
                    proimg=pone.find(name="li",attrs={"class":"proImg"})
                    product.url= proimg.a.get("href").strip()
                    product.image= proimg.a.img.get("src").strip()
                    product.name= proimg.a.img.get("alt").strip()
                    proprice=pone.find(name="span",attrs={"currencycode":"USD"})
                    product.price= proprice.text.strip()
                    product.cat_code=cat.url_code
                    product.url_code=hashlib.md5(product.url).hexdigest().upper()
                    self.db.add(product)
                    self.db.flush()
                    self.db.commit()
                print "Load page in %s , total %s" % (page,pagenum)
                print "Netx pages"
                page=page+1
                if page>pagenum:
                    print "Now page is last page ..."

    def product_one(self):
        print "Select product list is all"
        #q=self.db.query(Product).all()
        #4084
        q=self.db.query(Product).filter(Product.id > 18160)
        i=1
        for pro in q:
            # url=urllib.quote(pro.url.lstrip("http://"))
            # print url
            # pro.url=url.replace('%A1%3','')
            # print url
            # url=look.decode(pro.url)
            # print url
            # pro.url=url[0].replace('\xb0','')
            # print pro.url
            ran=random.randint(2,6)
            print "start time :" , datetime.utcfromtimestamp(time.time())
            print "Time sleep %s" % (ran)
            #time.sleep(ran)
            print "sleep time :" , datetime.utcfromtimestamp(time.time())
            print "Load product html in: " , pro.url
            try:
                html=self.get_gzip(pro.url)
            except:
                print "LOAD HTML ERROR!!!!! PASS!"
                continue
            bf = BeautifulSoup(html)
            print "find div is class=description_m"
            description_m=bf.find(name="div",attrs={"class":"description_m"})
            #print description_m
            print "find div is class=summary"
            summary=bf.find(name="div",attrs={"id":"summary"})
            #print summary
            jqzoom=bf.find(name="ul",attrs={"id":"imgs"})
            jqzoom=jqzoom.find_all(name="img")
            images=""
            for img in jqzoom:
                #images=images+img.get("src")+"|:|"
                images=images+ img.get("jqimg")+","
                #images=images+ img.get("jqimg2")+"|x|"
            extm = ProductExt()
            extm.product_id=pro.id
            extm.images=images.strip()
            extm.summary=summary.text
            extm.description_m=description_m
            self.db.add(extm)
            self.db.flush()
            self.db.commit()
            print "add product %s is OK!" % (i)
            i=i+1



    def get_gzip(self,url):
        header = {'Accept-Charset':'GBK,utf-8;q=0.7,*;q=0.3','User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.151 Safari/534.16'}
        request = urllib2.Request(url,headers=header)
        request.add_header('Accept-encoding', 'gzip')
        opener = urllib2.build_opener()
        f = opener.open(request)
        print f
        isGzip = f.headers.get('Content-Encoding')
        if isGzip :
            compresseddata = f.read()
            compressedstream = StringIO.StringIO(compresseddata)
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
        else:
            data = f.read()
        return data

    def get_html(self,url):
        if url != None:
            #url="http://www.focalprice.com/iphone-5s/ca-001024.html"
            page = urllib.urlopen(url)
            html = page.read()
        else:
            html="""

            """
        return html

if __name__ == '__main__':
    # Crawl pipeline entry point. Each stage is toggled by commenting the
    # call in/out; as committed, only the product-detail stage
    # (product_one) actually runs — getcats() and products() must have
    # been run previously so the tables are populated.
    #init_db()
    print "Prepare data. is come soon ..."
    cat=Cat()
    print "Load Category data ..."
    #cat.getcats()
    print "Load Product list data ..."
    #cat.products()
    print "Load Product data ..."
    cat.product_one()
    print "OK!!!!"