#!/usr/bin/env python
# vim:fileencoding=gbk

"""
Get products information from www.360buy.com.cn
"""
import urllib
import time
from time import sleep
import re
import glob
import threading
import os
import sys
from sys import stdout
import logging
import logging.config
import logging.handlers
import img2string
from img2string import imgfile2string, img2string

# Site root; prepended to relative category paths to build absolute URLs.
SITE_HOME='http://www.360buy.com'
# Captures relative sub-category links ("products/...") on the home page.
SUB_CATEGORIES_PREFIX='<em><a href="(products/.*?)">.*?</a></em>'
# Captures the total product count on a sub-category listing page.  The
# non-ASCII runs are GBK-encoded Chinese rendered as mojibake in this view
# (presumably "... N items" around the count) -- do not edit them.
MAXPRODUCT_PATTERN=u'¹².*?>(\d+)<.*?¼þÉÌÆ·'
ONEHOUR=3600  # per-thread join timeout, in seconds

def getpage(url=None, retryn=0):
    """
    Fetch ``url`` and decode the response body with the 'gbk' codec.

    url    -- URL string to retrieve; ``None`` is a no-op returning ``None``.
    retryn -- internal retry counter; after 4 failed attempts "" is returned.

    Returns the decoded page content, "" on giving up, or None for no url.
    """
    if retryn == 4:
        logging.critical('Stop retrying to get %s' % url)
        return ""
    if url is None:
        return None
    logging.info('Trying to retrieve ' + url + '.')
    try:
        f = urllib.urlopen(url)
        logging.info('Open URL success, return the content')
        currenttime = time.time()
        try:
            content = f.read()
            logging.debug('Read OK')
        finally:
            # Always release the connection, even if read() raises.
            f.close()
        content = content.decode('gbk', 'ignore')
        logging.debug('Decode OK')
        duration = time.time() - currenttime
        logging.info('Use ' + str(duration) + ' seconds to get content of the page')
        return content
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        logging.critical("Failed to retrieve the content of " + url)
        sleep(5)
        return getpage(url, retryn + 1)

def getpage_nodecode(url=None, retryn=0):
    """
    Fetch ``url`` and return the raw (undecoded) response body.

    url    -- URL string to retrieve; ``None`` is a no-op returning ``None``.
    retryn -- internal retry counter; after 4 failed attempts "" is returned.
    """
    if retryn == 4:
        logging.critical('Stop retrying to get %s' % url)
        return ""
    if url is None:
        return None
    logging.info('Trying to retrieve ' + url + '.')
    try:
        currenttime = time.time()
        f = urllib.urlopen(url)
        logging.info('Open URL success, return the content')
        try:
            content = f.read()
            logging.debug('Read OK')
        finally:
            # Always release the connection, even if read() raises.
            f.close()
        duration = time.time() - currenttime
        logging.info('Use ' + str(duration) + ' seconds to get content of the page')
        return content
    except Exception:
        logging.critical("Failed to retrieve the content of " + url)
        sleep(5)
        # Bug fix: the retry used to call getpage(), which DECODES the body;
        # retry with getpage_nodecode() so raw bytes are still returned.
        return getpage_nodecode(url, retryn + 1)
        
                
def getsubcategories():
    """Scrape the site home page and return the list of sub-category URLs."""
    homepage = getpage(SITE_HOME).replace('\n', '')
    # Deduplicate the relative paths, then make each one absolute.
    paths = set(re.findall(SUB_CATEGORIES_PREFIX, homepage))
    categories = [SITE_HOME + '/' + path for path in paths]
    logging.info('There are %d Sub-Categories' % len(categories))
    return categories

    

def getmaxpage(firstpage_content):
    """
    Return the number of result pages of a sub-category.

    firstpage_content -- decoded HTML of the sub-category's first page.

    The site lists 32 products per page, so the page count is the ceiling
    of product_count / 32.  Returns 0 when the count cannot be found.
    """
    content = firstpage_content.replace('\n', '')
    result = re.search(MAXPRODUCT_PATTERN, content)
    if result:
        count = int(result.groups()[0])
        # Ceiling division.  The old ``count/32+1`` over-counted one page
        # whenever count was an exact multiple of 32, and yields a float
        # under Python 3's true division.
        r = (count + 31) // 32
        logging.info('max page number is %d' % r)
        return r
    logging.critical("Failed to get the max page number, return 0")
    return 0

def getproductnum(content):
    """Return the total product count of a sub-category page, or 0 on failure.

    content -- decoded HTML of the sub-category's first page.
    """
    match = re.search(MAXPRODUCT_PATTERN, content.replace('\n', ''))
    if match is None:
        logging.critical("Failed to get the product number, return 0")
        return 0
    return int(match.group(1))

def convertprice(id, url):
    """
    Download the price image at ``url`` and OCR it into a price string.

    id  -- product id, used to name the temporary image file under tmp/
           (NOTE: shadows the builtin ``id``; kept for interface stability).
    url -- URL of the price image.

    Returns the recognized price string, or "0" on any failure.
    """
    content = getpage_nodecode(url)
    try:
        filename = os.path.join(PATHPREFIX, 'tmp/') + id + '.png'
        # ``with`` guarantees the file handle is closed even if write() fails
        # (the original leaked the handle on error).
        with open(filename, 'wb') as f:
            f.write(content)
        return imgfile2string(filename)
    except Exception:
        # Best-effort by design: log and report "0" rather than crash the
        # scraping thread.  Narrowed from a bare except.
        logging.critical('Failed to convert price for %s' % id)
        return "0"
    

def getitem(content):
    """Read content line by line to get specific information of each product item
       
       And save the info to the file of each item. Note I don't split the getitem and save item code 
       to two functions. Maybe later it can be considered.
       
       The filename is combined as 360buyid. 360buyid is the product id on 360buy. 
       Setting filename as this format means later we can use 360buyid as the key to access a specific product.

       The filepath will be in ./360buy/, hope the number of 360buy products will not exceed the 
       limit of file number in one directory. So that later we can easily search a product in one
       single directory ./360buy/.

       The content format of each file will be like following:
       [GENERAL]
       ID=360buyid
       URL=url
       TITLE=title
       IMGSRC=img
       [PRICE]
       DATETIME1        PRICE1#Seperate by table 
       DATETIME2        PRICE2

       Later there will be other programe processing the [PRICE] part of the file to get useful information.

       Relies on module globals set in __main__: PATHPREFIX, plist, plist_sema.
    """    
    # Reusable holder for the fields of the product currently being processed.
    productItem={'id':"",'url':"",'title':"",'img':"",'price':"0"}
    needdetail=1    # NOTE(review): never read below -- apparently dead.
    product_count=0 #each page has at most 32 products.
    MAXPAGENUM=32   # NOTE(review): never read below -- apparently dead.
    # Strip newlines so the single-line regexes below can span "lines".
    content=content.replace('\n\r','')
    content=content.replace('\n','')
    # Narrow the HTML down to the product-list container first.
    PATTERN=u'<div class="Product_List_S7">(.*?)</ul>'
    result=re.findall(PATTERN,content)
    content=result[0]
    #print(content.encode('utf=8'))
    # Per-item capture groups: (img src, title, product url, product id,
    # price-image src).  The non-ASCII run is GBK-encoded Chinese rendered
    # as mojibake here (presumably the site's price label) -- do not edit.
    PATTERN=u'<dt>.*?<img.*?src="(.*?)".*?</dt><dd class="p_Name"><.*?title="(.*?)" href="(.*?)">.*?name=\'(.*?)\'.*?¾©¶«¼Û.*?src="(.*?)"'
    result=re.findall(PATTERN,content)
    logging.info('PATTERN match complete')
    if (result):
        for item in result:
            productItem['id']=item[3]
            productItem['url']=item[2]
            productItem['title']=item[1]
            productItem['img']=item[0]
            # Prices are served as images; OCR them into a string.
            productItem['price']=convertprice(productItem['id'],item[4])
            logging.info("productItem['id']="+productItem['id'])
            logging.info("productItem['url']="+productItem['url'])
            logging.info("productItem['img']="+productItem['img'])
            logging.info("productItem['title']="+productItem['title'])
            logging.info("productItem['price']="+productItem['price'])
            #Search the ./360buy/ directory to see whether there is filename contain productitem['id']
            #If yes, that means we don't need to get general info of the product ---- just read the price
            #of it.            
            filename=os.path.join(PATHPREFIX,'360buy/'+productItem['id'])
            fnamel=glob.glob(filename)
            if (fnamel):
                #file exists, just append price info into it                    
                try:
                    FPRODUCT=open(filename,'a')
                    FPRODUCT.write('%f\t%s\n'%(time.time(),productItem['price']))                
                    FPRODUCT.close()
                except:
                    logging.critical('Failed to open file %s to append content'%filename)                        
            else:
                #file doesn't exist, create the file, write header info, then write price info                     
                #and also add product title and corresponding filename into "plist" file
                try:
                    FPRODUCT=open(filename,'w')
                    FPRODUCT.write("[GENERAL]\n")
                    FPRODUCT.write("ID=%s\n"%productItem['id'])
                    FPRODUCT.write("URL=%s\n"%productItem['url'])
                    FPRODUCT.write("TITLE=%s\n"%productItem['title'].encode('utf-8'))
                    FPRODUCT.write("IMGSRC=%s\n"%productItem['img'])
                    FPRODUCT.write("[PRICE]\n")
                    FPRODUCT.write('%f\t%s\n'%(time.time(),productItem['price']))
                    FPRODUCT.close()
                except:
                    logging.critical('Failed to open file %s to write content'%filename)                                                

                # plist is shared across scraper threads; serialize appends.
                plist_sema.acquire()
                plist.append(str(time.time())+'::'+productItem['id']+'::'+productItem['title'])
                plist_sema.release()                    

def getrealid(url=None):
    """Fetch ``url`` and return the product's real model id found in it, or ""."""
    content = getpage(url)
    logging.info("Length of content of " + url + " is " + str(len(content)))
    # The leading non-ASCII run is a mis-encoded label (presumably the Chinese
    # "model number" heading) kept byte-for-byte from the original pattern.
    match = re.search("äº§ååå·.*</span><span>(.*)&nbsp;</span></li>", content.encode('utf-8'))
    if not match:
        return ""
    realid = match.groups()[0].strip()
    # Swap characters illegal in filenames for their fullwidth stand-ins.
    for plain, fullwidth in (('/', '£¯'), ('*', '¡Á'), ('?', '£¿')):
        realid = realid.replace(plain, fullwidth)
    logging.info("result=" + realid)
    return realid

def handle_category(url):
    """Scrape every result page of one sub-category (thread entry point).

        url -- URL of the sub-category's first page, ending in "-N.html".

        Fetches page 1, derives the page count from it, then walks pages
        2..maxpagenum handing each page's HTML to getitem().  Always
        releases pool_sema (module global set in __main__) before returning.
        TODO: make this function multi-threaded internally.
    """

    
    starttime=time.time()
    logging.info(url)
    content=getpage(url)
    logging.info("Length of content is %d"%len(content))
    if (len(content)==0):
        # getpage() gave up after its retries; nothing to scrape here.
        logging.critical("Thread %s stopped due to fail to get the url"%threading.currentThread().getName())            
    else:
        maxpagenum=int(getmaxpage(content))
        logging.info("MAX page number is %d"%maxpagenum)
        currenttime=time.time()
        getitem(content)
        logging.info("Use "+str(time.time()-currenttime)+" seconds to handle content")
        # Split ".../123-456-1.html" into prefix and page number so the
        # remaining pages can be built as prefix + str(i) + '.html'.
        urlpattern='(.*-)(.*).html'
        urlprefix,pagenumber=re.findall(urlpattern,url)[0]
        i=2
        while(i<=maxpagenum):
                logging.info("Reading page " +str(i)+"\n")
                content=getpage(urlprefix+str(i)+'.html')
                if (len(content)==0):
                   # Skip a page that could not be fetched; keep going.
                   i=i+1
                   continue
                currenttime=time.time()
                getitem(content)
                logging.info("Use "+str(time.time()-currenttime)+" seconds to handle content")
                i=i+1
    
    duration=time.time()-starttime
    logging.info("Thread %s stopped"%threading.currentThread().getName())
    logging.info("For this category it use "+str(duration)+" seconds")
    logging.critical("Active thread number is %d"%(threading.activeCount()))
    e=threading.enumerate()
    l=[i.getName() for i in e]
    logging.debug("Current active thread is "+str(l))
    # Free one slot in the bounded thread pool acquired by __main__.
    pool_sema.release()

if __name__=="__main__":
    # Root directory of the script; tmp/, 360buy/ and log.conf live under it.
    PATHPREFIX=os.path.abspath(os.path.dirname(sys.argv[0]))
    logging.config.fileConfig(os.path.join(PATHPREFIX,'log.conf'))
    result=getsubcategories()        
    starttime=time.time()
    threads=[]
    plist=[]  # records of newly discovered products; guarded by plist_sema
    MAX_THREADS=25
    PLIST_MUTEX=1
    # pool_sema bounds concurrency: acquired here before each spawn,
    # released by handle_category() when the thread finishes.
    pool_sema=threading.BoundedSemaphore(value=MAX_THREADS)
    plist_sema=threading.BoundedSemaphore(value=PLIST_MUTEX)
    for url in result:
        logging.info(url)
        # Thread name = the category part of the URL, for readable logs.
        tname=url.replace('http://www.360buy.com/products/','')
        pool_sema.acquire()
        t=threading.Thread(target=handle_category,name=tname,args=(url,))
        threads.append(t)
        t.start()

    # Wait at most one hour per thread before moving on.
    for t in threads:
        t.join(ONEHOUR)

    # Append the new-product records collected by getitem() to the catalog.
    try:
        plfilename=os.path.join(PATHPREFIX,'360buy/plist')
        FPL=open(plfilename, 'a')
        for pitem in plist:
            FPL.write('%s\n'%pitem.encode('utf-8'))
        FPL.close()
    except:
        logging.critical('Failed to open %s to append content, exception caught!'%plfilename)                        

    logging.critical('Total time is %d'%(time.time()-starttime))
    sys.exit()
