#!/usr/bin/python
import urllib
from HTMLParser import HTMLParser
import re
import logging
import sys

# Module-wide log level consumed by set_logger() when configuring logging.
g_log_level = logging.DEBUG

class PageReader:
    """Download a web page and save its raw HTML into a local file.

    The saved file (``self.pageFile``) is later parsed by the ad-extraction
    helpers below.
    """

    def __init__(self, url="http://www.sina.com.cn", pageFile="page.html"):
        # Default URL used when ReadFrom() is called without an argument.
        self.__url = url
        # Path of the local file the fetched HTML is written to.
        self.pageFile = pageFile

    def ReadFrom(self, url=""):
        """Fetch `url` (or the default URL) and write it to self.pageFile.

        An empty/missing `url` falls back to the URL given to __init__.
        Handles are closed even if the copy fails (the original used the
        deprecated file() builtin and leaked both handles on error).
        """
        page = urllib.urlopen(url if url else self.__url)
        try:
            out = open(self.pageFile, 'w')
            try:
                # readline() returns '' at EOF, terminating the iterator.
                for line in iter(page.readline, ''):
                    out.write(line)
                out.flush()
            finally:
                out.close()
        finally:
            page.close()


class ads_parser(HTMLParser):
    """Parse the saved page and collect the text of <span id="lbl_proxy"> tags.

    Each matching span's data is split on '@' and the first part is stored
    (newline-terminated) in an internal list.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.isProxyTag = False            # True while inside the target span
        self.szProxyTagName = "span"       # tag we look for
        self.szTagAttrName = "id"          # attribute name to match
        self.szTagAttrValue = "lbl_proxy"  # attribute value to match
        self.__proxyList = []              # collected results

    def handle_starttag(self, tag, attrs):
        if self.is_ads_label(tag, attrs):
            self.isProxyTag = True

    def is_ads_label(self, tag, attrs):
        """Return True when (tag, attrs) is the target span element.

        NOTE: the original body mixed tab and space indentation, which is a
        TabError under Python 3; re-indented consistently, logic unchanged.
        """
        if tag != self.szProxyTagName:
            return False
        for name, value in attrs:
            if name == self.szTagAttrName and value == self.szTagAttrValue:
                return True
        return False

    def handle_endtag(self, tag):
        # Leave "inside target span" state on its closing tag.
        if tag == self.szProxyTagName and self.isProxyTag:
            self.isProxyTag = False

    def handle_data(self, data):
        if self.isProxyTag:
            # Keep only the part before '@', one entry per line.
            seq = str(data).split("@")
            self.__proxyList.append(seq[0] + "\n")

    def flush_results(self):
        """Log the collected entries and return them.

        BUG FIX: the original called FlushData(), which is not defined
        anywhere in this module and always raised NameError.
        """
        for entry in self.__proxyList:
            logging.info(entry.rstrip("\n"))
        return self.__proxyList
        
def extract_ad_str(s, pattern='''http://ad[0-9]\.sina\.com\.cn(\S*)\.swf'''):
    """Return the first Sina ad SWF URL found in `s`, or '' when none matches.

    `pattern` may be overridden to search for a different URL shape.
    """
    match = re.compile(pattern).search(s)
    return '' if match is None else match.group(0)
	
def parse_ads_file(filename):
    """Read `filename` and return the first ad SWF URL it contains ('' if none).

    FIX: the original left the file handle open if read() or the extraction
    raised; a with-block guarantees it is closed.
    """
    with open(filename, 'r') as f:
        return extract_ad_str(f.read())
        
def test():
    """Fetch two Allyes ad-serving pages and log the SWF URL found in each."""
    page_reader = PageReader(url='http://403.adsina.allyes.com/main/adfshow?user=AFP6_for_SINA|home|hometopbanner46895&db=sina&local=yes&js=off')

    page_reader.ReadFrom()
    logging.info('banner403: %s' % parse_ads_file(page_reader.pageFile))

    page_reader.ReadFrom('http://347.adsina.allyes.com/main/adfshow?user=AFP6_for_SINA|home|home28670banner&db=sina&local=yes&js=off')
    logging.info('banner347: %s' % parse_ads_file(page_reader.pageFile))
	
def set_logger():
    """Configure root logging to append timestamped records to ads.log."""
    logging.basicConfig(
        level=g_log_level,
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename='ads.log',
    )

	
# Script entry point: configure file logging, then run the ad-fetch test.
if __name__=='__main__':
	set_logger()
	test()
