'''
Created on 2 Jun 2012

@author: R

Fetches an HTML page (optionally through an HTTP proxy), converts
GB-family encoded pages to UTF-8, and prints the text of every HTML
table row as comma-separated values.
'''

import urllib2
import sys
import re

class mparser():
    def netconn(self,url,proxy='0'):
        try:
            if proxy == '0':            
                page = urllib2.urlopen(url, timeout=5)
                print 'connecting', url
                content = page.read()
                page.close()
            else:
                proxying = urllib2.ProxyHandler({'http':proxy})
                opener = urllib2.build_opener(proxying,urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                page = urllib2.urlopen(url)
                content = page.read()
                page.close()
        except IOError:
            print 'connection failed', url
            pass
        
        charset = self.encodeset(content)
        print 'charset=',charset
        if charset == 'GBK':
            content = content.decode(charset,'ignore').encode('utf-8','ignore')
        return content


    def encodeset(self,htmls):
        charset = 'UTF-8'
        try:
            charsets = re.search('charset([^.]+)>', htmls, re.IGNORECASE|re.DOTALL).group(1).upper()
            if charsets.find('GBK') != -1 or charsets.find('GB2312') != -1 or charsets.find('GB18030') != -1 :
                charset = 'GBK'
        except (IndexError, AttributeError):  #check 'http://tianqi.2345.com/'
            print 'getcharset IndexError'
        return charset 
                
        
    def tablepaser(self, htmls):
        table = re.compile('<table[^>]*(>.*?<)/table>',re.DOTALL).findall(htmls)
        #TODO get class name
        for lines in table:
            line = re.compile('<tr[^>]*(>.*?<)/tr>',re.DOTALL).findall(lines)
            for one in line:
                if one == []:
                    continue
                #TODO tr/th/td get links, strip 
                h = re.compile('<th[^>]*(>.*?<)/th>',re.DOTALL).findall(one)
                rawl = ','.join([''.join(re.compile('>(.*?)<',re.DOTALL).findall(i)) for i in h])
                print ''.join(rawl.split()).replace('&nbsp;','')
                d = re.compile('<td[^>]*(>.*?<)/td>',re.DOTALL).findall(one)
                rawl =  ','.join([''.join(re.compile('>(.*?)<',re.DOTALL).findall(i)) for i in d])
                print ''.join(rawl.split()).replace('&nbsp;','')


if __name__ == '__main__':
    # Fetch the Beijing weather page and dump its tables to stdout.
    target = 'http://tianqi.2345.com/beijing/54511.htm'
    parser = mparser()
    page_html = parser.netconn(target)
    parser.tablepaser(page_html)
    
