#!/usr/bin/python
# -*- coding: utf-8 -*-
import base64

#-------------------------------------------------
#Created: 2008-6-15 10:07:29 PM
#Create By: Fla.sam
#
#DESC: 
#
#-------------------------------------------------

#from BeautifulSoup import BeautifulSoup
import cgi
import re
import sys
import urllib
import wsgiref.handlers

import BeautifulSoup
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import webapp

#import HTMLParser

# Matches absolute URLs with an http/https/ftp scheme.
# NOTE(review): appears unused in this file - confirm before removing.
re_url = re.compile( r'''^(http|https|ftp)\://\w\S*''' )

class MainPage( webapp.RequestHandler ):
    """Front page: renders the URL-entry form that submits to the /u proxy handler."""

    def get( self ):
        # BUG FIX: the form declared method="put", which is not a valid HTML
        # form method and does not match the GetUrl handler, which only
        # implements get(). method="get" sends 'l' as a query parameter,
        # which is what GetUrl.get() reads via self.request.get('l').
        self.response.out.write('''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">

<head>
<meta http-equiv="Content-Language" content="zh-cn" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Zen Proxy</title>
<style type="text/css">
body {
    font-family: "Times New Roman", Times, serif;
    font-size: 12px;
}
.maindiv {
}
.style1 {
    text-align: center;
}
</style>
</head>

<body>

<div class="style1">
    <form action="/u" method="get">
        URL:<input name="l" type="text" value="http://www.google.com" /><input name="Submit1" type="submit" value="submit" /><input name="Clean" type="reset" value="Clean" /></form>
</div>

</body>

</html>
''')

def do_parse_html(h):
    """Placeholder for HTML parsing; currently a no-op. Parameter h is unused."""
    pass

def do_fetch_url( u ):
    """Fetch the page at url *u* and rewrite its links to route back through
    this proxy.

    Every <a href> and <img src> is replaced with
    ``ROOT_URL + '/u?h=True&l=<base64(original url)>'`` so that GetUrl can
    decode and re-fetch it.  Returns the rewritten HTML as a string, or a
    short error string on failure.
    """
    try:
        request_respond = urlfetch.fetch( url=u )
    except urlfetch.DownloadError:
        return """Open remote site Error"""
    except urlfetch.InvalidURLError:
        return """URL %s Error""" % u
    if request_respond.status_code != 200:
        return """Read Error"""

    html_source_instance = BeautifulSoup.BeautifulSoup( request_respond.content )
    href_links = html_source_instance.findAll('a')
    img_links = html_source_instance.findAll('img')
    # BUG FIX: base64.encodestring() output may contain '+', '/' and embedded
    # newlines, none of which survive a query string unescaped ('+' decodes to
    # a space, corrupting the payload before GetUrl can decodestring it).
    # urllib.quote() makes the value round-trip safely; the server-side query
    # parsing URL-decodes it back before base64.decodestring runs.
    for x in href_links:
        try:
            x['href'] = ROOT_URL + '/u?h=True&l=%s' % urllib.quote( base64.encodestring( x['href'] ) )
        except KeyError:
            # Anchor without an href attribute - leave it untouched.
            pass
    for x in img_links:
        try:
            x['src'] = ROOT_URL + '/u?h=True&l=%s' % urllib.quote( base64.encodestring( x['src'] ) )
        except KeyError:
            # Image without a src attribute - leave it untouched.
            pass

    return """%s""" % html_source_instance.html
    
class GetUrl( webapp.RequestHandler ):
    """Proxy endpoint (/u): fetches the page named by the 'l' parameter and
    writes the rewritten HTML to the response.

    Query parameters:
        l -- the target URL; base64-encoded when 'h' is set.
        h -- truthy flag marking 'l' as base64-encoded (set by the link
             rewriting in do_fetch_url).
    """

    def get( self ):
        _url = self.request.get( 'l' )
        _hash = self.request.get( 'h' )
        # BUG FIX: removed a stray debug write of _hash that prepended
        # "True" (or "") to every proxied page, corrupting the output.
        if _hash:
            _url = base64.decodestring( _url )
        html_source = do_fetch_url( _url )
        self.response.out.write( html_source )

def main():
    """Build the WSGI application and serve it through the CGI handler."""
    routes = [
        ( '/', MainPage ),
        ( '/u', GetUrl ),
    ]
    application = webapp.WSGIApplication( routes, debug=True )
    wsgiref.handlers.CGIHandler().run( application )

if __name__ == "__main__":
    global ROOT_URL
    ROOT_URL = 'http://snote.appspot.com' 
    main()