'''
Created on May 26, 2011

@author: Jialai_Zhu
'''
def parser_from_cnproxy(page,RET):
#    z="3";m="4";k="2";l="9";d="0";b="5";i="7";w="6";r="8";c="1";
    from BeautifulSoup import *
    soup=BeautifulSoup(page,fromEncoding="gb2312")
    tags=soup.findAll('tr')
    added=0
    for tr in tags:
        try:
            td_ip=tr.next
            ip=str(td_ip.contents[0])
            port=td_ip.contents[1].next
            
            import re
            result=re.search('(\+\w)+', port)
            port=result.group()
            c2n={"z":"3","m":"4","k":"2",
                 "l":"9","d":"0","b":"5",
                 "i":"7","w":"6","r":"8","c":"1","+":""}
            for c in c2n:
                import string
                port=string.replace(port,c,c2n[c])
            http=td_ip.nextSibling.next
            if http=='HTTP':
                RET.append({'http':"%s:%s"%(ip,port)})
                added=added+1
                print "%s:%s"%(ip,port)
        except Exception,ex:
            print ex
            pass
    return added
  
    
class cnproxy_com:
    """Scraper for the paginated proxy lists at www.cnproxy.com."""

    def __init__(self):
        pass

    def get_list(self, PROXY_LIST, max_len=0):
        """Crawl proxy1.html .. proxy9.html, appending hits to PROXY_LIST.

        Stops early when a page yields no proxies or, if max_len is
        non-zero, once PROXY_LIST has grown past max_len entries.
        """
        from g import browser
        for page_no in range(1, 10):
            page_url = "http://www.cnproxy.com/proxy%d.html" % page_no
            # Fetch the listing page directly (no upstream proxy).
            fetcher = browser(proxy=None, debug=False)
            fetcher.open(page_url)
            n_added = parser_from_cnproxy(fetcher.html(), PROXY_LIST)
            if n_added < 1:
                break
            if max_len and len(PROXY_LIST) > max_len:
                break
 
def check_proxy(test,proxy_list,work_proxy_list):
    from g import browser
    from g import BrowserError
#    test_url='http://www.google.com.hk'
    test_url=test['url']
    test_method=test['method']
#    b=browser(debug=False,proxy=None)
#    b.open(url=test_url,retry_timeout=None)
#    test_len=len(b.html())   
#    print  test_len
    dump=open("dump.html","wb")
    def check_proxy(i):
        try:
            b=browser(proxy=[i],debug=False)
            b.open(url=test_url,retry_timeout=None)
            html=b.html()
            body_len=len(html)
            if body_len==0:
                return
#            print html
            dump.write(html)            
            if test_method(html):
#            x=body_len-test_len
#            print x
#            print abs(x)
#            if abs(x)*30<test_len:
#                print i,'good good good good'
                work_proxy_list.append(i)
        except Exception,ex:
            pass
        
    from t import ThreadPool
    pool = ThreadPool(40)
    pool.run(list=proxy_list, func=check_proxy)
    return work_proxy_list

def is_google(page):
    """Heuristic page check: True when the 'window.google.timers' marker
    appears somewhere past the very start of *page* (find() > 0, so a
    marker at index 0 does NOT count -- original behavior preserved)."""
    marker_pos = page.find('window.google.timers')
    return marker_pos > 0

# Default reachability spec consumed by get_proxy / check_proxy.
test_google = {'url': 'http://www.google.com.hk', 'method': is_google}

def get_proxy(url=None):
    if url==None:
        url=test_google
    list_ret=[]
    list=[]
    try:
        cnproxy_com().get_list(list,0)
    except Exception,ex:
        print ex
        pass
    try:
        ret=check_proxy(url,list,list_ret)
    except:pass
    f=open("proxy.list","wb")
    for i in ret:
        f.write(i['http'])
        f.write('\r\n')
    return ret


def FILE_PATH(path):
    """Resolve *path* relative to the directory containing this module."""
    import os
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, path)
def load_proxy_list():
    """Read proxy.list (one "ip:port" per CRLF-terminated line, as written
    by get_proxy) and return it as a list of {'http': "ip:port"} dicts."""
    proxies = []
    # Binary mode on purpose: the file was written with explicit '\r\n'
    # terminators, which are stripped here regardless of platform.
    f = open(FILE_PATH("proxy.list"), "rb")
    try:
        for line in f:
            # str.replace instead of the deprecated string.replace.
            proxies.append({'http': line.replace('\r\n', '')})
    finally:
        # Fix: the file handle was previously leaked.
        f.close()
    return proxies
if __name__ == '__main__':
    print get_proxy()
    pass
