# -*- coding:utf-8 -*-

import urllib2
import re
import sys
import StringIO
import gzip
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool

reload(sys)
sys.setdefaultencoding("utf-8")


# Browser-like User-Agent header so the proxy-list sites and the target
# site don't reject the scraper as an obvious bot.
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) ' \
             'AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/63.0.3239.84 Safari/537.36'
headers = {'User-Agent': user_agent}

# Page fetched through each candidate proxy to test it, and a text fragment
# (the site's title, "qiushibaike") that must appear in the response body
# for the proxy to count as working.
web_url = "https://www.qiushibaike.com/8hr/page/1/"
web_filter = u"糗事百科"


def get_http_proxy(url):
    try:
        request = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(request, timeout=3)
    except Exception,e:
        print '%s connect failed1' % url
        print Exception, e
        return False

    if response.code != 200:
        print 'HTTP %s Error:%s connect failed2' % (response.code, url)
        return False
    else:
        page_code = response.read()
        if "kuaidaili" in url:
            ip_list = re.findall(r'<td data-title="IP">(.*?)</td>.*?<td data-title="PORT">(.*?)</td>', page_code,re.S)
        elif "xicidaili" or "66ip" or "ip3366"in url:
            ip_list = re.findall(r'<td>(\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)</td>.*?<td>(.*?)</td>', page_code, re.S)
        return ip_list


def Verify_Proxy(proxy_ip_port):
    if re.match(r"http:", web_url):
        http_prefix = "http://"
    else:
        http_prefix = "https://"
    opener = urllib2.build_opener(urllib2.ProxyHandler({http_prefix: http_prefix + proxy_ip_port}))
    urllib2.install_opener(opener)

    try:
        request = urllib2.Request(web_url, headers=headers)
        request.add_header('Accept-Encoding', 'gzip')
        response = urllib2.urlopen(request, timeout=15)
        print response.headers

        if 'Content-Encoding' in response.headers and response.headers['Content-Encoding'] == "gzip":
            compresseddata = response.read()
            print "压缩的数据长度为：%d" % len(compresseddata)
            compressedstream = StringIO.StringIO(compresseddata)
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            page_content = gzipper.read()
            print "解压缩后数据长度为：%d" % len(page_content)
        else:
            page_content = response.read()
    except Exception,e:
        print '%s connect failed3' % proxy_ip_port
        print Exception, e
        return False
    else:
        if web_filter in page_content.encode("utf-8"):
            now_time = datetime.now().strftime('%Y-%m-%d-%H')
            with open("http_proxy_%s.txt" % now_time, 'a') as f:
                f.write(proxy_ip_port.strip() + "\n")
        else:
            print 'HTTP %s Error:%s connect failed4' % (response.code, proxy_ip_port)
            return False


if __name__ == '__main__':
    '''test 1'''
    # start_time = datetime.now()
    # proxy_info = ['135.245.48.34', '8000']
    # Verify_Proxy(proxy_info)
    # end_time = datetime.now()
    # print end_time - start_time


    '''test2'''
    urls = [
            "https://www.kuaidaili.com/free/",
            "http://www.xicidaili.com/",
            "http://www.66ip.cn/",
            "http://www.ip3366.net/"
            ]
    ip_list = set()
    for url in urls:
        ipinfo = get_http_proxy(url)
        if ipinfo:
            ip_list = map(lambda x: ":".join(x), set(ipinfo))
        print len(ip_list)
    start_time = datetime.now()
    pool = ThreadPool(50)
    pool.map(Verify_Proxy, ip_list)
    pool.close()
    pool.join()
    end_time = datetime.now()
    print "run time: %s" % (end_time - start_time)


