import requests
from scrapy import Selector
import time

def check(ip, port, type):
    """Verify that a proxy can successfully relay an HTTPS request.

    Sends a Bing "what is my IP" search through the proxy; if the
    result page contains the IP answer box (``div.b_xlText``), the
    proxy works and its URL is printed as a quoted, comma-terminated
    string (ready to paste into a Python list literal).

    Args:
        ip:   proxy host, as a string.
        port: proxy port (int or str).
        type: proxy type label from the listing site; currently unused,
              kept for interface compatibility with existing callers.

    Returns:
        None. A working proxy is reported via stdout only.
    """
    check_url = 'https://cn.bing.com/search?q=ip'
    proxy_url = 'http://' + ip + ':' + str(port)
    # Only the 'https' scheme needs mapping: check_url is HTTPS.
    proxies = {
        'https': proxy_url,
    }
    try:
        r = requests.get(check_url, proxies=proxies, timeout=3)
    except requests.RequestException:
        # Dead, slow, or misbehaving proxies are expected here;
        # skip them quietly instead of swallowing *every* exception.
        return
    if r.status_code != 200:
        return
    cselector = Selector(text=r.text)
    txt = cselector.css('div[class=b_xlText]::text').extract_first()
    if txt is not None:
        print('"' + proxy_url + '",')

def fetch_proxies(base_url, headers, pages=5):
    """Scrape *pages* consecutive listing pages from a kuaidaili index.

    Args:
        base_url: listing URL prefix; the page number (1..pages) is
                  appended to it.
        headers:  HTTP headers dict passed to every request.
        pages:    number of pages to fetch (default 5, matching the
                  original hard-coded range(1, 6)).

    Returns:
        List of dicts with keys 'ip', 'port', 'anonymous', 'type',
        'alive' — one per table row on the scraped pages.
    """
    proxies = []
    for page in range(1, pages + 1):
        r = requests.get(base_url + str(page), headers=headers)
        selector = Selector(text=r.text)
        for row in selector.css('table tbody tr'):
            cells = row.css('td::text').extract()
            if len(cells) < 7:
                # Malformed or advert row: skip instead of crashing
                # with IndexError on cells[6].
                continue
            proxies.append({'ip': cells[0],
                            'port': cells[1],
                            'anonymous': cells[2],
                            'type': cells[3],
                            'alive': cells[6],
                            })
    return proxies


if __name__ == '__main__':
    headers = {'User-Agent': 'Mozilla/5.0'}

    # inha = anonymous proxies, intr = transparent proxies.
    proxys = []
    for list_url in ('http://www.kuaidaili.com/free/inha/',
                     'http://www.kuaidaili.com/free/intr/'):
        proxys.extend(fetch_proxies(list_url, headers))

    print(proxys)

    # Only HTTPS-capable proxies can relay the HTTPS check URL.
    for proxy in proxys:
        if proxy['type'] == 'HTTPS':
            check(proxy['ip'], proxy['port'], proxy['type'])