import urllib.request
from bs4 import BeautifulSoup
import re
import pytesseract
from PIL import Image


def get_html(url):
    """Fetch *url* and return the raw response body as bytes.

    A proxy opener is prepared (see the commented-out ``opener.open`` line)
    but by default the request goes out over a direct connection.
    """
    proxy_handler = urllib.request.ProxyHandler({
        'http': 'http://127.0.0.1:1080',
        'https': 'https://127.0.0.1:1080'
    })
    opener = urllib.request.build_opener(proxy_handler)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
        # 'Accept-Encoding': 'gzip, deflate, sdch' is deliberately omitted:
        # asking for gzip would return compressed bytes that the parsing
        # below cannot handle.
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
    }
    req = urllib.request.Request(url=url, headers=headers)
    # response = opener.open(req)  # crawl through the local proxy instead
    # Use a context manager so the HTTP response/socket is always closed
    # (the original leaked it).
    with urllib.request.urlopen(req) as response:  # direct connection
        html = response.read()
    return html


def text_Bto8(text):
    """Fix the common OCR confusion of the digit '8' read as letter 'B'.

    The port images on the site contain only digits, so every 'B' in the
    OCR output can safely be mapped back to '8'.  Unlike the original
    first-character-only check, this corrects a 'B' at any position and
    does not raise IndexError when the OCR result is an empty string.
    """
    return text.replace('B', '8')


# Step 1: fetch the listing page (proxy support lives inside get_html).
url = 'https://proxy.mimvp.com/freeopen.php'
html = get_html(url).decode('utf8')
# Step 2: parse the html.
# Each table row carries: (ip:port, HTTP type, anonymity, country,
# operator, verification time).
tbody_match = re.search(r'.*?(<tbody>.*?</tbody>).*?', html, re.S)
# Fail loudly if the page layout changed instead of raising a bare
# AttributeError on None.
if tbody_match is None:
    raise ValueError('no <tbody> found in page -- site layout may have changed')
tbodys = tbody_match.group(1)
# print(tbodys)
# Captured groups per row: (row id, ip, port-image src, proxy type).
trs = re.findall(
    r"<td class='tbl-proxy-id'.*?>([\d\.]*?)</td>.*?>([\d\.]*?)</td>.*?<img src=(.*?)/></td>.*?>([\w/]*?)</td>",
    tbodys,
    re.S)
ips = []
default_url = 'https://proxy.mimvp.com/'
for tr in trs:
    # The port is rendered as an image; download it and OCR the digits.
    picture = get_html(default_url + tr[2])
    with open('20190310_6_.png', 'wb') as f:
        f.write(picture)
    image = Image.open("20190310_6_.png")
    text = pytesseract.image_to_string(image)
    proxy_ip = tr[1] + ':' + text_Bto8(text)
    proxy_type = tr[3]  # renamed from 'type' to avoid shadowing the builtin
    ips.append((proxy_ip, proxy_type))
# Output the harvested (ip:port, type) pairs.
for ip in ips:
    print(ip)

# Summary: tesseract tends to read the digit 8 in the port images as the
# letter B; text_Bto8 maps every B back to 8.
