# -*- coding=utf-8 -*-

#网站：http://www.xicidaili.com/nn 【西刺代理】中的高匿IP地址网站
#本文使用random的使用代理IP的方式，参考：https://www.cnblogs.com/eric8899/p/6122759.html
#还有一种是使用循环代理IP的方式，可以参考：https://www.cnblogs.com/hearzeus/p/5157016.html

from bs4 import BeautifulSoup
import requests
import random
import time
import sys,traceback
import os.path
import os

# Pool of desktop-browser User-Agent strings. One entry is picked at random
# for every request (see get_content / call_website_loop) so the crawler's
# traffic looks less uniform and is harder to fingerprint as a bot.
user_agent_list=[
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

def get_content(count, url, proxy, timeout=10):
    """Fetch *url* through *proxy* using a random User-Agent and return the page text.

    Args:
        count: 1-based request counter, used only for log messages.
        url: page URL to download.
        proxy: requests-style proxies dict, e.g. {"http": "http://ip:port"}.
        timeout: seconds before the request is abandoned. New parameter with a
            default, so existing callers are unaffected; the original had no
            timeout and could hang forever on a dead proxy.

    Raises:
        requests.RequestException (or a subclass) on any network/HTTP failure.
        The original wrapped the call in ``except: raise``, which is a no-op;
        the pointless try/except has been removed.
    """
    print(">>>>>>>>>>>开始第" + str(count) + "次请求:"+url)
    # Rotate the User-Agent so repeated requests look less like a single bot.
    ua = random.choice(user_agent_list)
    header = {"User-Agent": ua}
    # Silence the InsecureRequestWarning triggered by verify=False below.
    requests.packages.urllib3.disable_warnings()
    html_body_opener = requests.get(url, verify=False, headers=header,
                                    proxies=proxy, timeout=timeout).content
    # NOTE(review): "unicode-escape" is an unusual codec for HTML (the target
    # site is presumably UTF-8) — kept as-is to preserve existing behavior,
    # but worth confirming against a real response.
    html_body_source = html_body_opener.decode("unicode-escape")
    print(">>>>>>>>>>>第"+str(count)+"次请求成功:"+url)
    return html_body_source

def write_to_txt(content, filename):
    """Write *content* to *filename* as UTF-8 text, replacing any existing file.

    Mode "w" already truncates an existing file, so the original's explicit
    exists/remove dance was unnecessary (and racy); the with-statement
    guarantees the handle is closed even if the write raises.
    """
    with open(filename, "w", encoding='utf-8') as outfile:
        outfile.write(content)
    print("文件保存成功:"+filename)

def read_file_content(file_name):
    """Read *file_name* as UTF-8 and return its entire contents as one string.

    Uses a with-statement instead of the original manual try/finally close;
    behavior is otherwise identical (raises FileNotFoundError if missing).
    """
    with open(file_name, 'r', encoding='utf-8') as file_object:
        return file_object.read()

def get_proxy_list(file_fullpath='proxy_web_source.txt'):
    """Scrape the xicidaili proxy table and return a list of [ip, port, type] rows.

    The raw page HTML is cached in *file_fullpath*: it is downloaded exactly
    once (without a proxy) and every later call parses the local copy.

    Args:
        file_fullpath: local cache file for the proxy-site HTML. New parameter
            defaulting to the previously hard-coded name, so existing callers
            are unaffected.

    Returns:
        list of 3-item lists: [ip, port, protocol] where protocol is the
        scraped table text (observed values: 'HTTP' / 'HTTPS').
    """
    if not os.path.exists(file_fullpath):
        # Download the listing page once with a fixed desktop User-Agent.
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'}
        # url = 'http://www.xicidaili.com/nn/1'
        url = 'http://www.xicidaili.com/nt'
        req = requests.get(url, headers=header, proxies=None)
        res = req.content.decode("utf-8")
        write_to_txt(res, file_fullpath)
        print("成功保存代理网页源代码到本地")
    else:
        print("代理网页已在本地保存")

    res = read_file_content(file_fullpath)
    soup = BeautifulSoup(res, "html.parser")
    rows = soup.findAll('tr')

    # Row 0 is the table header. In each data row, column 1 holds the IP,
    # column 2 the port and column 5 the protocol (HTTP/HTTPS).
    ip_list = []
    for row in rows[1:]:
        tds = row.findAll("td")
        ip_list.append([tds[1].text, tds[2].text, tds[5].text])
    print("成功获取iplist")
    return ip_list


def generate_proxy_content():
    """Pick one random proxy from the scraped list and return a requests proxies dict.

    Returns:
        dict like {"http": "http://ip:port"} or {"https": "http://ip:port"}.

    The dict construction is delegated to generate_proxy_content_per so the
    two code paths cannot drift apart — the original duplicated the whole
    build-and-branch logic (and shared its latent UnboundLocalError when the
    scraped protocol was neither 'HTTP' nor 'HTTPS').
    """
    ip_list = get_proxy_list()
    random_ip_list = random.choice(ip_list)
    return generate_proxy_content_per(random_ip_list)

def generate_proxy_content_per(temp_ip):
    """Build a requests-style proxies dict from one scraped [ip, port, type] entry.

    Args:
        temp_ip: 3-item sequence [ip, port, protocol]; protocol is the table
            text, expected to be 'HTTP' or 'HTTPS'.

    Returns:
        {"https": "http://ip:port"} for HTTPS entries,
        {"http": "http://ip:port"} otherwise.

    Bug fix: the original left ``proxy_temp`` unassigned when the protocol was
    neither 'HTTP' nor 'HTTPS', so the final print/return raised
    UnboundLocalError. Unknown protocols now fall back to plain HTTP.
    """
    print("获取到的ip list是"+str(temp_ip))
    ip, port, http_type = temp_ip[0], temp_ip[1], temp_ip[2]
    proxy_host = "http://" + ip + ":" + port
    if http_type == 'HTTPS':
        proxy_temp = {"https": proxy_host}
    elif http_type == 'HTTP':
        proxy_temp = {"http": proxy_host}
    else:
        # Previously this branch only printed and then crashed; treat the
        # entry as an ordinary HTTP proxy instead.
        print("既不是http，也不是https请求")
        proxy_temp = {"http": proxy_host}
    print("构造出来的proxy是："+str(proxy_temp))
    return proxy_temp

def call_website(count, url, num_retries=6):
    """Fetch *url* through a freshly built random proxy, retrying on failure.

    Args:
        count: 1-based request counter, used only for log output.
        url: page URL to download.
        num_retries: remaining retry budget; each failure sleeps 10s, builds
            a new random proxy and recurses with the budget decremented.

    Returns:
        The page text from get_content (the original discarded it).

    Bug fixes: the bare ``except`` is narrowed to Exception (so Ctrl-C is not
    swallowed), and exhausting the budget now re-raises the last error — the
    original restarted with a fresh budget of 6, recursing without bound.
    """
    try:
        proxys = generate_proxy_content()
        return get_content(count, url, proxys)
    except Exception:
        if num_retries > 0:
            print(u"正在更换代理，10s后将重新获取第", num_retries, u"次")
            time.sleep(10)
            return call_website(count, url, num_retries - 1)
        print(u"代理发生错误，重新构造代理")
        raise

def call_website_loop(count, url):
    """Download *url* by trying every scraped proxy in order until one works.

    Args:
        count: 1-based request counter, used only for log output.
        url: page URL to download.

    Returns:
        The decoded page text on first success — the original computed it and
        then silently threw it away — or None when every proxy failed.
    """
    ip_list = get_proxy_list()
    print("一共获取了"+str(len(ip_list))+"个IP")
    # Silence InsecureRequestWarning once, instead of on every iteration.
    requests.packages.urllib3.disable_warnings()
    for ip in ip_list:
        proxy = generate_proxy_content_per(ip)
        print(">>>>>>>>>>>开始第" + str(count) + "次请求:" + url)
        # Fresh random User-Agent per attempt, same as get_content.
        header = {"User-Agent": random.choice(user_agent_list)}
        try:
            body = requests.get(url, verify=False, headers=header,
                                timeout=5, proxies=proxy).content
            # NOTE(review): "unicode-escape" is unusual for HTML (likely UTF-8
            # on the wire) — kept to preserve existing behavior.
            html_body_source = body.decode("unicode-escape")
            print(">>>>>>>>>>>第" + str(count) + "次请求成功:" + url)
            return html_body_source
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt still aborts.
            print("请求失败，更换代理IP")
            continue
    return None

if __name__== '__main__':
    # Crawl pages 1..10 of the hospital listing, fetching each page through
    # whichever scraped proxy happens to work, and time the whole run.
    start_time = time.time()
    # (The original also stored the formatted start time in an unused local
    # `begin_time`; that dead assignment has been removed.)
    print("开始执行时间" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    page_base_url = 'https://db.yaozh.com/hmap/'
    for page_num in range(1, 11):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>Page"+str(page_num)+" start<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
        hospital_url = page_base_url + str(page_num) + '.html'
        call_website_loop(page_num, hospital_url)
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>Page" + str(
            page_num) + " end<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print("共计耗时" + str(time.time() - start_time) + 's')





