# -*- coding=utf-8 -*-

import time
import urllib.parse
import requests
import sys
import re
import os
import os.path
from lxml import etree
from bs4 import BeautifulSoup
import random


"""
主要目的是在demo1的基础上，
将https://db.yaozh.com/hmap/38994.html中的页面中的数据放到所属的文件夹中，
不能匹配的就放到extra文件夹中
url是https://db.yaozh.com/hmap/38994.html
"""

# Maximum number of retry attempts for a single failed page request.
sum_retries=1000
# Output layout: <cwd>/<level1_dir>/<level2_dir>_<timestamp>/<province>/<page>.txt
level1_dir='output'
level2_dir='demo2'
# Pages whose province is not in the scraped province list go here.
dir_extra='extra'
# Detail-page URL prefix; pages are <page_base_url><page_num>.html
page_base_url='https://db.yaozh.com/hmap/'
# Crawl pages start_page_num .. end_page_num-1 (range() excludes the end).
start_page_num=1
end_page_num=10


# Pool of desktop browser User-Agent strings; one is chosen at random per
# request to make the traffic look less like a single automated client.
user_agent_list=[
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

def group_count(groups, count):
    """Partition the integers 1..count into `groups` buckets with balanced sums.

    Greedy longest-processing-time heuristic: take values largest-first and
    always append to the bucket with the smallest current sum.

    :param groups: number of buckets to produce
    :param count: highest integer; the values distributed are 1..count
    :return: list of `groups` lists whose sums are approximately equal
    """
    # Removed a no-op self-assignment (`groups = groups`) and the
    # build-then-reverse-sort of an already ordered range.
    buckets = [[] for _ in range(groups)]
    for value in range(count, 0, -1):
        # Stable sort keeps relative order of equal-sum buckets, matching
        # the original's tie-breaking behavior.
        buckets.sort(key=sum)
        buckets[0].append(value)
    return buckets

def generate_province_list(province_msg):
    """Extract province names from the province drop-down JSON fragment.

    :param province_msg: raw text containing `"name":"<x>","val"` entries
    :return: list of province names, skipping the first entry
             (presumably the "全部"/"all" placeholder — matches original behavior)
    """
    # Raw string avoids invalid-escape warnings; slicing replaces the
    # manual index-1..end copy loop.
    pat_province_name = r'"name":"([\s\S]*?)","val"'
    matches = re.findall(pat_province_name, province_msg)
    return matches[1:]

#requests.get("http://example.org", proxies=proxies)
def request_webpage_url(url, proxies, header, num_retries):
    """GET *url*, retrying on failure with a 5 s pause between attempts.

    :param url: page URL to fetch
    :param proxies: requests-style proxies dict, or None for a direct request
    :param header: headers dict (e.g. {"User-Agent": ...}), or None
    :param num_retries: how many additional attempts are allowed after a failure
    :return: the requests Response, or None when all retries are exhausted
    """
    # Iterative loop instead of recursion: with sum_retries=1000 the original
    # recursive retry could exceed Python's default recursion limit.
    attempts_left = num_retries
    while True:
        try:
            # Certificate verification is disabled (verify=False); silence the
            # resulting urllib3 warnings.
            requests.packages.urllib3.disable_warnings()
            return requests.get(url, verify=False, headers=header,
                                proxies=proxies, timeout=3)
        except requests.RequestException:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            if attempts_left <= 0:
                return None
            print("request请求失败，开始重试，第" + str(sum_retries - attempts_left + 1) + "次")
            time.sleep(5)
            attempts_left -= 1

def request_webpage_url_loop(url, proxies, header):
    """Perform a single GET of *url* through *proxies* with a 3 s timeout.

    Unlike request_webpage_url this does not retry: any exception from the
    request propagates directly to the caller (which rotates to another proxy).

    :return: the requests Response on success
    """
    # verify=False skips TLS certificate checks; suppress the warnings it emits.
    requests.packages.urllib3.disable_warnings()
    return requests.get(url, verify=False, headers=header,
                        proxies=proxies, timeout=3)

def request_web_body(url):
    """Fetch *url* through rotating proxies and extract the province data-list.

    Tries each proxy from get_proxy_list() with a random User-Agent until one
    request succeeds; retries the whole routine if the page does not contain
    exactly one province block.

    :return: the single matched `data-list` fragment (string)
    """
    ip_list = get_proxy_list()
    result = None

    for ip in ip_list:
        proxy = generate_proxy_content_per(ip)
        # Pick a random User-Agent string for each attempt.
        ua = random.choice(user_agent_list)
        header = {"User-Agent": ua}
        try:
            regex_rule = r'data-names="province" data-list=([\s\S]*?)data-src="">'
            html_body_response = request_webpage_url_loop(url, proxy, header)
            # The province list is embedded as escaped JSON in the page.
            html_body_source = html_body_response.content.decode("unicode-escape")
            result = re.compile(regex_rule).findall(html_body_source)
            break
        except Exception:
            print("请求失败，更换代理IP")
            continue

    # Fix: when every proxy fails, `result` is still None and the original
    # crashed with `len(None)`; treat that the same as a bad page and retry.
    if result is None or len(result) != 1:
        time.sleep(5)
        print("请求回来的网页有问题，正在重试")
        return request_web_body(url)
    else:
        return result[0]


def create_dir(dirpath):
    """Create *dirpath* (including parents) if it does not already exist.

    :param dirpath: directory path to create
    :return: True if the directory was created, False if it already existed
    """
    # EAFP instead of exists()-then-makedirs(): the original check-then-create
    # had a race window between the two calls (TOCTOU).
    try:
        os.makedirs(dirpath)
        return True
    except FileExistsError:
        # Path already exists (dir or file) — mirror the original's False.
        return False

def root_dir_initial():
    """Create and return the timestamped output root directory.

    The directory is <cwd>/<level1_dir>/<level2_dir>_<YYYYmmddHHMM00>.
    """
    # Minute-resolution timestamp (seconds zeroed) keeps reruns within the
    # same minute in one folder.
    timestamp = time.strftime("%Y%m%d%H%M00", time.localtime())
    short_name = level2_dir + '_' + timestamp
    root_path = os.path.join(os.path.abspath('.'), level1_dir, short_name)
    create_dir(root_path)
    return root_path

def initial():
    """Bootstrap the crawl: create the output root and fetch the province list.

    Requests the 北京市 listing page once just to scrape the full province
    drop-down from it.

    :return: (province_list, data_dir_root) tuple
    """
    data_dir_root = root_dir_initial()
    # Removed unused locals base_p / base_pagesize from the original.
    base_grade = urllib.parse.quote("全部")
    base_type = urllib.parse.quote("全部")
    base_province = urllib.parse.quote("北京市")
    base_url = "https://db.yaozh.com/hmap?name=&grade=%s&address=&type=%s&bedstr=&bedend=&province=%s" % (
        base_grade, base_type, base_province)
    province_msg = request_web_body(base_url)
    province_list = generate_province_list(province_msg)

    return province_list, data_dir_root

def initial_province_dir(parentdirpath, province_list):
    """Create one sub-directory per province plus the `extra` fallback folder.

    :param parentdirpath: root directory the province folders go under
    :param province_list: province names returned by generate_province_list()
    """
    for province_name in province_list:
        province_root_dir = os.path.join(parentdirpath, province_name)
        create_dir(province_root_dir)
    # Fix: the original used the global `data_dir_root` here instead of the
    # `parentdirpath` parameter, so the function only worked when called from
    # this script's __main__ block.
    create_dir(os.path.join(parentdirpath, dir_extra))
    print("文件夹初始化成功")

def request_hospital_body(url):
    """Fetch one hospital detail page and extract its province name.

    Retries the whole fetch (after a 5 s pause) until the page contains
    exactly one `省` table row.

    :param url: hospital detail page URL
    :return: (province_name, page_source) tuple
    """
    print(url)
    response = request_webpage_url(url, None, None, sum_retries)
    page_source = response.content.decode("utf-8")
    # Parse the raw bytes and locate the <span> inside the row whose
    # header cell is exactly "省".
    tree = etree.HTML(response.content)
    spans = tree.xpath('//tr[./th/text()="省"]/td/span')

    if len(spans) == 1:
        # Strip spaces and newlines that pad the province text.
        province_name = spans[0].text.replace(' ', '').replace("\n", "")
        return province_name, page_source

    time.sleep(5)
    print("请求回来的网页有问题，正在重试")
    return request_hospital_body(url)


'''
@to 将在列表中的省市放到所属文件夹中，如果不在列表中，就放到extra文件夹中
'''

def save_page_source_into_txt(tgt_file_root_dir, tgt_file_extra_dir, province_name, file_content, province_list, file_index):
    """Save one page's source under its province folder (or the extra folder).

    :param tgt_file_root_dir: root holding per-province sub-directories
    :param tgt_file_extra_dir: fallback folder for unknown provinces
    :param province_name: province extracted from the page
    :param file_content: page source to persist
    :param province_list: known province names
    :param file_index: page number, used as the file name
    """
    file_basename = str(file_index) + '.txt'
    if province_name in province_list:
        target = os.path.join(tgt_file_root_dir, province_name, file_basename)
    else:
        # Unknown province — park the page in the extra folder.
        print("省市不在列表中:" + province_name)
        target = os.path.join(tgt_file_extra_dir, file_basename)
    write_to_txt(file_content, target)

def read_file_content(file_name):
    """Read and return the entire UTF-8 text content of *file_name*."""
    # `with` guarantees the handle is closed, like the original try/finally.
    with open(file_name, 'r', encoding='utf-8') as file_object:
        return file_object.read()

def write_to_txt(content, filename):
    """Write *content* to *filename* as UTF-8, overwriting any existing file.

    :param content: text to write
    :param filename: destination path
    """
    # The original deleted the file first, but mode "w" already truncates,
    # so the explicit os.remove() was redundant (and race-prone).
    # `with` guarantees the handle is closed even if write() raises.
    with open(filename, "w", encoding='utf-8') as outfile:
        outfile.write(content)
    print("文件保存成功:"+filename)

def generate_proxy_content_per(temp_ip):
    """Build a requests-style proxies dict from one scraped proxy entry.

    :param temp_ip: [ip, port, http_type] triple from get_proxy_list()
    :return: {"http": url} or {"https": url} depending on the proxy type
    :raises ValueError: when the proxy type is neither 'HTTP' nor 'HTTPS'
    """
    print("获取到的ip list是" + str(temp_ip))
    ip_addr = temp_ip[0]
    port = temp_ip[1]
    http_type = temp_ip[2]

    proxy_host = "http://" + ip_addr + ":" + port
    if http_type == 'HTTPS':
        proxy_temp = {"https": proxy_host}
    elif http_type == 'HTTP':
        proxy_temp = {"http": proxy_host}
    else:
        print("既不是http，也不是https请求")
        # Fix: the original fell through here with proxy_temp unbound and
        # crashed on the next line with UnboundLocalError; fail explicitly.
        raise ValueError("unsupported proxy type: " + str(http_type))
    print("构造出来的proxy是：" + str(proxy_temp))
    return proxy_temp

def get_proxy_list():
    """Return a list of [ip, port, type] proxy entries scraped from xicidaili.

    The proxy listing page is fetched once and cached in
    'proxy_web_source.txt'; subsequent calls parse the cached copy.
    """
    cache_path = 'proxy_web_source.txt'
    if os.path.exists(cache_path):
        print("代理网页已在本地保存")
    else:
        # Fetch the listing page directly (no proxy) with a fixed User-Agent.
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'}
        url = 'http://www.xicidaili.com/nt'
        req = requests.get(url, headers=header, proxies=None)
        page_source = req.content.decode("utf-8")
        write_to_txt(page_source, cache_path)
        print("成功保存代理网页源代码到本地")

    soup = BeautifulSoup(read_file_content(cache_path), "html.parser")
    rows = soup.findAll('tr')

    # Skip the header row; columns 1/2/5 hold ip / port / protocol type.
    ip_list = []
    for row in rows[1:]:
        cells = row.findAll("td")
        ip_list.append([cells[1].text, cells[2].text, cells[5].text])
    print("成功获取iplist")
    return ip_list

def call_website_loop(count, url):
    """Fetch a hospital page via rotating proxies and extract its province.

    Tries each proxy until one request succeeds, then parses the page; the
    whole routine retries (after 5 s) if no proxy worked or the page does not
    contain exactly one `省` row.

    :param count: page number, used only in log messages
    :param url: hospital detail page URL
    :return: (province_name, page_source) tuple
    """
    ip_list = get_proxy_list()
    html_body_opener = None
    html_body_source = None
    print("一共获取了" + str(len(ip_list)) + "个IP")
    for ip in ip_list:
        proxy = generate_proxy_content_per(ip)
        print(">>>>>>>>>>>开始第" + str(count) + "次请求:" + url)
        # Random User-Agent per attempt.
        ua = random.choice(user_agent_list)
        header = {"User-Agent": ua}
        try:
            requests.packages.urllib3.disable_warnings()
            html_body_opener = requests.get(url, verify=False, headers=header, timeout=3, proxies=proxy)
            html_body_source = html_body_opener.content.decode("utf-8")
            print(">>>>>>>>>>>第" + str(count) + "次请求成功:" + url)
            break
        except Exception:
            print("请求失败，更换代理IP")
            continue

    # Fix: when every proxy failed, the original fell through with
    # html_body_opener still None and crashed on `.content`; retry instead.
    if html_body_opener is None:
        time.sleep(5)
        print("请求回来的网页有问题，正在重试")
        return call_website_loop(count, url)

    root = etree.HTML(html_body_opener.content)
    tds = root.xpath('//tr[./th/text()="省"]/td/span')

    if len(tds) != 1:
        time.sleep(5)
        print("请求回来的网页有问题，正在重试")
        return call_website_loop(count, url)
    else:
        province_name = tds[0].text.replace(' ', '').replace("\n", "")
        return province_name, html_body_source

if __name__ == '__main__':
    start_time = time.time()
    print("开始执行时间" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

    # Create the output tree and fetch the list of known provinces.
    province_list, data_dir_root = initial()
    initial_province_dir(data_dir_root, province_list)
    extra_dir_full_path = os.path.join(data_dir_root, dir_extra)

    # Pause before crawling the detail pages (basic rate limiting).
    time.sleep(20)
    # Removed dead code from the original: `page_num = 38994` was immediately
    # overwritten by the loop variable, and `begin_time` was never used.

    # range() excludes end_page_num, so pages start..end-1 are fetched.
    for page_num in range(start_page_num, end_page_num):
        # e.g. https://db.yaozh.com/hmap/38994.html
        hospital_url = page_base_url + str(page_num) + '.html'

        province_name, page_body_source = call_website_loop(page_num, hospital_url)

        save_page_source_into_txt(data_dir_root, extra_dir_full_path, province_name, page_body_source, province_list,
                                  page_num)

    print("共计耗时" + str(time.time() - start_time) + 's')



