# -*- coding=utf-8 -*-

import time
import urllib.parse
import requests
import sys
import re
import os
import os.path
from lxml import etree

"""
主要目的是在demo1的基础上，
将https://db.yaozh.com/hmap/38994.html中的页面中的数据放到所属的文件夹中，
不能匹配的就放到extra文件夹中
url是https://db.yaozh.com/hmap/38994.html
"""

# Maximum number of request retries before giving up on a URL.
sum_retries=1000
# Output layout: <cwd>/<level1_dir>/<level2_dir>_<timestamp>/<province>/...
level1_dir='output'
level2_dir='demo2'
# Folder name for pages whose province is not in the scraped province list.
dir_extra='extra'
# Hospital detail pages live at <page_base_url><page_num>.html
page_base_url='https://db.yaozh.com/hmap/'
# Page-id range crawled by __main__ (end is exclusive, range() semantics).
start_page_num=1
end_page_num=100

def group_count(groups, count):
    """Split the integers 1..count into `groups` buckets with roughly equal sums.

    Greedy longest-processing-time heuristic: values are taken in descending
    order and each one is appended to the bucket with the smallest current sum.

    :param groups: number of buckets to create
    :param count: the values 1..count (inclusive) are distributed
    :return: list of `groups` lists whose element sums are balanced
    """
    load_balance_groups = [[] for _ in range(groups)]

    # Descending values; replaces the old build-then-sort(reverse=True) dance
    # and the dead `groups = groups` no-op.
    for v in range(count, 0, -1):
        # Stable sort puts the lightest bucket first; drop the value there.
        load_balance_groups.sort(key=sum)
        load_balance_groups[0].append(v)
    return load_balance_groups

def generate_province_list(province_msg):
    """Extract province names from the scraped `data-list` fragment.

    The first match (index 0) is the aggregate pseudo entry (e.g. "全部")
    and is deliberately skipped, as in the original index-1 loop.

    :param province_msg: raw text containing `"name":"...","val"` pairs
    :return: list of province names, excluding the first match
    """
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    pat_province_name = r'"name":"([\s\S]*?)","val"'
    matches = re.findall(pat_province_name, province_msg)
    # Slice replaces the manual range(1, len(...)) append loop.
    return matches[1:]

#requests.get("http://example.org", proxies=proxies)
def request_webpage_url(url, proxies, num_retries):
    """GET `url` (TLS verification disabled), retrying on failure.

    :param url: target URL
    :param proxies: requests-style proxies dict, or None
    :param num_retries: remaining retry budget; counts down to 0
    :return: the requests Response, or None when every retry failed
    """
    try:
        # The scraped site has certificate problems, so verification is
        # disabled and the resulting urllib3 warning is silenced.
        requests.packages.urllib3.disable_warnings()
        body_response = requests.get(url, verify=False, proxies=proxies)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the dead sys.exc_info() unpacking is gone.
        body_response = None
        if num_retries > 0:
            print("request请求失败，开始重试，第" + str(sum_retries - num_retries + 1) + "次")
            time.sleep(5)
            return request_webpage_url(url, proxies, num_retries - 1)
    return body_response

def request_web_body(url):
    """Fetch `url` and return the province `data-list` fragment of the page.

    Retries (recursively) until the page contains exactly one match for the
    expected `data-names="province"` attribute block.

    :param url: listing-page URL to scrape
    :return: raw text captured between the data-list markers
    """
    # Raw string: avoids invalid escape sequences in the plain literal.
    regex_rule = r'data-names="province" data-list=([\s\S]*?)data-src="">'
    html_body_response = request_webpage_url(url, None, sum_retries)
    if html_body_response is None:
        # Fix: previously crashed with AttributeError when every retry inside
        # request_webpage_url failed; treat it like a bad page and retry.
        time.sleep(5)
        print("请求回来的网页有问题，正在重试")
        return request_web_body(url)
    # The page embeds escaped unicode, hence the unicode-escape decode.
    html_body_source = html_body_response.content.decode("unicode-escape")
    result = re.compile(regex_rule).findall(html_body_source)

    if len(result) != 1:
        time.sleep(5)
        print("请求回来的网页有问题，正在重试")
        return request_web_body(url)
    return result[0]

def create_dir(dirpath):
    """Create `dirpath` (including parents) if it does not exist yet.

    :param dirpath: directory path to create
    :return: True when the directory was created, False when it already existed
    """
    # EAFP: attempting the creation directly avoids the TOCTOU race of the
    # old exists()-then-makedirs() sequence.
    try:
        os.makedirs(dirpath)
        return True
    except FileExistsError:
        return False

def root_dir_initial():
    """Build and create the timestamped output root for this run.

    Layout: <cwd>/<level1_dir>/<level2_dir>_<YYYYmmddHHMM00>

    :return: absolute path of the newly prepared data root directory
    """
    # Seconds are pinned to "00" so reruns within the same minute share a dir.
    run_timestamp = time.strftime("%Y%m%d%H%M00", time.localtime())
    run_dir_name = level2_dir + '_' + run_timestamp
    data_dir_root_fullpath = os.path.join(
        os.path.abspath('.'), level1_dir, run_dir_name)

    create_dir(data_dir_root_fullpath)
    return data_dir_root_fullpath

def initial():
    """Prepare the run: create the output root and fetch the province list.

    :return: (province_list, data_dir_root) tuple
    """
    data_dir_root = root_dir_initial()
    # Query values must be percent-encoded for the listing URL.
    # (Removed unused locals base_p / base_pagesize.)
    base_grade = urllib.parse.quote("全部")
    base_type = urllib.parse.quote("全部")
    base_province = urllib.parse.quote("北京市")
    base_url = "https://db.yaozh.com/hmap?name=&grade=%s&address=&type=%s&bedstr=&bedend=&province=%s" % (
        base_grade, base_type, base_province)
    province_msg = request_web_body(base_url)
    province_list = generate_province_list(province_msg)

    return province_list, data_dir_root

def initial_province_dir(parentdirpath, province_list):
    """Create one sub-directory per province plus the `extra` fallback dir.

    :param parentdirpath: run root directory the sub-dirs are created under
    :param province_list: province names to create directories for
    """
    for province_name in province_list:
        create_dir(os.path.join(parentdirpath, province_name))
    # Bug fix: the extra dir was built from the global `data_dir_root`
    # (defined only under __main__) instead of the parameter, so this
    # function only worked by accident when called from the script body.
    create_dir(os.path.join(parentdirpath, dir_extra))
    print("文件夹初始化成功")

def request_hospital_body(url):
    """Fetch a hospital detail page and extract its province name.

    Retries (recursively) until the page contains exactly one
    `<tr><th>省</th><td><span>` cell.

    :param url: hospital detail-page URL
    :return: (province_name, decoded page source) tuple
    """
    print(url)
    response = request_webpage_url(url, None, sum_retries)
    page_source = response.content.decode("utf-8")
    # Parse the raw bytes and locate the single "province" table cell.
    document = etree.HTML(response.content)
    province_cells = document.xpath('//tr[./th/text()="省"]/td/span')

    if len(province_cells) == 1:
        # Normalise the cell text: strip padding spaces and newlines.
        province_name = province_cells[0].text.replace(' ', '').replace("\n", "")
        return province_name, page_source
    time.sleep(5)
    print("请求回来的网页有问题，正在重试")
    return request_hospital_body(url)


'''
@to 将在列表中的省市放到所属文件夹中，如果不在列表中，就放到extra文件夹中
'''

def save_page_source_into_txt(tgt_file_root_dir, tgt_file_extra_dir, province_name,
                              file_content, province_list, file_index):
    """Persist one page source under its province folder (or the extra folder).

    :param tgt_file_root_dir: run root holding the per-province folders
    :param tgt_file_extra_dir: fallback folder for unknown provinces
    :param province_name: province extracted from the page
    :param file_content: page source text to write
    :param province_list: known province names
    :param file_index: page number, used as the file name stem
    """
    file_name = str(file_index) + '.txt'
    if province_name in province_list:
        file_fullpath = os.path.join(tgt_file_root_dir, province_name, file_name)
    else:
        print("省市不在列表中:" + province_name)
        file_fullpath = os.path.join(tgt_file_extra_dir, file_name)
    # Write the page content into the folder it belongs to.
    write_to_txt(file_content, file_fullpath)


def write_to_txt(content, filename):
    """Write `content` to `filename` as UTF-8, replacing any existing file.

    :param content: text to write
    :param filename: destination file path
    """
    # Mode "w" truncates an existing file, so the old remove-before-write
    # step was redundant; the context manager guarantees the handle is
    # closed even if the write raises.
    with open(filename, "w", encoding='utf-8') as outfile:
        outfile.write(content)
    print("文件保存成功:" + filename)

if __name__ == '__main__':
    start_time = time.time()
    print("开始执行时间" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

    # Output root + province folders (plus the `extra` fallback folder).
    province_list, data_dir_root = initial()
    initial_province_dir(data_dir_root, province_list)
    extra_dir_full_path = os.path.join(data_dir_root, dir_extra)

    # Pause between the listing request and the page crawl to go easy on
    # the server.
    time.sleep(20)

    # Removed the dead `page_num = 38994` (immediately overwritten by the
    # loop) and the unused `begin_time`. end_page_num is exclusive.
    for page_num in range(start_page_num, end_page_num):
        # e.g. https://db.yaozh.com/hmap/38994.html
        hospital_url = page_base_url + str(page_num) + '.html'
        province_name, page_body_source = request_hospital_body(hospital_url)

        save_page_source_into_txt(data_dir_root, extra_dir_full_path, province_name,
                                  page_body_source, province_list, page_num)

    print("共计耗时" + str(time.time() - start_time) + 's')



