from urllib.request import urlopen
from urllib.request import Request
from urllib import parse
from bs4 import BeautifulSoup as bs
import pymysql.cursors

"""
从学生体质健康网获取学校ID和学校名称
信息包含省/直辖市编码名称、地市编码及名称、区县编码及名称、学校编码及名称
"""
host = "127.0.0.1"
user = "root"
password = "root"
db = "common"


class school_info:
    """One school record scraped from www.csh.edu.cn.

    The attributes mirror the columns of the `school_info` table this
    script writes to; `detail_addre` is accepted for completeness but is
    not persisted by the SQL in this file.
    """

    def __init__(self, school_id=None, school_name=None, cic_code=None, province_id=None, province_name=None,
                 city_id=None, city_name=None, district_id=None, district_name=None, detail_addre=None):
        self.school_id = school_id          # school code
        self.school_name = school_name      # school name
        self.cic_code = cic_code            # first 2 chars of the school code
        self.province_id = province_id      # 2-char province code
        self.province_name = province_name  # province name
        self.city_id = city_id              # 4-char city code
        self.city_name = city_name          # city name
        self.district_id = district_id      # 6-char district code
        self.district_name = district_name  # district name
        self.detail_addre = detail_addre    # detailed address (unused downstream)

    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return "school_info(school_id=%r, school_name=%r, district_id=%r)" % (
            self.school_id, self.school_name, self.district_id)


def get_csh_page(url, parent_id, page_size, page_index):
    """Fetch one page from www.csh.edu.cn and return its body as text.

    :param url: absolute URL to request
    :param parent_id: parent department id, posted as
        ``mdepartmentExt.parentId`` (ignored when ``page_size`` is None)
    :param page_size: page size for the POST; ``None`` means a plain GET
    :param page_index: page index for the POST (ignored for GET)
    :return: response body decoded as UTF-8
    """
    if page_size is None:
        # Plain GET. Use a context manager so the HTTP connection is
        # closed even if read/decode raises (the original leaked it).
        with urlopen(url) as response:
            return response.read().decode("utf-8")
    # POST with the paging form fields the site expects.
    post_data = parse.urlencode([
        ("mdepartmentExt.xxjgmc", ""),
        ("mdepartmentExt.parentId", parent_id),
        ("pageSize", page_size),
        ("pageIndex", page_index),
    ])
    req = Request(url)
    req.add_header("Host", "www.csh.edu.cn")
    req.add_header("Origin", "http://www.csh.edu.cn")
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36")
    with urlopen(req, data=post_data.encode("utf-8")) as response:
        return response.read().decode("utf-8")


def get_schools(province_id, province_name, city_id, city_name, district_id, district_name, district_url):
    """Scrape one district's school list and return `school_info` objects.

    :param province_id: province code (at least 2 chars; truncated to 2)
    :param province_name: province name
    :param city_id: city code (at least 4 chars; truncated to 4; also sent
        as the POST parentId — NOTE(review): looks intentional, but confirm
        the site does not expect the district id here)
    :param city_name: city name
    :param district_id: district code (at least 6 chars; truncated to 6)
    :param district_name: district name
    :param district_url: relative URL of the district's school-list page
    :return: list of `school_info` (possibly empty)
    """
    # Fetch before truncating: the POST uses the full city code.
    page = get_csh_page("http://www.csh.edu.cn%s" % district_url, city_id, 10000, 1)
    # Normalize the region codes once, outside the row loop (the original
    # re-sliced — and mutated — these parameters on every iteration).
    province_id = province_id[0:2]
    city_id = city_id[0:4]
    district_id = district_id[0:6]
    school_list = []
    soup = bs(page, "html.parser")
    for row in soup.select("table > tr"):
        name_cell = row.select_one("td.h_tdListCenter")
        id_cell = row.select_one("td.h_tdListCenter1")
        if name_cell is None or id_cell is None:
            continue  # header/filler rows lack the two data cells
        school_name = name_cell.string.strip()
        school_id = id_cell.string.strip()
        cic_code = school_id[0:2]
        print(
            "province_id=%s, province_name=%s, city_id=%s, city_name=%s, district_id=%s, district_name=%s,school_id=%s ,school_name=%s" %
            (province_id, province_name, city_id, city_name, district_id, district_name, school_id, school_name))
        # Skip rows with an empty id or name (strip() never returns None,
        # so truthiness covers both the None and "" checks).
        if school_id and school_name:
            school_list.append(
                school_info(school_id, school_name, cic_code, province_id, province_name, city_id, city_name,
                            district_id, district_name))
    return school_list


def get_districts(province_id, province_name, city_id, city_name, city_url):
    """Scrape every district of one city and persist its schools.

    :param province_id: province code
    :param province_name: province name
    :param city_id: city code
    :param city_name: city name
    :param city_url: relative URL of the city page
    :return: None
    """
    page = get_csh_page("http://www.csh.edu.cn%s" % city_url, city_id, 1, 1)
    soup = bs(page, "html.parser")
    # The third "h_select_line" div holds the district links. Using
    # enumerate fixes a bug in the original, where an inner `i = 0` used
    # for SQL building clobbered this outer counter.
    for idx, child in enumerate(soup.select("div.h_select_line")):
        if idx != 2:
            continue
        for district_child in child.select("div.h_select_line_right > a"):
            district_name = district_child.string.strip()
            district_href = district_child.get("href").strip()
            print("开始下载区县：%s" % (district_name))
            school_list = get_schools(province_id, province_name, city_id, city_name,
                                      district_href[-12:], district_name, district_href)
            if school_list:
                _save_schools(school_list)
            print("区县“%s”已下载结束！" % (district_name))


def _save_schools(school_list):
    """Bulk-upsert scraped schools into `school_info`.

    Uses a parameterized ``executemany`` instead of the original
    string-interpolated VALUES list, which was open to SQL injection and
    broke on names containing a single quote.
    """
    connection = pymysql.connect(host=host, user=user, password=password, db=db, charset="utf8mb4")
    try:
        with connection.cursor() as cursor:
            sql = ("replace into `school_info`(`school_id`,`school_name`,`cic_code`,`province_id`,"
                   "`province_name`,`city_id`,`city_name`,`district_id`,`district_name`) "
                   "values (%s,%s,%s,%s,%s,%s,%s,%s,%s)")
            cursor.executemany(sql, [
                (si.school_id, si.school_name, si.cic_code, si.province_id,
                 si.province_name, si.city_id, si.city_name, si.district_id,
                 si.district_name)
                for si in school_list
            ])
        connection.commit()
    finally:
        connection.close()


def get_citys(province_id, province_name, province_url):
    """Download every city of the given province.

    :param province_id: province code
    :param province_name: province name
    :param province_url: relative URL of the province page
    :return: None
    """
    print("开始下载省份：" + province_name)
    page = get_csh_page("http://www.csh.edu.cn%s" % (province_url), province_id, None, 1)
    soup = bs(page, "html.parser")
    # Only the second "h_select_line" div contains the city links.
    for idx, line_div in enumerate(soup.select("div.h_select_line")):
        if idx != 1:
            continue
        for city_link in line_div.select("div.h_select_line_right > a"):
            city_name = city_link.string.strip()
            city_href = city_link.get("href").strip()
            print("开始下载地市：%s" % (city_name))
            # The last 12 chars of the href are the city code.
            get_districts(province_id, province_name, city_href[-12:], city_name, city_href)
            print("地市“%s”已下载结束！" % (city_name))
    print("省份“%s”已下载结束！" % (province_name))


def get_provinces(province_ids):
    """Download schools for the given provinces (all provinces when None).

    :param province_ids: iterable of province ids (ints or strings), a
        single int/str id, or None for every province. Accepting a bare
        int is a backward-compatible generalization: ``(34)`` is an int,
        not a tuple, and used to crash.
    :return: None
    """
    url = "http://www.csh.edu.cn/moetc/mdepartmentExtAction!toMdepartmentExtListWdOuter.action"
    page = get_csh_page(url, None, None, None)
    soup = bs(page, "html.parser")
    # Normalize the filter once to a set of string codes for O(1) lookup
    # (the original rescanned the tuple for every province link).
    wanted = None
    if province_ids is not None:
        if isinstance(province_ids, (int, str)):
            province_ids = (province_ids,)
        wanted = {str(pid) for pid in province_ids}
    # Only the first "h_select_line" div contains the province links.
    for idx, child in enumerate(soup.select("div.h_select_line")):
        if idx != 0:
            continue
        for province_child in child.select("div.h_select_line_right > a"):
            href = province_child.get("href").strip()
            # Chars [-12:-10] of the href are the 2-char province code.
            if wanted is None or href[-12:-10] in wanted:
                get_citys(href[-12:], province_child.string.strip(), href)


if __name__ == '__main__':
    # Examples kept from earlier runs:
    # get_provinces(None)                       # every province
    # get_provinces((34,))                      # Anhui (安徽省)
    # get_provinces((54, 61, 62, 63, 64, 65))   # Tibet, Shaanxi, Gansu,
    #                                           # Qinghai, Ningxia, Xinjiang
    get_provinces((1, 66))