from urllib.request import urlopen
from urllib.request import Request
from urllib import parse
from bs4 import BeautifulSoup as bs
import pymysql.cursors

"""
从学生体质健康网获取省市县代码
信息包含省/直辖市编码名称、地市编码及名称、区县编码及名称
"""


def addArea(id, name, perant_id):
    """
    Insert (or replace) one area record in the `common_area` table.

    :param id: area code
    :param name: area name
    :param perant_id: parent area code (0 for top-level provinces)
    :return: None
    """
    # One short-lived connection per insert; closed unconditionally below.
    connection = pymysql.connect(host="127.0.0.1", user="root", password="root", db="ems", charset="utf8mb4")
    try:
        # Cursor is released by the `with` block even if execute() raises.
        with connection.cursor() as cursor:
            # REPLACE keeps repeated crawler runs idempotent on the primary key.
            cursor.execute(
                "replace into `common_area`(`id`,`name`,`perant_id`) values(%s,%s,%s)",
                (id, name, perant_id),
            )
            connection.commit()
    finally:
        connection.close()


def getDistricts(province_id, province_name, city_id, city_name, city_url):
    """
    Fetch the district (county) list for one city and store each district.

    :param province_id: 12-digit province code (currently unused; kept for the call signature)
    :param province_name: province name (currently unused; kept for the call signature)
    :param city_id: 12-digit city code; its first 4 digits form the parent key
    :param city_name: city name (currently unused; kept for the call signature)
    :param city_url: relative URL (href) of the city page on www.csh.edu.cn
    :return: None
    """
    post_data = parse.urlencode([
        ("mdepartmentExt.xxjgmc", ""),
        ("mdepartmentExt.parentId", city_id),
        ("pageSize", 1),
        ("pageIndex", 1)
    ])
    # Build the request against the absolute site URL.
    req = Request("http://www.csh.edu.cn%s" % city_url)
    req.add_header("Host", "www.csh.edu.cn")
    req.add_header("Origin", "http://www.csh.edu.cn")
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36")
    # Context manager closes the HTTP response (the original leaked the socket).
    with urlopen(req, data=post_data.encode("utf-8")) as response:
        page = response.read().decode("utf-8")
    soup = bs(page, "html.parser")
    # Only the third "h_select_line" block holds the district links.
    for index, child in enumerate(soup.select("div.h_select_line")):
        if index != 2:
            continue
        for district_link in child.select("div.h_select_line_right > a"):
            # href ends with a 12-digit area code; chars [-12:-6] are the
            # 6-digit district code stored against the 4-digit city prefix.
            href = district_link.get("href").strip()
            addArea(href[-12:-6], district_link.string.strip(), city_id[0:4])


def getCitys(province_id, province_name):
    """
    Store the province itself, then fetch its city list and recurse into
    each city's districts.

    :param province_id: 12-digit province code (e.g. "340000000000")
    :param province_name: province name
    :return: None
    """
    # Persist the province first, keyed by its 2-digit prefix under root (0).
    addArea(province_id[0:2], province_name, 0)
    # "Click" the province to obtain its first-level (city) list.
    province_url = "http://www.csh.edu.cn/moetc/mdepartmentExtAction!toMdepartmentExtListWdOuter.action?cityId=%s&mdepartmentExt.type=3" % (
        province_id)
    post_data = parse.urlencode([
        ("mdepartmentExt.xxjgmc", ""),
        ("mdepartmentExt.parentId", province_id),
        ("pageSize", 1),
        ("pageIndex", 1)
    ])
    req = Request(province_url)
    req.add_header("Host", "www.csh.edu.cn")
    req.add_header("Origin", "http://www.csh.edu.cn")
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36")
    # Context manager closes the HTTP response (the original leaked the socket).
    with urlopen(req, data=post_data.encode("utf-8")) as response:
        page = response.read().decode("utf-8")
    soup = bs(page, "html.parser")
    # Only the second "h_select_line" block holds the city links.
    for index, child in enumerate(soup.select("div.h_select_line")):
        if index != 1:
            continue
        for city_link in child.select("div.h_select_line_right > a"):
            # Hoisted: the original recomputed .get("href").strip() three times.
            href = city_link.get("href").strip()
            city_name = city_link.string.strip()
            # href's last 12 characters are the city's full area code.
            getDistricts(province_id, province_name, href[-12:], city_name, href)
            # Store the city under the province's 2-digit prefix,
            # keyed by the city code's 4-digit prefix.
            addArea(href[-12:-8], city_name, province_id[0:2])


getCitys("340000000000", "安徽省")
