# coding=utf-8
# China Administrative Regions
# By kevin
# Email: kevinwolfstudio@qq.com

import urllib.request
from bs4 import BeautifulSoup
import uuid
import pymysql
import pypinyin
from pypinyin import pinyin, lazy_pinyin

mysql_host = '192.168.1.75'
mysql_db = 'qinyuan'
mysql_user = 'root'
mysql_password = 'root'
mysql_port = 3306

Stitching_URL = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2017/'

class Pinyin:
    """Convert a Chinese place name into its concatenated pinyin romanization."""

    # Maximum number of pinyin syllables kept when the name carries no
    # recognised administrative suffix (preserves the original cap of 15).
    _MAX_SYLLABLES = 15

    def Get_pinyin(self, cn_name):
        """Return the pinyin of ``cn_name`` with its admin suffix dropped.

        A trailing "自治州" (autonomous prefecture, 3 chars) or a trailing
        "市" (city) / "盟" (league, 1 char) is stripped before conversion;
        any other name keeps at most ``_MAX_SYLLABLES`` syllables.

        :param cn_name: Chinese place name, e.g. "北京市".
        :return: lowercase pinyin string without tone marks or separators.
        """
        if cn_name.endswith("自治州"):
            keep = len(cn_name) - 3
        elif cn_name.endswith(("市", "盟")):
            keep = len(cn_name) - 1
        else:
            keep = self._MAX_SYLLABLES

        # lazy_pinyin yields one syllable per Chinese character; joining a
        # slice replaces the original counter loop + quadratic "+" concat.
        return "".join(lazy_pinyin(cn_name)[:keep])

class City_Name:
    """Scrape the prefecture-level city list of one province."""

    def Get_City_Name(self, province_ID, ProvinceName, ProvinceID_URL):
        """Walk one province page and recurse into every city on it.

        The stats.gov.cn pages alternate short <a> tags in pairs: the odd
        entry holds the numeric region code, the even entry holds the city
        name and the link to its county page.

        :param province_ID: two-digit province code.
        :param ProvinceName: Chinese name of the province.
        :param ProvinceID_URL: absolute URL of the province page.
        """
        anchors = Html_Content().GetHtmlContent(ProvinceID_URL)
        county_scraper = County_Name()
        City_Name_ID = ''
        pair_counter = 0  # odd = code cell, even = name cell

        for anchor in anchors:
            href = anchor.get('href')
            # Guard: anchors without href would crash len(None); long hrefs
            # are non-navigation links and are skipped as in the original.
            if href is None or len(href) >= 15:
                continue
            pair_counter += 1
            if pair_counter % 2 != 0:
                City_Name_ID = anchor.get_text()[:4]
            else:
                city_name = anchor.get_text()
                city_url = Stitching_URL + href
                county_scraper.Get_County_Name(province_ID, ProvinceName, city_name, city_url, City_Name_ID)
        return ()

class County_Name:
    """Scrape county-level rows for one city and insert them into MySQL."""

    def Get_County_Name(self, province_ID, ProvinceName, City_Name, City_url, City_Name_ID):
        """Parse the county page of one city and write each county to the DB.

        Anchors come in pairs (odd = 6-digit county code, even = county
        name); every complete pair becomes one INSERT into `address_new`.

        :param province_ID: two-digit province code (a '0' is appended to
            match the stored format).
        :param ProvinceName: Chinese province name.
        :param City_Name: Chinese city name; "市辖区" is replaced by the
            province name, as municipality districts have no real city.
        :param City_url: absolute URL of the city's county page.
        :param City_Name_ID: four-digit city code.
        """
        db = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, db=mysql_db, port=mysql_port, charset="utf8")
        try:
            cursor = db.cursor()
            get_pinyin = Pinyin()
            province_ID = province_ID + '0'
            County_Name_ID = ''
            anchors = Html_Content().GetHtmlContent(City_url)
            pair_counter = 0  # odd = code cell, even = name cell

            # Parameterized placeholders: the driver escapes every value,
            # fixing the quoting/injection hazard of the old %-formatting.
            sql = """INSERT INTO `address_new` VALUES
            (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"""

            for anchor in anchors:
                href = anchor.get('href')
                # Skip anchors without href (len(None) raises) and long links.
                if href is None or len(href) >= 15:
                    continue
                pair_counter += 1
                if pair_counter % 2 != 0:
                    County_Name_ID = anchor.get_text()[:6]
                    continue

                county_name = anchor.get_text()
                if City_Name == '市辖区':
                    City_Name = ProvinceName

                data = (
                    str(uuid.uuid1()),  # str() so pymysql can escape it
                    City_Name_ID,
                    City_Name,
                    get_pinyin.Get_pinyin(City_Name),
                    3,
                    City_Name,
                    province_ID,
                    ProvinceName,
                    get_pinyin.Get_pinyin(ProvinceName),
                    County_Name_ID,
                    county_name,
                    get_pinyin.Get_pinyin(county_name),
                )
                cursor.execute(sql, data)
                # Commit per row, as the original did, so partial runs persist.
                db.commit()
        finally:
            # Always release the connection (the original leaked it).
            db.close()
        return ()

# Fetch URL page content; the data source here is the National Bureau of
# Statistics (stats.gov.cn).
class Html_Content:
    """Download a page and return all of its <a> tags."""

    def GetHtmlContent(self, url):
        """Fetch ``url``, decode it as GBK and return every <a> tag.

        :param url: absolute URL to fetch.
        :return: bs4 ResultSet of <a> Tag objects found on the page.
        """
        # Spoof a desktop browser so the site does not reject the request.
        http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                      'Chrome/51.0.2704.63 Safari/537.36'}
        req = urllib.request.Request(url=url, headers=http_headers)
        # Context manager closes the HTTP response deterministically
        # (the original leaked the socket).
        with urllib.request.urlopen(req) as res:
            raw = res.read()
        # The site serves GBK; drop any undecodable bytes.
        html = raw.decode('GBK', 'ignore')
        soup = BeautifulSoup(html, 'html.parser')
        return soup.find_all('a')


if __name__ == '__main__':
    # Entry point: fetch the top-level province index, then walk every
    # province -> city -> county, inserting rows as a side effect.
    index_url = Stitching_URL + 'index.html'
    province_anchors = Html_Content().GetHtmlContent(index_url)
    city_scraper = City_Name()

    for anchor in province_anchors:
        href = anchor.get('href')
        # Guard: anchors without href would crash len(None); long hrefs are
        # non-navigation links and are skipped as in the original.
        if href is None or len(href) >= 15:
            continue
        province_ID = href[:2]  # first two digits are the province code
        ProvinceName = anchor.get_text()
        province_url = Stitching_URL + href
        city_scraper.Get_City_Name(province_ID, ProvinceName, province_url)



