import random

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import csv
import os
from datetime import datetime

class SpiderHongheikuPopulationRankData():
    """Scraper for ranking tables (population / GDP / income) on hongheiku.com.

    Each ``__get_*__`` method drives a headless Chrome browser to one ranking
    page, reads the HTML table and exports it as a CSV file under ``self.dir``.
    """

    def __init__(self):
        # Root URL of the site being scraped.
        self.host = 'https://www.hongheiku.com'
        # CSV output directory (nested inside first_dir).
        self.dir = './document/hongheiku/'
        self.first_dir = './document/'

    def __get_population_rank_by_province__(self):
        '''Latest nationwide population ranking, by province.'''
        print('全国各省最新人口排名数据爬虫开始执行，请稍后。。。')
        driver = self.__create_driver__(self.host)
        try:
            article = driver.find_element(by=By.TAG_NAME, value='article')
            # The page title lives in the second <p>, wrapped in a <strong>.
            title = article.find_elements(by=By.TAG_NAME, value='p')[1] \
                .find_element(by=By.TAG_NAME, value='strong').text
            rank_elements = article.find_element(by=By.TAG_NAME, value='table') \
                .find_element(by=By.TAG_NAME, value='tbody') \
                .find_elements(by=By.TAG_NAME, value='tr')
            header_data = []
            # The second header row only covers trailing columns; pad the front.
            sec_header_data = ['', '']
            row_datas = []
            for i, tr in enumerate(rank_elements):
                tds = tr.find_elements(by=By.TAG_NAME, value='td')
                if i == 0:
                    for j, td in enumerate(tds):
                        if j == 3:
                            # Blank filler column keeps the header aligned
                            # with the data rows.
                            header_data.append('')
                        header_data.append(td.text)
                elif i == 1:
                    sec_header_data.extend(td.text for td in tds)
                else:
                    row_datas.append([td.text for td in tds])
            self.__export_csv__(title, header_data, sec_header_data, row_datas)
            time.sleep(5)
        finally:
            # Always release the browser, even when scraping fails mid-way.
            driver.quit()

    def __get_population_rank_from_province__(self, province_name):
        '''Latest population ranking of cities inside one province.'''
        print(f'[{province_name}]省市最新人口排名数据爬虫开始执行，请稍后。。。')
        self.__scrape_rank_table__(
            self.host + f'/tag/{province_name}',
            f'{province_name}最新人口排名数据',
            ['排名', '地区', '常住人口', '六普人口'])

    def __get_gdp_rank_by_province__(self):
        '''Latest nationwide GDP ranking, by province.'''
        print('全国各省最新GDP排名数据爬虫开始执行，请稍后。。。')
        self.__scrape_rank_table__(
            self.host + '/tag/各省GDP',
            '全国各省最新GDP排名数据',
            ['排名', '地区', 'GDP', '所属年度'])

    def __get_disposable_income_rank_by_province__(self):
        '''Latest nationwide disposable-income ranking, by province.'''
        print('全国各省最新可支配收入排名数据爬虫开始执行，请稍后。。。')
        self.__scrape_rank_table__(
            self.host + '/tag/各省收入',
            '全国各省最新可支配收入排名数据',
            ['排名', '地区', '城镇居民收入', '乡村居民收入'])

    def __get_population_rank_from_word__(self, name):
        '''Latest population ranking for a world region or country.'''
        print(f'{name}最新人口排名数据爬虫开始执行，请稍后。。。')
        self.__scrape_rank_table__(
            self.host + f'/tag/{name}',
            f'{name}所属区最新人口排名数据',
            ['排名', '地区', '常住人口'],
            # World pages carry one extra trailing cell that is not exported.
            drop_last_column=True)

    def __get_gdp_rank_by_city__(self):
        '''GDP ranking of all prefecture-level cities (paginated listing).'''
        print('全国各地级市GDP排名数据爬虫开始执行，请稍后。。。')
        url = self.host + '/category/gdjsgdp'
        driver = self.__create_driver__(url)
        title = '全国各地级市GDP排名数据'
        header_data = ['排名', '地区', 'GDP', '所属年度']
        row_datas = []
        try:
            last_href = driver.find_element(by=By.CLASS_NAME, value='content') \
                .find_element(by=By.CLASS_NAME, value='pagination') \
                .find_element(by=By.TAG_NAME, value='ul') \
                .find_elements(by=By.TAG_NAME, value='li')[-2] \
                .find_element(by=By.TAG_NAME, value='a').get_attribute('href')
            # Take the whole trailing path segment ('.../page/12' -> 12); the
            # old code read only the last character and broke past page 9.
            total_page = int(last_href.rstrip('/').rsplit('/', 1)[-1])
            # range(1, total_page) used to drop the final page; include it.
            for page_num in range(1, total_page + 1):
                if page_num > 1:
                    # Release the previous page's browser before opening a new
                    # one; the old code leaked every intermediate driver.
                    driver.quit()
                    driver = self.__create_driver__(url + '/page/' + str(page_num))
                    # Light random delay between pages to be polite.
                    time.sleep(random.randint(1, 3))
                rank_elements = driver.find_element(by=By.TAG_NAME, value='article') \
                    .find_element(by=By.TAG_NAME, value='table') \
                    .find_element(by=By.TAG_NAME, value='tbody') \
                    .find_elements(by=By.TAG_NAME, value='tr')
                for tr in rank_elements:
                    tds = tr.find_elements(by=By.TAG_NAME, value='td')
                    if tds:
                        row_datas.append([td.text for td in tds])
            self.__export_csv__(title, header_data, [], row_datas)
            time.sleep(5)
        finally:
            driver.quit()

    def __scrape_rank_table__(self, url, title, header_data, drop_last_column=False):
        '''Fetch one ranking page, read its table, and export rows to CSV.

        Shared body of the single-page ranking scrapers (they previously
        duplicated this loop verbatim).

        drop_last_column: skip the final <td> of every row (used by the
        world-region pages whose trailing cell is not exported).
        '''
        driver = self.__create_driver__(url)
        try:
            rank_elements = driver.find_element(by=By.TAG_NAME, value='article') \
                .find_element(by=By.TAG_NAME, value='table') \
                .find_element(by=By.TAG_NAME, value='tbody') \
                .find_elements(by=By.TAG_NAME, value='tr')
            row_datas = []
            for tr in rank_elements:
                tds = tr.find_elements(by=By.TAG_NAME, value='td')
                if tds:
                    cells = [td.text for td in tds]
                    row_datas.append(cells[:-1] if drop_last_column else cells)
            self.__export_csv__(title, header_data, [], row_datas)
            time.sleep(5)
        finally:
            driver.quit()

    def __create_driver__(self, url):
        '''Create a headless Chrome driver and navigate it to url.'''
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        options.add_argument("--disable-gpu")
        driver = webdriver.Chrome(options=options)
        driver.get(url)
        # Give dynamically rendered elements up to 3s to appear on lookups.
        driver.implicitly_wait(3)
        return driver

    def __export_csv__(self, title, header_data, sec_header_data, row_datas):
        '''Write header row(s) plus data rows to "<self.dir><title>.csv".'''
        self.__create_dir__(self.dir, self.first_dir)
        # Explicit UTF-8 keeps Chinese text intact regardless of the
        # platform's default locale encoding (the old open() relied on it).
        with open(self.dir + title + '.csv', 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow(header_data)
            if sec_header_data:
                writer.writerow(sec_header_data)
            writer.writerows(row_datas)

    def __create_dir__(self, dir, first_dir):
        '''Ensure both output directories exist (idempotent).'''
        # makedirs(exist_ok=True) replaces the racy exists()-then-mkdir dance.
        os.makedirs(first_dir, exist_ok=True)
        os.makedirs(dir, exist_ok=True)

if __name__ == '__main__':
    start_time = datetime.now()
    spider = SpiderHongheikuPopulationRankData()
    # spider.__get_population_rank_by_province__()

    # Accepted argument values:
    # 1. Province names, e.g. 广东 / 北京 / 新疆
    # 2. City names, e.g. 广州 / 深圳 / 西安
    # spider.__get_population_rank_from_province__('广东')

    # Accepted argument values:
    # 1. Continent names: 亚洲 / 非洲 / 欧洲 / 北美洲 / 南美洲 / 大洋洲
    # 2. Country names, e.g. 美国 / 日本 / 英国. Detailed data is missing for
    #    some countries; use the continent name (option 1) for totals instead.
    # spider.__get_population_rank_from_word__('非洲')

    # spider.__get_gdp_rank_by_province__()

    # spider.__get_disposable_income_rank_by_province__()

    spider.__get_gdp_rank_by_city__()

    end_time = datetime.now()
    use_time = end_time - start_time
    # total_seconds() also counts whole days; the old
    # seconds*1000 + microseconds/1000 arithmetic silently dropped them.
    print('本次爬取共耗时：{:.3f}ms'.format(use_time.total_seconds() * 1000))