#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys

from bs4 import BeautifulSoup

# Make the sibling 'kernel' directory importable before pulling in spider_main.
# os.path.join replaces the original hard-coded Windows path '..\kernel', whose
# '\k' is an invalid escape sequence (SyntaxWarning on Python 3.12+) and which
# breaks on non-Windows platforms.
sys.path.append(os.path.join('..', 'kernel'))
import spider_main
# Scraper: fetches character pages and builds candidate names.
class CityController(object):
    """Fetch character lists from xh.5156edu.com and print candidate names.

    Names are built around the surname '张' and the character '翎', combined
    with each character scraped from the site's 'tu' page.
    """

    def __init__(self):
        # Instantiate the core spider, which provides the page downloader.
        self.spider = spider_main.SpiderMain()

    def index(self):
        """Print all '张翎X' and '张X翎' combinations for the 'tu' characters."""
        tu_data = self.tu()
        # huo_data = self.huo()
        names = ['张' + '翎' + ch for ch in tu_data]
        names += ['张' + ch + '翎' for ch in tu_data]
        print(names)

    def _scrape_names(self, url):
        """Download *url* and return the text of each <a class="fontbox"> link.

        Shared helper for tu()/huo(), which previously duplicated this logic
        line for line except for the URL.
        """
        html_cont = self.spider.downloader.downloader(url)
        html_obj = BeautifulSoup(html_cont, 'html.parser')
        # Each character on the page sits inside an <a class="fontbox"> tag;
        # .string yields its text (None for tags with nested markup).
        return [a.string for a in html_obj.find_all('a', class_='fontbox')]

    def tu(self):
        """Return characters scraped from the 'tu' page.

        NOTE(review): the URL slug suggests the 土 radical page — confirm.
        """
        return self._scrape_names('http://xh.5156edu.com/wx/tu.html')

    def huo(self):
        """Return characters scraped from the 'huo' page.

        NOTE(review): the URL slug suggests the 火 radical page — confirm.
        """
        return self._scrape_names('http://xh.5156edu.com/wx/huo.html')

if __name__ == '__main__':
    # Script entry point: scrape the character list and print name candidates.
    CityController().index()