# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import pandas as pd
#import os
import re

def request_detal(url):
    """Fetch one kindergarten detail page and parse its attribute table.

    Parameters:
        url: Absolute URL of a kindergarten detail page.

    Returns:
        A single-row pandas DataFrame with one column per table attribute;
        the packed fee field ('收费标准') is split into separate '保教费'
        and '伙食费' columns with the '元'/'/月' suffixes stripped.
        Returns an empty DataFrame when the expected markup is missing
        (error page, layout change) instead of raising AttributeError.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240', 'Connection': 'Keep-Alive', 'Referer': 'http://www.mzitu.com/tag/baoru/'}
    # A timeout keeps the crawler from hanging forever on a stalled connection.
    content = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(content.text, 'html.parser')

    # The attributes live in a table inside this specific div; guard each
    # lookup because find() returns None when the element is absent.
    div = soup.find('div', class_='mdml_tab_div')
    if div is None:
        return pd.DataFrame()
    table = div.find('table')
    if table is None:
        return pd.DataFrame()

    kindergarten_info = {}
    for row in table.find_all('tr'):
        cells = row.find_all('td')
        # Cells come in (label, value) pairs; skip malformed rows.
        if len(cells) % 2 != 0:
            continue
        for i in range(0, len(cells), 2):
            key = cells[i].get_text(strip=True).strip('：')
            value = cells[i + 1].get_text(strip=True)
            if key == '收费标准':
                # The fee field packs several entries separated by full-width
                # or half-width delimiters; pull out the two fees we keep.
                for fee in re.split('，|；|;', value):
                    parts = fee.split('：')
                    if len(parts) < 2:
                        continue  # fragment without a '：' separator — skip
                    amount = parts[1].replace("/月", "").replace("元", "")
                    if '保教费' in fee:
                        kindergarten_info['保教费'] = amount
                    elif '伙食费' in fee:
                        kindergarten_info['伙食费'] = amount
            else:
                kindergarten_info[key] = value

    # One row per detail page.
    return pd.DataFrame([kindergarten_info])
   

def request_page(url):
    """Fetch one listing page and collect details for every kindergarten on it.

    Parameters:
        url: Absolute URL of a listing (index) page.

    Returns:
        A pandas DataFrame with one row per kindergarten found on the page
        (detail-page attributes plus a '链接' column), or an empty DataFrame
        when the page has no 'common-list' <ul> — the caller uses an empty
        result as the end-of-pagination signal.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240', 'Connection': 'Keep-Alive', 'Referer': 'http://www.mzitu.com/tag/baoru/'}
    # A timeout keeps the crawler from hanging forever on a stalled connection.
    content = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(content.text, 'lxml')  # 'html.parser' also works

    # Each kindergarten is an <li> inside the 'common-list' <ul>; a missing
    # <ul> means an error/last page, so return an empty frame.
    common_list_ul = soup.find('ul', class_='common-list')
    df = pd.DataFrame()
    if common_list_ul is None:
        print("没有找到包含'common-list'类的<ul>元素")
        return df

    for li in common_list_ul.find_all('li'):
        a_tag = li.find('a')
        if a_tag is None:
            continue
        kindergarten_link = a_tag['href']
        detals = request_detal(kindergarten_link)
        # Guard against a failed/empty detail parse (request_detal may
        # return None or an empty DataFrame) before tagging and appending.
        if detals is None or detals.empty:
            continue
        detals['链接'] = kindergarten_link
        # ignore_index avoids every row carrying the duplicate index 0.
        df = pd.concat([df, detals], ignore_index=True)

    return df
    
    
def request_all():
    """Crawl every listing page and concatenate all kindergarten details.

    Pages are fetched in order (index.html, index_2.html, ...) until one
    yields no rows, which marks the end of the directory.

    Returns:
        A pandas DataFrame with one row per kindergarten across all pages.
    """
    base = 'http://www.lg.gov.cn/zwfw/zdfw/jy/mdml/yeymdml/'
    frames = []
    page = 0
    while True:
        page += 1
        # The first page has no numeric suffix.
        url = base + ('index.html' if page == 1 else 'index_%d.html' % page)
        result = request_page(url)
        if len(result) == 0:
            # An empty page means we ran past the last listing page.
            break
        print(result)
        frames.append(result)
    return pd.concat(frames) if frames else pd.DataFrame()
        
# request_detal('http://www.lg.gov.cn/zwfw/zdfw/jy/mdml/yeymdml/content/post_9379825.html')

if __name__ == '__main__':
    # Guard the crawl behind __main__ so importing this module has no side effects.
    result = request_all()
    if result.empty:
        # Without rows there is no '幼儿园名称' column and set_index would
        # raise KeyError; report and skip the export instead.
        print('No data scraped; output.csv not written.')
    else:
        # Index by kindergarten name and keep only the first occurrence of
        # each name (listing pages can repeat entries).
        result.set_index('幼儿园名称', inplace=True)
        result = result[~result.index.duplicated(keep='first')]
        result.to_csv('output.csv', index=True)