import chardet
import requests
import pandas as pd
from bs4 import BeautifulSoup
from lxml import html

# Base URL of the tcmap.com.cn Hubei section.
base_url = 'http://www.tcmap.com.cn/hubei/'
# Target page: district/street listing for Yichang city.
url = base_url + 'yichangshi.html'
headers = {'Accept-Charset': 'UTF-8'}

# Send the HTTP request. Pass the headers (previously defined but never
# used) and a timeout so a stalled server cannot hang the script forever.
response = requests.get(url, headers=headers, timeout=10)
# The site does not reliably declare its charset, so sniff it from the
# raw bytes before decoding.
encoding = chardet.detect(response.content)['encoding']
print("返回编码为: " + encoding)
response.encoding = encoding

print(response.status_code)

# Proceed only when the request succeeded.
if response.status_code == 200:

    # Decoded HTML content of the listing page.
    html_content = response.text

    # Parse with lxml so the district table can be located by XPath.
    tree = html.fromstring(html_content)
    # xpath() returns a list; indexing [0] unconditionally would raise
    # IndexError when the table is absent, so check emptiness first.
    tables = tree.xpath("//*[@id='page_left']/table[2]")
    if not tables:
        print("没有找到指定的表格")
        raise SystemExit(1)
    soup_table = tables[0]

    # Serialize the lxml element back to a *str* (encoding='unicode').
    # Passing a byte charset here would return bytes and force
    # BeautifulSoup to re-detect the encoding.
    table_str = html.tostring(soup_table, encoding='unicode')
    table = BeautifulSoup(table_str, 'html.parser')

    # Extract one record per district row.
    districts = []
    for row in table.find_all('tr')[1:]:  # skip the header row
        columns = row.find_all('td')
        if len(columns) <= 1:
            continue

        # District-level fields: name, administrative code, area.
        district_name = columns[0].text.strip()
        district_code = columns[1].text.strip()
        area = columns[2].text.strip()

        # The 4th cell holds the district introduction in its first <div>,
        # followed by one sibling <div> per street, each linking to a
        # detail page. Guard against malformed rows with no <div> at all
        # (the original crashed with AttributeError here).
        div_tag = columns[3].find('div')
        intro = div_tag.text.strip() if div_tag is not None else ''
        div_siblings = div_tag.find_next_siblings('div') if div_tag is not None else []

        # Follow each street link and print the content of its page.
        for div in div_siblings:
            link = div.find('a')
            if not link or not link.get('href'):
                continue
            href = base_url + link['href']
            # Use a distinct variable so the outer `response` is not
            # clobbered, and sniff this page's own encoding rather than
            # assuming it matches the listing page.
            detail = requests.get(href, headers=headers, timeout=10)
            detail.encoding = chardet.detect(detail.content)['encoding']
            detail_soup = BeautifulSoup(detail.text, 'html.parser')
            # The page body lives in <div class="content">; it may be
            # missing on some pages, so guard before dereferencing.
            content_div = detail_soup.find('div', class_='content')
            if content_div is not None:
                print(content_div.text.strip())

        districts.append({
            'name': district_name,
            'code': district_code,
            'area': area,
            'introduction': intro
        })

    # Print the collected results.
    for district in districts:
        print(f"Name: {district['name']}, Code: {district['code']}, Area: {district['area']}, Intro: {district['introduction']}")

    # Optionally persist the data to an Excel file:
    # df = pd.DataFrame(districts)
    # df.to_excel('street_data.xlsx', index=False)
    # print("Data saved to street_data.xlsx")

else:
    print(f"Failed to retrieve data, status code: {response.status_code}")