import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
from datetime import datetime
import time
import os
from pathlib import Path
import textwrap


def get_line_suffixes(base_url, prefixes):
    """Collect detail-page suffixes for every route-name prefix.

    For each prefix, fetches ``{base_url}/list{prefix}``, parses the
    listing HTML, and records every anchor whose href contains ``/x_``.

    Args:
        base_url: Site root URL (no trailing path).
        prefixes: Iterable of route-name prefixes, e.g. '1', 'G'.

    Returns:
        dict mapping line display name -> detail-page URL suffix.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    suffixes = {}

    for prefix in prefixes:
        try:
            url = f"{base_url}/list{prefix}"
            print(f"正在获取以{prefix}开头的线路列表: {url}")
            response = requests.get(url, headers=headers, timeout=15)
            response.raise_for_status()

            # Skip prefixes whose listing page came back empty.
            if not response.text.strip():
                print(f"警告: 获取{prefix}开头的线路列表时返回空内容")
                continue

            soup = BeautifulSoup(response.text, 'html.parser')

            # Primary selector, with a href-based fallback when the
            # expected listing container is absent.
            anchors = soup.select('.list.clearfix a') or soup.select('a[href*="/x_"]')

            for anchor in anchors:
                href = anchor.get('href')
                if not href or '/x_' not in href:
                    continue
                # Fall back to the title attribute when the anchor has
                # no visible text.
                name = anchor.get_text().strip() or anchor.get('title', '未知线路')
                tail = href.split('/')[-1]
                suffixes[name] = tail
                print(f"找到线路: {name} - 后缀: {tail}")

            time.sleep(1.5)  # polite delay between listing requests

        except Exception as e:
            # Best-effort: a failed prefix should not abort the others.
            print(f"获取{prefix}开头的线路时出错: {str(e)}")

    return suffixes


def get_bus_line_details(base_url, suffix):
    """Fetch one line's detail page and extract its metadata.

    Returns:
        On success, a dict with line name/type/hours/fare, station
        list and count, crawl timestamp, source URL and status "成功".
        On timeout or HTTP/parse errors, a stub dict whose 爬取状态
        field describes the failure. Returns None for an empty body.
    """
    url = f"{base_url}/{suffix}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        print(f"正在爬取线路: {url}")
        resp = requests.get(url, headers=headers, timeout=15)
        resp.raise_for_status()

        # An empty body means nothing to parse; signal with None.
        if not resp.text.strip():
            print(f"警告: 获取线路{suffix}时返回空内容")
            return None

        soup = BeautifulSoup(resp.text, 'html.parser')
        stations = extract_stations(soup)

        return {
            "线路名称": extract_line_name(soup),
            "线路类型": extract_line_type(soup),
            "运营时间": extract_operation_time(soup),
            "票价信息": extract_fare(soup),
            "更新时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "数据来源": url,
            "爬取状态": "成功",
            "站点数量": len(stations),
            "站点列表": stations,
        }

    except requests.exceptions.Timeout:
        print(f"爬取线路超时: {url}")
        return {
            "线路名称": f"超时线路_{suffix}",
            "爬取状态": "超时",
            "数据来源": url
        }
    except requests.exceptions.HTTPError as e:
        print(f"HTTP错误: {e.response.status_code} - {url}")
        return {
            "线路名称": f"错误线路_{suffix}",
            "爬取状态": f"HTTP错误 {e.response.status_code}",
            "数据来源": url
        }
    except Exception as e:
        print(f"爬取线路数据时出错: {str(e)}")
        return {
            "线路名称": f"错误线路_{suffix}",
            "爬取状态": f"错误: {str(e)}",
            "数据来源": url
        }


def extract_line_name(soup):
    """Extract the bus line's display name from a parsed detail page.

    Tries a list of known title selectors in order of specificity,
    then falls back to mining the <title> tag. Returns "未知线路"
    when nothing usable is found.
    """
    # Selectors ordered from most to least specific.
    title_selectors = [
        ('h1', {'class': 'title'}),
        ('h1', {'class': 'detail-title'}),
        ('h1', {}),
        ('div', {'class': 'title'}),
        ('div', {'class': 'bus-line'})
    ]

    for tag, attrs in title_selectors:
        title = soup.find(tag, attrs)
        if title:
            text = title.get_text().strip()
            if text:
                return text

    # Last resort: derive the name from the page title.
    title_tag = soup.find('title')
    if title_tag:
        title_text = title_tag.get_text().strip()
        # BUG FIX: the previous pattern r'(.+线路|路|号)' alternated over
        # the whole expression, so a title lacking "线路" matched the bare
        # single character "路" or "号". Group the suffixes so we capture
        # the shortest leading text ending in 线路/路/号.
        match = re.search(r'(.+?(?:线路|路|号))', title_text)
        if match:
            return match.group(1)

    return "未知线路"


def extract_line_type(soup):
    """Classify the line (夜班线路/快线/支线/专线) from page tags.

    Falls back to inferring the type from the line name, and finally
    to "常规公交" when no marker is present.
    """
    selectors = [
        ('span', {'class': 'line-type'}),
        ('span', {'class': 'type-tag'}),
        ('div', {'class': 'line-type'}),
        ('span', {'class': 'tag'})
    ]

    # Marker -> canonical label, checked in a fixed order.
    markers = (
        (('夜', 'night'), "夜班线路"),
        (('快', 'express'), "快线"),
        (('支', 'branch'), "支线"),
        (('专', 'special'), "专线"),
    )

    def canonicalize(raw):
        # Map a free-form tag text to a standard label; keep the raw
        # text when no known marker appears.
        lowered = raw.lower()
        for (cn, en), label in markers:
            if cn in raw or en in lowered:
                return label
        return raw

    for tag, attrs in selectors:
        node = soup.find(tag, attrs)
        if node:
            text = node.get_text().strip()
            if text:
                return canonicalize(text)

    # No explicit tag: infer from the line name itself (Chinese
    # markers only, matching the original behavior).
    name = extract_line_name(soup)
    for (cn, _), label in markers:
        if cn in name:
            return label

    return "常规公交"


def extract_operation_time(soup):
    """Extract operating hours as "HH:MM - HH:MM", or "未知".

    Tries three strategies in order: the known description container,
    a whole-page scan for time patterns, and time-tagged spans.
    """
    # Strategy 1: known 8684.cn description containers.
    container = soup.find('div', class_='bus-desc') or soup.find('div', class_='info')
    if container:
        nodes = container.find_all(string=re.compile(r'[0-9]{1,2}:[0-9]{2}'))
        if len(nodes) >= 2:
            return f"{nodes[0].strip()} - {nodes[1].strip()}"

        found = re.findall(r'(\d{1,2}:\d{2})', container.get_text())
        if len(found) >= 2:
            return f"{found[0]} - {found[1]}"

    # Strategy 2: scan all page text for time-like patterns.
    page_times = re.findall(r'\d{1,2}:\d{2}', soup.get_text())
    if len(page_times) >= 2:
        # Discard values that cannot be a clock time (hour >= 24).
        plausible = [t for t in page_times if int(t.split(':')[0]) < 24]
        if len(plausible) >= 2:
            return f"{plausible[0]} - {plausible[1]}"

    # Strategy 3: first/last-bus spans near the station list.
    spans = soup.find_all('span', class_=re.compile('time|first|last'))
    if len(spans) >= 2:
        pair = [span.get_text().strip() for span in spans[:2]]
        if all(re.match(r'\d{1,2}:\d{2}', t) for t in pair):
            return f"{pair[0]} - {pair[1]}"

    return "未知"


def extract_fare(soup):
    """Extract the fare, preferring an explicit "N元" amount.

    Searches fare-keyword text nodes first, then <li> entries in the
    description area. Returns "未知" when nothing matches.
    """
    keywords = ["票价", "票制", "元", "¥", "￥"]
    keyword_re = re.compile('|'.join(keywords))
    amount_re = re.compile(r'(\d+(?:\.\d+)?)\s*元')

    # Strategy 1: any text node containing a fare keyword.
    for node in soup.find_all(string=keyword_re):
        stripped = node.strip()
        if any(kw in stripped for kw in keywords):
            hit = amount_re.search(stripped)
            # Prefer the bare amount; otherwise return the raw text.
            return f"{hit.group(1)}元" if hit else stripped

    # Strategy 2: list items inside the description container.
    desc = soup.find('div', class_='bus-desc') or soup.find('div', class_='info')
    if desc:
        for li in desc.find_all('li'):
            if '票价' in li.text:
                hit = amount_re.search(li.text)
                return f"{hit.group(1)}元" if hit else li.text.strip()

    return "未知"


def extract_stations(soup):
    """Extract every station for both travel directions.

    Returns a list of dicts with keys 方向/站点顺序/站点名称. The
    direction defaults to alternating 上行/下行 by list position and
    is overridden by any nearby heading that names a direction.
    """
    stations = []

    # Known station-list containers, most specific first.
    station_selectors = [
        ('div', {'class': 'bus-lzlist'}),
        ('ul', {'class': 'bus-list'}),
        ('div', {'class': 'station-list'}),
        ('div', {'class': 'bus-station'}),
        ('ul', {'class': 'list'})
    ]

    station_lists = []
    for tag, attrs in station_selectors:
        station_lists.extend(soup.find_all(tag, attrs))

    if not station_lists:
        # Fallback: any element whose class hints at stations/lists.
        station_lists = soup.select('div[class*="station"], ul[class*="list"]')

    for i, station_list in enumerate(station_lists):
        # Default: even-indexed lists are treated as outbound.
        direction = "上行" if i % 2 == 0 else "下行"

        # Prefer an explicit direction named in the nearest preceding
        # heading element.
        prev_title = station_list.find_previous(['h3', 'h4', 'h5', 'strong'])
        if prev_title:
            title_text = prev_title.get_text().strip()
            if '上行' in title_text:
                direction = '上行'
            elif '下行' in title_text:
                direction = '下行'
            elif '去程' in title_text:
                direction = '上行'
            elif '回程' in title_text:
                direction = '下行'
            elif '往' in title_text:
                # BUG FIX: the old code used the tautology
                # "'上行' if '往' in title_text else '下行'" inside this
                # branch, so the 下行 arm was unreachable. Keep the old
                # effective behavior (treat "往..." as outbound).
                # TODO(review): "往X" headings can appear on both
                # directions — confirm whether position should decide.
                direction = '上行'

        # Collect station elements, trying specific selectors first.
        station_items = []
        item_selectors = [
            station_list.find_all('li'),
            station_list.find_all('a', class_='station'),
            station_list.find_all('span', class_='station-name'),
            station_list.find_all('div', class_='station'),
            station_list.find_all('span', class_='name')
        ]

        for items in item_selectors:
            if items:
                station_items = items
                break

        # Last resort: take every child element of the container.
        if not station_items:
            station_items = station_list.find_all(True)

        for order, item in enumerate(station_items, 1):
            station_name = item.get_text().strip()
            # Strip layout characters (newlines, tabs, arrows).
            station_name = re.sub(r'[\n\r\t→↓↑]', '', station_name).strip()

            # Require at least two characters to filter out noise.
            if station_name and len(station_name) >= 2:
                stations.append({
                    "方向": direction,
                    "站点顺序": order,
                    "站点名称": station_name
                })

    return stations


def preprocess_data(data_list):
    """Normalize raw crawled line records into a uniform schema.

    Normalizes the line name, operating hours, fare, and per-direction
    station ordering, and assigns sequential 线路ID values (SY0001…).

    BUG FIX: the previous version rewrote 站点顺序 on the caller's
    station dicts in place; station dicts are now copied first, so the
    input list is left unmodified.

    Args:
        data_list: list of raw line dicts from get_bus_line_details.

    Returns:
        New list of normalized line dicts (input is untouched).
    """
    processed = []

    for line in data_list:
        stations = _renumber_stations(line.get("站点列表", []))

        processed.append({
            "线路ID": f"SY{len(processed) + 1:04d}",
            "线路名称": _normalize_line_name(line.get("线路名称", "")),
            "线路类型": line.get("线路类型", "常规公交"),
            "运营时间": _normalize_operation_time(line.get("运营时间", "未知")),
            "票价信息": _normalize_fare(line.get("票价信息", "未知")),
            "站点数量": len(stations),
            "更新时间": line.get("更新时间", datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
            "数据来源": line.get("数据来源", ""),
            "爬取状态": line.get("爬取状态", "成功"),
            "站点列表": stations,
        })

    return processed


def _normalize_line_name(line_name):
    """Ensure the name ends in 路/线路; prefer an extracted route number."""
    # "线路" contains "路", so a single membership test suffices.
    if "路" in line_name:
        return line_name
    match = re.search(r'(\d+|[A-Za-z]\d+)', line_name)
    if match:
        return f"{match.group(1)}路"
    return f"{line_name}线路"


def _normalize_operation_time(operation_time):
    """Standardize hours to "HH:MM - HH:MM"; default when unknown."""
    if operation_time == "未知":
        return "06:00 - 22:00"  # default assumed hours for missing data
    times = re.findall(r'\d{1,2}:\d{2}', operation_time)
    if len(times) == 2:
        return f"{times[0]} - {times[1]}"
    return operation_time


def _normalize_fare(fare):
    """Reduce fare text to "N元"; default when unknown."""
    if fare == "未知":
        return "2元"  # default assumed fare for missing data
    match = re.search(r'(\d+(?:\.\d+)?)\s*元', fare)
    if match:
        return f"{match.group(1)}元"
    return fare


def _renumber_stations(stations):
    """Renumber 站点顺序 per direction; returns copies, input untouched."""
    if not stations:
        return []
    up = [dict(s) for s in stations if s["方向"] == "上行"]
    down = [dict(s) for s in stations if s["方向"] == "下行"]
    for order, station in enumerate(up, 1):
        station["站点顺序"] = order
    for order, station in enumerate(down, 1):
        station["站点顺序"] = order
    return up + down


def format_data_for_display(data):
    """Render each processed line record as a human-readable text block.

    Args:
        data: list of normalized line dicts (output of preprocess_data).

    Returns:
        list of strings, one multi-line summary per line record.
    """
    rendered = []
    for line in data:
        parts = [
            f"线路ID: {line['线路ID']}\n",
            f"线路名称: {line['线路名称']}\n",
            f"类型: {line['线路类型']} | 站点数: {line['站点数量']}\n",
            f"运营时间: {line['运营时间']} | 票价: {line['票价信息']}\n",
            f"更新时间: {line['更新时间']}\n",
            f"数据来源: {line['数据来源']}\n",
        ]

        if line['站点数量'] > 0:
            parts.append("站点列表:\n")
            # Emit each direction's stations as a single arrow-joined row.
            for direction, label in (('上行', "【上行】:\n"), ('下行', "【下行】:\n")):
                group = [s for s in line['站点列表'] if s['方向'] == direction]
                if group:
                    parts.append(label)
                    parts.append(" → ".join(f"{s['站点顺序']}.{s['站点名称']}" for s in group) + "\n")

        rendered.append("".join(parts))

    return rendered


def save_to_csv(data_list, base_filename='长沙 公交'):
    """Persist crawled line data as CSV files plus a text summary.

    Writes three timestamped files under ``~/公交数据``: a line-level
    CSV, a station-level CSV (when any stations exist), and a summary
    report with success/failure and line-type statistics.

    BUG FIX: removed the ``col_widths``/``stations_col_widths`` dicts —
    they were built but never used (CSV has no column-width concept).

    Args:
        data_list: raw line dicts from get_bus_line_details.
        base_filename: filename stem for the generated files.

    Returns:
        Formatted display strings for the processed lines, or None on
        any I/O failure.
    """
    try:
        # All output goes into a per-user data directory.
        save_dir = Path.home() / "公交数据"
        save_dir.mkdir(parents=True, exist_ok=True)

        # Timestamped filenames avoid clobbering earlier runs.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filepath = save_dir / f"{base_filename}_{timestamp}.csv"

        processed_data = preprocess_data(data_list)

        # Line-level CSV: one row per line, fixed column set.
        lines_df = pd.DataFrame([
            {
                "线路ID": data["线路ID"],
                "线路名称": data["线路名称"],
                "线路类型": data["线路类型"],
                "运营时间": data["运营时间"],
                "票价信息": data["票价信息"],
                "站点数量": data["站点数量"],
                "更新时间": data["更新时间"],
                "数据来源": data["数据来源"],
                "爬取状态": data["爬取状态"]
            }
            for data in processed_data
        ])
        # utf-8-sig so Excel opens the Chinese headers correctly.
        lines_df.to_csv(filepath, index=False, encoding='utf-8-sig')
        print(f"线路信息已保存到 {filepath}")

        # Station-level CSV: one row per station per direction.
        all_stations = []
        for data in processed_data:
            if data.get('站点列表'):
                for station in data['站点列表']:
                    all_stations.append({
                        "线路ID": data["线路ID"],
                        "线路名称": data["线路名称"],
                        "方向": station["方向"],
                        "站点顺序": station["站点顺序"],
                        "站点名称": station["站点名称"]
                    })

        if all_stations:
            stations_filepath = save_dir / f"{base_filename}_站点_{timestamp}.csv"
            stations_df = pd.DataFrame(all_stations)
            stations_df.to_csv(stations_filepath, index=False, encoding='utf-8-sig')
            print(f"成功保存 {len(all_stations)} 个站点信息到 {stations_filepath}")

        # Plain-text summary report.
        summary_filepath = save_dir / f"{base_filename}_摘要_{timestamp}.txt"
        with open(summary_filepath, 'w', encoding='utf-8') as f:
            f.write(f"长沙公交数据采集报告\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"采集线路总数: {len(processed_data)}\n")

            # Success/failure counts.
            success_count = sum(1 for d in processed_data if d['爬取状态'] == '成功')
            error_count = len(processed_data) - success_count
            f.write(f"成功采集: {success_count} 条线路\n")
            f.write(f"采集失败: {error_count} 条线路\n")

            # Distribution of line types.
            type_counts = {}
            for data in processed_data:
                line_type = data['线路类型']
                type_counts[line_type] = type_counts.get(line_type, 0) + 1

            f.write("\n线路类型分布:\n")
            for line_type, count in type_counts.items():
                f.write(f"{line_type}: {count} 条\n")

            # Station statistics over successfully crawled lines only.
            if success_count > 0:
                total_stations = sum(d['站点数量'] for d in processed_data if d['爬取状态'] == '成功')
                avg_stations = total_stations / success_count
                f.write(f"\n总站点数: {total_stations}\n")
                f.write(f"平均每条线路站点数: {avg_stations:.1f}\n")

            # Per-line status listing.
            f.write("\n采集线路列表:\n")
            for i, data in enumerate(processed_data, 1):
                status = "✓" if data['爬取状态'] == '成功' else "✗"
                f.write(f"{i:3d}. [{status}] {data['线路名称']} ({data['线路类型']})\n")

        print(f"数据摘要已保存到 {summary_filepath}")

        return format_data_for_display(processed_data)

    except Exception as e:
        # Best-effort persistence: report and return None on failure.
        print(f"保存文件时出错: {e}")
        return None


if __name__ == '__main__':
    # BUG FIX: the base URL previously kept a trailing slash (it was
    # built as 'https://changsha.8684.com.cn/' + ''), so every derived
    # URL (f"{base_url}/list{prefix}", f"{base_url}/{suffix}") contained
    # a double slash like "…//list1". Drop the trailing slash.
    base_url = 'https://changsha.8684.com.cn'
    # Route-name prefixes to enumerate: digits plus letter prefixes.
    prefixes = ['1', '2', '3', '5', '7', '8', 'G', 'H', 'K', 'P', 'Q', 'S', 'T', 'V', 'X', 'Y', 'Z']

    print("=" * 50)
    print("长沙公交数据采集系统")
    print("=" * 50)

    # Step 1: enumerate line-detail suffixes per prefix.
    print("\n步骤1: 获取线路列表...")
    line_suffixes = get_line_suffixes(base_url, prefixes)

    if not line_suffixes:
        print("未能获取任何线路后缀")
    else:
        print(f"\n获取到 {len(line_suffixes)} 条线路")

        # Step 2: crawl each line's detail page.
        print("\n步骤2: 爬取线路详细信息...")
        all_lines_data = []
        total_lines = len(line_suffixes)
        success_count = 0

        for i, (line_name, suffix) in enumerate(line_suffixes.items(), 1):
            print(f"\n[{i}/{total_lines}] 正在处理: {line_name}")
            line_data = get_bus_line_details(base_url, suffix)
            if line_data:
                all_lines_data.append(line_data)
                if line_data.get("爬取状态") == "成功":
                    success_count += 1
                    print(f"√ 成功获取线路: {line_name}")
                else:
                    print(f"× 获取线路失败: {line_name}")
            else:
                print(f"× 获取线路失败: {line_name}")

            time.sleep(0.8)  # throttle requests to avoid being blocked

            # Progress report every 5 lines and at the end.
            if i % 5 == 0 or i == total_lines:
                print(f"\n进度: {i}/{total_lines} | 成功: {success_count} | 失败: {i - success_count}")

        if all_lines_data:
            # Step 3: normalize, persist and display the collected data.
            print("\n步骤3: 处理并保存数据...")
            formatted_data = save_to_csv(all_lines_data, base_filename='长沙公交')

            if formatted_data:
                print("\n" + "=" * 50)
                print("公交线路汇总信息")
                print("=" * 50)

                for i, data in enumerate(formatted_data, 1):
                    print(f"\n线路 #{i}:")
                    print(data)
                    print("-" * 50)
        else:
            print("未能获取任何公交线路数据")