import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
from datetime import datetime
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service

class MTLConcertScraper:
    """Scrape concert listings from the moretickets.com mobile site.

    Uses Selenium (Chrome) to render the JavaScript-driven listing page,
    parses the rendered HTML with BeautifulSoup, accumulates one dict per
    concert card, and writes the results to a timestamped Excel file.
    """

    def __init__(self):
        self.base_url = "https://m.moretickets.com/uni"
        chrome_options = Options()
        # chrome_options.add_argument('--headless')  # headless mode, disabled for debugging
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')

        try:
            service = Service(ChromeDriverManager().install())
            self.driver = webdriver.Chrome(service=service, options=chrome_options)
        except Exception as e:
            print(f"启动 Chrome 时出错: {str(e)}")
            raise
        self.wait = WebDriverWait(self.driver, 10)
        self.concert_data = []

    def get_concert_list(self, page=1):
        """Load the listing page and return its rendered HTML as BeautifulSoup.

        Returns None after dumping the page source to error_page.html when
        the expected content never appears.
        NOTE: `page` is currently unused — the site renders one listing URL;
        kept for interface compatibility.
        """
        url = f"{self.base_url}/"
        print(f"正在访问URL: {url}")
        self.driver.get(url)

        try:
            # Allow up to 30s for the client-side framework to render cards.
            self.wait = WebDriverWait(self.driver, 30)
            print("等待页面加载...")

            # Wait for any show-card element instead of the root app node.
            self.wait.until(
                EC.presence_of_element_located((By.CLASS_NAME, "show-card"))
            )
            print("页面加载完成")

            # Grab the HTML after JS rendering has completed.
            return BeautifulSoup(self.driver.page_source, 'html.parser')
        except Exception as e:
            print(f"加载页面时出错: {str(e)}")
            # Save the page source for offline debugging.
            with open('error_page.html', 'w', encoding='utf-8') as f:
                f.write(self.driver.page_source)
            print("已将错误页面保存到 error_page.html")
            return None

    def parse_concert_info(self, concert):
        """Extract one concert card into a dict of Chinese-keyed fields.

        Missing sub-elements yield empty strings; returns None on any
        unexpected parsing error.
        """
        try:
            # City information
            city_element = concert.find('div', class_='city-desc')
            city = city_element.text.strip() if city_element else ''
            print("city", city)

            # Concert name. BUG FIX: guard the nested <span> lookup too —
            # previously a 'name' element without a <span> child raised
            # AttributeError and silently dropped the whole card.
            name_element = concert.find('uni-view', class_='name')
            name_span = name_element.find('span') if name_element else None
            title = name_span.text.strip() if name_span else ''

            # Show time
            time_element = concert.find('uni-view', class_='time')
            time_info = time_element.text.strip() if time_element else ''

            # Sale status
            status_element = concert.find('uni-view', class_='status-desc')
            status = status_element.text.strip() if status_element else ''

            # Price
            price_element = concert.find('uni-view', class_='price')
            price = price_element.text.strip() if price_element else ''

            return {
                '城市': city,
                '演唱会名称': title,
                '演出时间': time_info,
                '演出状态': status,
                '价格': price
            }
        except Exception as e:
            print(f"解析错误: {str(e)}")
            return None

    def scrape_concerts(self, max_pages=10):
        """Scrape the listing page, appending parsed cards to self.concert_data.

        NOTE: `max_pages` is currently unused — only the first page is
        scraped; kept for interface compatibility.
        """
        soup = self.get_concert_list()
        if not soup:
            return

        # Each concert card is a uni-view with class "show-card".
        for concert in soup.find_all('uni-view', class_='show-card'):
            concert_info = self.parse_concert_info(concert)
            if concert_info:
                self.concert_data.append(concert_info)

        time.sleep(1)  # be polite: avoid hammering the server

    def save_to_excel(self):
        """Write accumulated data to a timestamped .xlsx file; no-op when empty."""
        if not self.concert_data:
            print("没有数据可保存")
            return

        df = pd.DataFrame(self.concert_data)
        filename = f'concert_data_{datetime.now().strftime("%Y%m%d_%H%M%S")}.xlsx'
        # BUG FIX: pandas removed the `encoding` kwarg from to_excel
        # (deprecated 1.5, removed 2.0) — passing it raised TypeError.
        # xlsx is Unicode-internal, so no encoding argument is needed.
        df.to_excel(filename, index=False)
        # BUG FIX: message previously printed a literal placeholder
        # instead of the actual output path.
        print(f"数据已保存到文件: {filename}")

    def __del__(self):
        # Best-effort cleanup: quit the browser only if it was created.
        if hasattr(self, 'driver'):
            self.driver.quit()

def main():
    """Entry point: scrape the concert listings and persist them to Excel."""
    crawler = MTLConcertScraper()
    print("开始爬取演唱会数据...")
    crawler.scrape_concerts()
    crawler.save_to_excel()
    print("爬取完成！")


if __name__ == "__main__":
    main()