"""上海空气质量数据爬虫
用于从上海市生态环境局网站抓取每日空气质量数据
使用Selenium实现，支持无头模式，保持会话不关闭
支持随机User-Agent，避免被网站封锁
"""

import pandas as pd
import time
import logging
import random
import os
import sys
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# 添加项目根目录到系统路径，确保能够导入其他模块
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入数据库相关模块
from scraper.data_saver import save_data_to_mysql, init_db
# 使用本地ChromeDriver

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

logger = logging.getLogger('ShanghaiScraper')

class ShanghaiAirQualityScraper:
    """Scraper for Shanghai daily air-quality data.

    Uses Selenium (optionally headless) to fetch the latest air-quality
    readings from the Shanghai Municipal Bureau of Ecology and Environment
    website. The browser session is kept alive between requests, and a
    random User-Agent is applied per session/request to reduce the chance
    of being blocked.
    """

    # Pool of User-Agent strings covering desktop and mobile browsers;
    # one is chosen at random for each browser session and each request.
    USER_AGENTS = [
        # Windows Chrome
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
        # macOS Safari
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 12_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Safari/605.1.15",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.3 Safari/605.1.15",
        # macOS Chrome
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
        # iOS Safari
        "Mozilla/5.0 (iPhone; CPU iPhone OS 15_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPad; CPU OS 15_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1",
        # Android Chrome
        "Mozilla/5.0 (Linux; Android 12; SM-G991B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.104 Mobile Safari/537.36",
        "Mozilla/5.0 (Linux; Android 12; Pixel 6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.87 Mobile Safari/537.36",
        # Extra mobile agents. These were previously appended to the class
        # attribute inside setup_driver(), which duplicated them on every
        # driver re-initialization; they are hoisted here instead.
        "Mozilla/5.0 (Linux; Android 13; SM-S901B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36",
        "Mozilla/5.0 (iPhone14,3; U; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/19A346 Safari/602.1"
    ]

    # Default local ChromeDriver path; overridable via the CHROMEDRIVER_PATH
    # environment variable so the scraper is not tied to one machine layout.
    DEFAULT_CHROMEDRIVER_PATH = r"D:\tools\ChromeDriver\chromedriver.exe"

    def __init__(self, headless=True, use_kafka=False):
        """Initialize the scraper.

        Args:
            headless: Run Chrome in headless mode. Defaults to True.
            use_kafka: Also publish scraped data to Kafka. Defaults to False.
        """
        self.url = 'https://sthj.sh.gov.cn/kqzlssfb/index.html'
        self.driver = None
        self.headless = headless
        self.use_kafka = use_kafka
        self.kafka_producer = None

        # Kafka support is a placeholder for now; the producer is never
        # actually constructed here.
        if self.use_kafka:
            try:
                logger.info("初始化Kafka生产者")
                # self.kafka_producer = KafkaProducer(...)
            except Exception as e:
                logger.error(f"Kafka生产者初始化失败: {str(e)}")

        self.setup_driver()

    def setup_driver(self):
        """Create and configure the Selenium Chrome WebDriver.

        Raises:
            Exception: Re-raised if the driver cannot be initialized.
        """
        try:
            chrome_options = Options()

            # Bug fix: honor the headless flag instead of always forcing
            # headless mode on.
            if self.headless:
                chrome_options.add_argument('--headless')
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--window-size=1920,1080')

            # Pick a random User-Agent for this browser session.
            random_user_agent = random.choice(self.USER_AGENTS)

            # Anti-detection tweaks: hide the automation banner/flags that
            # sites use to fingerprint Selenium.
            chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
            chrome_options.add_experimental_option("useAutomationExtension", False)
            chrome_options.add_argument('--disable-blink-features=AutomationControlled')
            chrome_options.add_argument('--user-agent={}'.format(random_user_agent))
            chrome_options.add_argument('--disable-software-rasterizer')
            chrome_options.add_argument('--enable-features=SharedArrayBuffer')

            logger.info(f"使用加强版随机User-Agent: {random_user_agent}")

            # Use a local ChromeDriver (avoids auto-download); the path can
            # be overridden through the CHROMEDRIVER_PATH environment variable.
            chrome_driver_path = os.environ.get('CHROMEDRIVER_PATH', self.DEFAULT_CHROMEDRIVER_PATH)
            service = Service(executable_path=chrome_driver_path)
            self.driver = webdriver.Chrome(service=service, options=chrome_options)
            logger.info(f"WebDriver初始化成功，使用本地ChromeDriver: {chrome_driver_path}")
        except Exception as e:
            logger.error(f"WebDriver初始化失败: {str(e)}")
            raise

    def get_data(self):
        """Fetch the latest air-quality row from the website.

        Returns:
            A one-row pandas DataFrame with the newest data, or None on failure.
        """
        try:
            if not self.driver:
                self.setup_driver()

            # Rotate the User-Agent before every request via CDP.
            random_user_agent = random.choice(self.USER_AGENTS)
            self.driver.execute_cdp_cmd('Network.setUserAgentOverride', {"userAgent": random_user_agent})
            logger.info(f"正在访问URL: {self.url}，使用User-Agent: {random_user_agent}")
            self.driver.get(self.url)

            # Smoothly scroll to the bottom over ~3 seconds so lazy-loaded
            # content (including the data iframe) is triggered.
            self.driver.execute_script("""
                const scrollHeight = document.body.scrollHeight;
                const scrollStep = scrollHeight / 30;  // 分30步滚动
                let currentPosition = 0;
                const scrollInterval = setInterval(() => {
                    window.scrollBy(0, scrollStep);
                    currentPosition += scrollStep;
                    if (currentPosition >= scrollHeight) {
                        clearInterval(scrollInterval);
                    }
                }, 100);  // 每100ms滚动一次，总共3秒
            """)
            time.sleep(3)  # wait for the scroll to finish

            # Enter the outer iframe that hosts the air-quality widget.
            outer_iframe = WebDriverWait(self.driver, 25).until(
                EC.presence_of_element_located((By.ID, "kqzlIframe")))
            self.driver.switch_to.frame(outer_iframe)

            # Locate the inner frame, with retries. Bug fix: a page refresh
            # resets the frame context to the top document, so the outer
            # iframe must be re-entered before retrying the inner lookup.
            max_retries = 3
            for attempt in range(max_retries):
                try:
                    inner_frame = WebDriverWait(self.driver, 25).until(
                        EC.presence_of_element_located((By.ID, "zsBz")))
                    break
                except Exception:
                    if attempt == max_retries - 1:
                        raise
                    logger.warning(f"iframe加载失败，第{attempt+1}次重试...")
                    self.driver.refresh()
                    time.sleep(5)
                    outer_iframe = WebDriverWait(self.driver, 25).until(
                        EC.presence_of_element_located((By.ID, "kqzlIframe")))
                    self.driver.switch_to.frame(outer_iframe)

            # Switch into the inner frame that contains the data table.
            self.driver.switch_to.frame(inner_frame)
            logger.info("已切换到iframe")
            time.sleep(5)  # allow the frame content to render

            # Wait until the table is actually visible (not just present).
            table = WebDriverWait(self.driver, 30).until(
                EC.visibility_of_element_located((By.ID, "aqiList")))

            table_html = table.get_attribute('outerHTML')

            # Parse the HTML table with pandas.
            dfs = pd.read_html(table_html)
            if dfs and len(dfs) > 0:
                # Keep only the first row of the first table (newest data).
                df = dfs[0].head(1)
                logger.info(f"成功获取最新数据: \n{df}")
                print(df)
                return df
            else:
                logger.warning("未找到数据表格")
                return None

        except Exception as e:
            logger.error(f"获取数据失败: {str(e)}")
            # Keep the driver alive so the next call can retry.
            return None
        finally:
            # Switch back to the main document without closing the driver.
            # Guarded: the driver may be None or already dead, and an
            # exception raised here would mask the real error.
            if self.driver:
                try:
                    self.driver.switch_to.default_content()
                except Exception:
                    pass

    def preprocess_data(self, df):
        """Post-process scraped air-quality data.

        Args:
            df: DataFrame with raw air-quality data.

        Returns:
            The DataFrame with a 'timestamp' column added, or None if the
            input is empty or processing fails.
        """
        if df is None or df.empty:
            return None

        try:
            # Stamp the rows with the scrape time.
            df['timestamp'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            # Additional cleaning (renames, type/unit conversion) goes here.

            return df
        except Exception as e:
            logger.error(f"数据处理失败: {str(e)}")
            return None

    @staticmethod
    def _safe_float(value, default=0.0):
        """Convert value to float, returning default for missing/non-numeric input."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    @staticmethod
    def _safe_int(value, default=0):
        """Convert value to int, returning default for missing/non-numeric input."""
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return default

    def get_air_quality_data(self, start_date, end_date):
        """Fetch air-quality data for a date range.

        Note: the current implementation can only retrieve the latest data,
        so the date-range parameters are accepted but ignored.

        Args:
            start_date: Start date (YYYY-MM-DD).
            end_date: End date (YYYY-MM-DD).

        Returns:
            A list of record dicts matching the database schema, or None.
        """
        logger.info(f"获取从 {start_date} 到 {end_date} 的空气质量数据")

        try:
            df = self.get_data()
            if df is None or df.empty:
                logger.warning("未获取到数据")
                return None

            processed_df = self.preprocess_data(df)
            if processed_df is None:
                return None

            # Convert the DataFrame into schema-shaped dicts. Non-numeric
            # cells (e.g. "—" placeholders) fall back to 0 instead of
            # aborting the whole batch.
            data_list = []
            for _, row in processed_df.iterrows():
                current_time = datetime.now()

                record = {
                    "timestamp": current_time,  # scrape time, not site time
                    "PM2_5": self._safe_float(row.get("PM2.5")),
                    "PM10": self._safe_float(row.get("PM10")),
                    "O3": self._safe_float(row.get("O3")),
                    "SO2": self._safe_float(row.get("SO2")),
                    "NO2": self._safe_float(row.get("NO2")),
                    "CO": self._safe_float(row.get("CO")),
                    "AQI": self._safe_int(row.get("AQI"))
                }
                data_list.append(record)

            logger.info(f"成功获取 {len(data_list)} 条记录")
            return data_list
        except Exception as e:
            logger.error(f"获取空气质量数据失败: {str(e)}")
            return None

    def save_data(self, data):
        """Persist air-quality records to the MySQL database.

        Args:
            data: List of record dicts as produced by get_air_quality_data.

        Returns:
            True on success, False otherwise.
        """
        if not data:
            logger.warning("没有数据可保存")
            return False

        try:
            # save_data_to_mysql / init_db are imported at module level.
            session_maker = init_db()

            success = save_data_to_mysql(data, session_maker)
            if success:
                logger.info(f"成功保存 {len(data)} 条记录到MySQL数据库")
            else:
                logger.error("保存数据到MySQL数据库失败")
                return False

            # Kafka publishing is a placeholder for now.
            if self.use_kafka and self.kafka_producer:
                logger.info(f"发送 {len(data)} 条记录到Kafka")
                # for record in data:
                #     self.kafka_producer.send('air_quality', json.dumps(record).encode('utf-8'))

            return True
        except Exception as e:
            logger.error(f"保存数据失败: {str(e)}")
            return False

    def refresh_data(self, interval_seconds=86400):
        """Scrape and save data in a loop at a fixed interval.

        Args:
            interval_seconds: Seconds between refreshes; defaults to 86400 (1 day).
        """
        while True:
            try:
                logger.info(f"开始获取数据，当前时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
                today = datetime.now().strftime("%Y-%m-%d")
                data = self.get_air_quality_data(today, today)

                if data:
                    success = self.save_data(data)
                    if success:
                        logger.info("数据保存成功")
                    else:
                        logger.warning("数据保存失败")

                logger.info(f"等待{interval_seconds}秒后刷新数据")
                time.sleep(interval_seconds)
            except KeyboardInterrupt:
                logger.info("用户中断，停止获取数据")
                break
            except Exception as e:
                logger.error(f"刷新数据时发生错误: {str(e)}")
                # Back off briefly before retrying after an error.
                time.sleep(60)

    def close(self):
        """Quit the WebDriver and release the browser session."""
        if self.driver:
            self.driver.quit()
            self.driver = None
            logger.info("WebDriver已关闭")

# Example usage
if __name__ == "__main__":
    scraper = ShanghaiAirQualityScraper()
    try:
        # One-shot fetch and preprocess of the latest data.
        raw = scraper.get_data()
        cleaned = scraper.preprocess_data(raw)
        if cleaned is not None:
            print(cleaned)

        # Uncomment for a daily refresh loop:
        # scraper.refresh_data(interval_seconds=86400)  # refresh once per day
    except Exception as e:
        print(f"发生错误: {str(e)}")
    finally:
        # The session is kept alive on purpose; call scraper.close() here
        # if the driver is no longer needed.
        pass
