import requests
from bs4 import BeautifulSoup
import time
import logging
from config.config import CRAWLER_CONFIG
from models.job import Job
from .spiders import Spider58, SpiderZhipin, SpiderLiepin, SpiderLLRLZYFW
import os
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class MultiPlatformSpider:
    """Manage and run job crawlers across multiple recruitment platforms.

    Holds one spider instance per supported platform and provides a common
    HTTP fetch helper with retry/backoff, per-platform headers, optional
    proxying, and debug dumps of every response.
    """

    def __init__(self, config):
        """Build one spider per supported platform.

        Args:
            config (dict): Crawler configuration; expected to contain
                'headers', 'crawl_interval' and a 'platforms' mapping with
                one entry per platform id used below.
        """
        self.config = config
        # Default headers; a platform entry may override them (see fetch_page).
        self.headers = config['headers']
        # Platform id -> spider instance; the keys drive the crawl_all() loop.
        self.spiders = {
            '58': Spider58(config['platforms']['58']),
            'zhipin': SpiderZhipin(config['platforms']['zhipin']),
            'liepin': SpiderLiepin(config['platforms']['liepin']),
            'llrlzyfw': SpiderLLRLZYFW(config['platforms']['llrlzyfw'])
        }

    def crawl_all(self):
        """Crawl every platform and aggregate the parsed job entries.

        A failure on one platform is logged and does not abort the others.

        Returns:
            list: Job entries collected from all platforms that succeeded.
        """
        all_jobs = []

        for platform, spider in self.spiders.items():
            try:
                logging.info(f"开始爬取 {platform} 平台...")

                if platform == 'llrlzyfw':
                    # This platform exposes a JSON API rather than HTML pages.
                    data = spider.fetch_jobs()
                    if data:
                        jobs = spider.parse_job_list(data)
                        all_jobs.extend(jobs)
                        logging.info(f"{platform} 平台爬取完成，获取 {len(jobs)} 个职位")
                else:
                    # Remaining platforms are scraped from their HTML listing page.
                    html = self.fetch_page(
                        self.config['platforms'][platform]['base_url'],
                        platform
                    )
                    if html:
                        jobs = spider.parse_job_list(html)
                        all_jobs.extend(jobs)
                        logging.info(f"{platform} 平台爬取完成，获取 {len(jobs)} 个职位")

                # Be polite: pause between platforms to avoid hammering sites.
                time.sleep(self.config['crawl_interval'])

            except Exception as e:
                # Isolate per-platform failures and continue with the next one.
                logging.error(f"爬取 {platform} 平台失败: {str(e)}")
                continue

        return all_jobs

    def fetch_page(self, url, platform=None, max_retries=3):
        """GET *url* with retries and exponential backoff.

        Args:
            url (str): Page URL to fetch.
            platform (str | None): Platform id; selects per-platform headers
                (when configured) and names the debug dump directory.
            max_retries (int): Maximum number of attempts.

        Returns:
            str | None: Response body text, or None if every attempt failed
            or only empty responses were received.
        """
        for attempt in range(max_retries):
            try:
                logging.info(f"正在请求URL: {url} (尝试 {attempt + 1}/{max_retries})")

                # Prefer platform-specific headers when configured.
                # BUG FIX: use .get() so an unknown platform id falls back to
                # the default headers instead of raising KeyError.
                if platform and 'headers' in self.config['platforms'].get(platform, {}):
                    headers = self.config['platforms'][platform]['headers']
                else:
                    headers = self.config['headers']

                # BUG FIX: the old code passed the whole PROXY_CONFIG dict
                # (including its 'enable' control flag) straight to requests,
                # which expects only a {scheme: proxy_url} mapping. Keep just
                # the entries requests understands.
                proxy_cfg = self.config.get('PROXY_CONFIG', {})
                if proxy_cfg.get('enable'):
                    proxies = {k: v for k, v in proxy_cfg.items()
                               if k in ('http', 'https')} or None
                else:
                    proxies = None

                response = requests.get(
                    url,
                    headers=headers,
                    timeout=10,
                    proxies=proxies,
                    verify=False  # some target sites have broken cert chains
                )

                # Raise for 4xx/5xx so the retry logic below kicks in.
                response.raise_for_status()

                # Log a short summary depending on the payload type.
                content_type = response.headers.get('content-type', '')
                if 'application/json' in content_type:
                    logging.info(f"收到JSON响应: {response.text[:200]}...")  # first 200 chars only
                else:
                    logging.info(f"收到HTML响应，长度: {len(response.text)}")

                # Dump the raw response for offline debugging.
                # BUG FIX: an OSError here (disk full, permissions) is not a
                # RequestException and used to abort the fetch; it must not.
                try:
                    debug_dir = f'debug/{platform}'
                    os.makedirs(debug_dir, exist_ok=True)
                    debug_file = f'{debug_dir}/response_{attempt}.html'
                    with open(debug_file, 'w', encoding='utf-8') as f:
                        f.write(response.text)
                    logging.info(f"响应内容已保存到: {debug_file}")
                except OSError as io_err:
                    logging.warning(f"保存调试文件失败: {io_err}")

                # BUG FIX: empty responses used to retry immediately with no
                # backoff; apply the same exponential wait as the error path.
                if not response.text.strip():
                    logging.warning("收到空响应")
                    if attempt < max_retries - 1:
                        time.sleep(2 ** attempt)
                    continue

                return response.text

            except requests.exceptions.RequestException as e:
                if attempt < max_retries - 1:
                    wait_time = 2 ** attempt  # exponential backoff: 1s, 2s, 4s...
                    logging.warning(f"请求失败，{wait_time}秒后重试: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logging.error(f"抓取页面失败: {url}, 错误: {str(e)}")

        return None
