import { Request, Response, NextFunction } from 'express';
import logger from '@utils/logger';

/**
 * 爬虫控制器
 */
/**
 * Crawler controller.
 *
 * Validates crawl targets against a domain allow/deny list and exposes
 * Express handlers for testing, listing and creating crawler configurations.
 *
 * Handlers are declared as arrow-function properties so they keep their
 * `this` binding even when passed unbound as route callbacks, e.g.
 * `router.post('/test', controller.testCrawler)`.
 */
class CrawlerController {
  // Allow-list of data sources known to permit programmatic access.
  private readonly RECOMMENDED_DOMAINS = [
    'api.github.com',
    'api.stackexchange.com',
    'hub.docker.com',
    'kubernetes.io',
    'docs.docker.com',
    'nodejs.org'
  ];

  // Deny-list of domains that must not be crawled (terms-of-service / legal).
  private readonly BLOCKED_DOMAINS = [
    'facebook.com',
    'twitter.com',
    'weibo.com',
    'taobao.com',
    'tmall.com',
    'jd.com'
  ];

  /**
   * True when `hostname` is `domain` itself or a subdomain of it.
   *
   * Suffix matching (instead of the previous substring `includes`) prevents
   * look-alike hosts such as `facebook.com.evil.net` from matching
   * `facebook.com`, and stops arbitrary hosts that merely contain a
   * recommended domain from being treated as recommended.
   */
  private matchesDomain(hostname: string, domain: string): boolean {
    return hostname === domain || hostname.endsWith(`.${domain}`);
  }

  /**
   * Check whether a URL may be crawled.
   *
   * @param url - Absolute URL of the crawl target.
   * @returns `{ safe: false, message }` for blocked domains or malformed
   *          URLs; `{ safe: true }` otherwise. Hosts outside the
   *          recommended list are allowed but logged as a warning.
   */
  private isUrlSafe(url: string): { safe: boolean; message?: string } {
    try {
      const hostname = new URL(url).hostname.toLowerCase();

      // Reject any host on (or under) a blocked domain.
      const blocked = this.BLOCKED_DOMAINS.find(d => this.matchesDomain(hostname, d));
      if (blocked) {
        return {
          safe: false,
          message: `不允许爬取 ${blocked} 域名，可能违反服务条款或法律法规`
        };
      }

      // Not fatal, but surface hosts outside the vetted allow-list.
      if (!this.RECOMMENDED_DOMAINS.some(d => this.matchesDomain(hostname, d))) {
        logger.warn(`Warning: ${hostname} is not in recommended domains list`);
      }

      return { safe: true };
    } catch (error) {
      // new URL() throws on malformed input.
      return { safe: false, message: 'Invalid URL format' };
    }
  }

  /**
   * Test a crawler configuration against a single URL (simulated — no real
   * network access is performed here).
   *
   * Responds 400 on missing URL or an invalid/too-small interval, 403 on a
   * disallowed domain, 200 with simulated crawl data otherwise.
   */
  testCrawler = async (req: Request, res: Response, next: NextFunction): Promise<void> => {
    try {
      const { url, type, interval = 5000 } = req.body;

      if (!url) {
        res.status(400).json({
          success: false,
          message: 'URL is required',
        });
        return;
      }

      // Safety / legal check before anything else.
      const safetyCheck = this.isUrlSafe(url);
      if (!safetyCheck.safe) {
        res.status(403).json({
          success: false,
          message: safetyCheck.message,
          error: 'URL not allowed due to safety or legal concerns'
        });
        return;
      }

      // Coerce so JSON bodies sending `"interval": "6000"` behave the same
      // as numeric payloads; reject non-numeric values and sub-floor rates.
      const intervalMs = Number(interval);
      if (!Number.isFinite(intervalMs) || intervalMs < 5000) {
        res.status(400).json({
          success: false,
          message: 'Request interval must be at least 5000ms (5 seconds) to comply with rate limiting',
        });
        return;
      }

      // Match the warning against the parsed hostname rather than the raw
      // URL string, so a query string like `?ref=api.github.com` cannot
      // suppress it. `url` is known parseable: isUrlSafe succeeded above.
      const hostname = new URL(url).hostname.toLowerCase();
      const isRecommended = this.RECOMMENDED_DOMAINS.some(d => this.matchesDomain(hostname, d));

      // Simulated crawl result.
      const testResult = {
        success: true,
        url,
        type: type || 'web',
        interval: intervalMs,
        timestamp: new Date().toISOString(),
        data: {
          title: '测试页面标题',
          content: '这是爬取到的测试内容',
          links: ['https://example.com/page1', 'https://example.com/page2'],
          metadata: {
            author: 'Test Author',
            publishDate: '2025-10-07',
            tags: ['测试', '爬虫']
          }
        },
        statistics: {
          responseTime: 245,
          dataSize: 1024,
          linksFound: 2
        },
        warnings: isRecommended ? [] : [
          '⚠️ 此域名不在推荐列表中，请确保有合法授权'
        ]
      };

      logger.info(`Crawler test completed for URL: ${url}`);

      res.status(200).json({
        success: true,
        data: testResult,
        message: 'Crawler test completed successfully',
      });
    } catch (error) {
      logger.error('Failed to test crawler:', error as Error);
      next(error);
    }
  };

  /**
   * List configured crawlers (static sample data) with pagination metadata.
   */
  getCrawlers = async (req: Request, res: Response, next: NextFunction): Promise<void> => {
    try {
      const { page = '1', pageSize = '20' } = req.query;

      // Fall back to defaults when query values are missing or non-numeric,
      // so the pagination payload never contains NaN.
      const current = Number.parseInt(page as string, 10) || 1;
      const size = Number.parseInt(pageSize as string, 10) || 20;

      const crawlers = [
        {
          id: '1',
          name: '技术文档爬虫',
          url: 'https://docs.example.com',
          type: 'documentation',
          status: 'active',
          lastRun: new Date().toISOString(),
          itemsCollected: 1250
        },
        {
          id: '2',
          name: '日志文件监控',
          url: '/var/log/system.log',
          type: 'log',
          status: 'active',
          lastRun: new Date().toISOString(),
          itemsCollected: 5432
        }
      ];

      res.status(200).json({
        success: true,
        data: crawlers,
        pagination: {
          current,
          pageSize: size,
          total: crawlers.length,
        },
      });
    } catch (error) {
      logger.error('Failed to get crawlers:', error as Error);
      next(error);
    }
  };

  /**
   * Create a crawler configuration.
   *
   * Responds 400 when `name` or `url` is missing, 201 with the created
   * record otherwise. No URL safety check is applied here because crawler
   * targets may be local file paths (see the `log` type in getCrawlers),
   * which are not parseable URLs.
   */
  createCrawler = async (req: Request, res: Response, next: NextFunction): Promise<void> => {
    try {
      const { name, url, type, schedule, selectors } = req.body;

      // Previously an undefined name/url silently produced a broken record.
      if (!name || !url) {
        res.status(400).json({
          success: false,
          message: 'name and url are required',
        });
        return;
      }

      const crawler = {
        id: Date.now().toString(),
        name,
        url,
        type: type || 'web',
        // Default: every 6 hours.
        schedule: schedule || '0 */6 * * *',
        selectors: selectors || {},
        status: 'inactive',
        createdAt: new Date().toISOString(),
      };

      res.status(201).json({
        success: true,
        data: crawler,
        message: 'Crawler created successfully',
      });
    } catch (error) {
      logger.error('Failed to create crawler:', error as Error);
      next(error);
    }
  };
}

// Export a shared singleton so every route module uses the same controller instance.
export default new CrawlerController();
