"""
Google搜索爬虫 - Scrapy版本
"""
import scrapy
import re
from urllib.parse import quote
from typing import List, Dict, Any, Optional
from .base_spider import BaseScrapySpider, SpiderFactory

class GoogleSearchSpider(BaseScrapySpider):
	"""Crawl Google search result pages and mine the linked pages for
	contact details (email addresses, phone numbers, contact names).

	One search request is issued per entry in ``search_queries``; each
	organic result link is then followed and scanned.  A result item
	(dict) is yielded only when at least one email or phone is found.
	"""

	name = 'google_search'

	# BUG FIX: Scrapy reads ``custom_settings`` from the *class* (via the
	# ``update_settings`` classmethod) before the spider is instantiated,
	# so assigning it inside ``__init__`` — as the original code did — has
	# no effect.  It must be a class attribute to be honored.
	custom_settings = {
		'DOWNLOAD_DELAY': 3,            # be polite: Google rate-limits aggressively
		'RANDOMIZE_DOWNLOAD_DELAY': True,
		'CONCURRENT_REQUESTS': 1,
		'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
		'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
	}

	# Compiled once at class level instead of per-response.
	# FIX: the original class was ``[A-Z|a-z]`` — the ``|`` inside a
	# character class is a literal pipe, not alternation.
	_EMAIL_RE = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b')
	# Chinese mobile (1[3-9] + 9 digits) or landline (area code + number).
	_PHONE_RE = re.compile(r'1[3-9]\d{9}|0\d{2,3}-?\d{7,8}')
	# "contact person:"-style labels, Chinese and English, tried in order.
	_CONTACT_RES = [
		re.compile(r'联系人[：:]\s*([^\s\n\r]+)', re.IGNORECASE),
		re.compile(r'联系[：:]\s*([^\s\n\r]+)', re.IGNORECASE),
		re.compile(r'contact[：:]\s*([^\s\n\r]+)', re.IGNORECASE),
	]

	def __init__(self, name=None, search_queries: Optional[List[str]] = None, status_callback=None, **kwargs):
		"""
		Args:
			name: optional spider name override; defaults to ``self.name``.
			search_queries: search strings to submit to Google; an empty
				or ``None`` value means the spider issues no requests.
			status_callback: forwarded to ``BaseScrapySpider``.
		"""
		super().__init__(name or self.name, query=None, status_callback=status_callback, **kwargs)
		self.search_queries = search_queries or []

	def _get_start_requests(self):
		"""Yield one Google search request per configured query.

		Yields nothing when ``search_queries`` is empty (equivalent to
		the previous ``return []`` short-circuit).
		"""
		for search_query in self.search_queries:
			url = f"https://www.google.com/search?q={quote(search_query)}&num=10"
			yield scrapy.Request(
				url=url,
				callback=self.parse_search_results,
				meta={'search_query': search_query}
			)

	def parse_search_results(self, response):
		"""Follow each organic result link (``div.g``) to its contact scan.

		Only absolute http(s) links are followed; Google-internal
		relative links (e.g. "/search?...") are skipped.
		"""
		for result in response.css('div.g'):
			link = result.css('a::attr(href)').get()
			title = result.css('h3::text').get()
			if link and link.startswith('http'):
				yield scrapy.Request(
					url=link,
					callback=self.parse_contact_page,
					# .get() instead of [] so a missing meta key cannot
					# raise KeyError and kill the whole parse.
					meta={'title': title,
					      'search_query': response.meta.get('search_query', '')}
				)

	def parse_contact_page(self, response):
		"""Scan a result page for emails/phones/contact names.

		Yields a single result dict when at least one email or phone
		was found; otherwise yields nothing.
		"""
		content = response.text
		emails = self._EMAIL_RE.findall(content)
		phones = self._PHONE_RE.findall(content)

		# First matching "contact:" label wins; empty string if none.
		contact_name = ""
		for pattern in self._CONTACT_RES:
			m = pattern.search(content)
			if m:
				contact_name = m.group(1)
				break

		if emails or phones:
			yield {
				"search_query": response.meta.get('search_query', ''),
				"contact_name": contact_name,
				"email": emails[0] if emails else "",
				"phone": phones[0] if phones else "",
				"title": "",
				"source": "google_search",
				"confidence": 0.7,     # heuristic scrape, not a verified contact
				"url": response.url,
				"page_title": response.meta.get('title', ''),
				"raw_data": {"emails": emails, "phones": phones}
			}

SpiderFactory.register("google_search", GoogleSearchSpider)