import scrapy
import json
import urllib.parse
import pymysql
from scrapy.utils.project import get_project_settings
from scrapy.exceptions import CloseSpider


class BaiduMapSpider(scrapy.Spider):
    """Crawl tourism-related POIs in Xi'an from the Baidu Map Place API v2.

    Issues one seed request (page 0) per query keyword, reads the total
    hit count from the seed response to fan out the remaining paginated
    requests, then yields one item dict per POI. Items whose coordinates
    fall outside Xi'an's bounding box are logged and dropped.
    """

    name = 'baidu_map'
    allowed_domains = ['api.map.baidu.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'xian_poi.pipelines.JsonWriterPipeline': 300,
            'xian_poi.pipelines.MySQLPipeline': 400,
        },
        'DOWNLOAD_DELAY': 0.8,
        'CONCURRENT_REQUESTS': 2,
        'RETRY_TIMES': 3
    }

    # Shared request constants (previously duplicated in start_requests/parse).
    # SECURITY NOTE: the API key is hard-coded in source; move it to Scrapy
    # settings or an environment variable before sharing/publishing this code.
    BASE_URL = "http://api.map.baidu.com/place/v2/search"
    API_KEY = "SGEyhSS0Fir6EBEoarGXd8UubB75a1YI"
    REGION = "西安市"
    PAGE_SIZE = 20
    MAX_PAGES = 50  # hard cap on pagination per query keyword

    # Xi'an bounding box (lng, lat) used to reject out-of-range coordinates.
    LNG_RANGE = (108.8, 109.5)
    LAT_RANGE = (33.9, 34.5)

    def _build_request(self, query, page_num):
        """Build one paginated Place API search request for *query*.

        Centralizes the parameter dict that was previously copy-pasted in
        both start_requests and parse.
        """
        params = {
            "query": query,
            "region": self.REGION,
            "output": "json",
            "ak": self.API_KEY,
            "page_size": self.PAGE_SIZE,
            "scope": 2,  # scope=2 makes the API return the detail_info block
            "page_num": page_num
        }
        url = f"{self.BASE_URL}?{urllib.parse.urlencode(params)}"
        return scrapy.Request(url,
                              callback=self.parse,
                              meta={'query': query, 'page_num': page_num})

    def start_requests(self):
        # Fine-grained query keywords (categories per Table 1 of the paper).
        queries = [
            "景点", "文物古迹", "博物馆",
            "公园", "游乐园", "寺庙",
            "风景区", "植物园", "动物园",
            "水族馆", "教堂"
        ]
        for query in queries:
            yield self._build_request(query, 0)

    def _in_xian(self, lng, lat):
        """Return True when (lng, lat) are both present and inside Xi'an.

        Guards against POIs with a missing or partial ``location`` field;
        the previous unguarded float comparison raised TypeError on None
        and aborted the whole page callback.
        """
        if lng is None or lat is None:
            return False
        return (self.LNG_RANGE[0] < lng < self.LNG_RANGE[1]
                and self.LAT_RANGE[0] < lat < self.LAT_RANGE[1])

    def parse(self, response):
        """Parse one result page: fan out pagination (seed page only), yield items."""
        data = json.loads(response.text)
        if data.get("status") != 0:
            self.logger.error(f"API请求失败: {data}")
            return

        # Seed page (page 0): derive the total page count for this query
        # and schedule the remaining pages.
        if response.meta.get('page_num') == 0:
            total = data.get('total', 0)
            # Ceiling division, capped at MAX_PAGES.
            total_pages = min((total + self.PAGE_SIZE - 1) // self.PAGE_SIZE,
                              self.MAX_PAGES)
            for page in range(1, total_pages):
                yield self._build_request(response.meta['query'], page)

        for poi in data.get("results", []):
            # `or {}` also covers an explicit JSON null, which .get's
            # default argument would not.
            location = poi.get("location") or {}
            detail_info = poi.get("detail_info") or {}
            item = {
                "name": poi.get("name"),
                "lng": location.get("lng"),
                "lat": location.get("lat"),
                "address": poi.get("address"),
                "province": poi.get("province"),
                "city": poi.get("city"),
                "area": poi.get("area"),
                "street_id": poi.get("street_id"),
                "telephone": poi.get("telephone"),
                "detail": poi.get("detail"),
                "uid": poi.get("uid"),
                "tag": detail_info.get("tag"),
                "type": detail_info.get("type"),
                "overall_rating": detail_info.get("overall_rating"),
                "comment_num": detail_info.get("comment_num"),
                "query_category": response.meta['query']  # originating query keyword
            }

            # Drop (and log) anything outside the Xi'an bounding box or
            # lacking coordinates entirely.
            if self._in_xian(item['lng'], item['lat']):
                yield item
            else:
                self.logger.warning(f"坐标超出西安范围: {item['name']} ({item['lng']}, {item['lat']})")