# -*- coding: utf-8 -*-
import scrapy
import re
from datetime import datetime, timedelta
from fangtianxia.items import NewHouseItem, EsfItem


class FangSpider(scrapy.Spider):
    """Spider for fang.com that collects new-house and second-hand-house
    (esf) listings for the city of Xi'an, broken down by district.

    Each district's requests are tagged with a synthetic ``crawl_time``
    month label so downstream consumers can bucket results over a
    roughly two-year window.
    """

    name = 'fang'
    allowed_domains = ['fang.com']
    # Only crawl housing data for Xi'an.
    start_urls = ['https://xian.fang.com/SoufunFamily.htm']

    # District display name -> URL slug used by fang.com list pages.
    # The empty slug means "whole city".
    XIAN_AREAS = {
        '全市': '',
        '曲江': 'qujiang',
        '未央': 'weiyang',
        '浐灞': 'chanba',
        '灞桥': 'baqiao',
        '碑林': 'beilin',
        '经开': 'jingkai',
        '莲湖': 'lianhu',
        '长安': 'changan',
        '雁塔': 'yanta',
        '高新': 'gaoxin'
    }

    def __init__(self, *args, **kwargs):
        """Initialise per-district counters, crawl limits and date labels."""
        super(FangSpider, self).__init__(*args, **kwargs)
        # Per-district item counters for the two listing types.
        self.new_house_count = {area: 0 for area in self.XIAN_AREAS}
        self.esf_count = {area: 0 for area in self.XIAN_AREAS}
        # Hard page cap per district (pagination also considers the
        # per-district item target -- see the parse callbacks).
        self.max_pages = 50
        # Target number of items to collect for each district.
        self.target_count_per_area = 200

        # Synthetic "YYYY-MM" labels covering roughly the past 24 months.
        # NOTE(review): 30-day steps only approximate calendar months, so
        # labels drift slightly across the two-year span.
        current_date = datetime.now()
        self.crawl_dates = [(current_date - timedelta(days=30 * i)).strftime('%Y-%m')
                            for i in range(24)]

    def extract_surrounding(self, title, tags=None):
        """Derive a coarse "surroundings" label from listing text.

        Scans *title* (plus optional *tags*) for keywords in five
        categories -- transport, medical, education, commerce and
        environment -- and returns the matched category labels joined
        by ';', or '未知' when nothing matches.
        """
        # (label, keywords) pairs checked in a fixed order so the output
        # string is deterministic.
        keyword_categories = [
            ('交通好', ['地铁', '交通', '路', '站', '高速', '公交', '轨道']),
            ('医疗好', ['医院', '医疗', '诊所', '健康', '康复', '体检']),
            ('教育好', ['学校', '教育', '学区', '小学', '初中', '高中', '大学', '书包']),
            ('商业好', ['商场', '购物', '超市', '商圈', '购物中心']),
            ('环境好', ['公园', '绿化', '湖', '河', '生态', '景观', '花园']),
        ]

        text_to_analyze = title
        if tags:
            text_to_analyze += ' ' + tags
        # Lower-case so any latin characters match case-insensitively;
        # this is a no-op for the Chinese keywords themselves.
        text_to_analyze = text_to_analyze.lower()

        surroundings = [label for label, keywords in keyword_categories
                        if any(keyword in text_to_analyze for keyword in keywords)]
        return ';'.join(surroundings) if surroundings else '未知'

    @staticmethod
    def _unpack_info(info):
        """Normalise the (sheng, shi[, area], crawl_time) meta tuple.

        Accepts both the legacy 3-tuple (no area) and the current
        4-tuple form, and coerces *area* to a plain string
        ('未知' when missing).
        """
        if len(info) == 3:
            sheng, shi, crawl_time = info
            area = '未知'
        else:
            sheng, shi, area, crawl_time = info
        if isinstance(area, list):
            area = '_'.join(area)
        elif area is None:
            area = '未知'
        return sheng, shi, area, crawl_time

    @staticmethod
    def _current_page(response):
        """Best-effort page number extracted from the response URL.

        fang.com list URLs end in '/<page>/', so the second-to-last path
        segment is checked; defaults to 1 when absent or non-numeric.
        (isdigit() already guards int(), so no try/except is needed.)
        """
        segments = response.url.split('/')
        if len(segments) > 1 and segments[-2].isdigit():
            return int(segments[-2])
        return 1

    @staticmethod
    def _absolutize(response, next_url, site_root):
        """Turn a possibly-relative "next page" href into an absolute URL."""
        if next_url.startswith('http'):
            return next_url
        if next_url.startswith('/'):
            return site_root + next_url
        # Bare relative path: splice onto the parent of the current page.
        base_url = response.url.rsplit('/', 2)[0]
        return f'{base_url}/{next_url}'

    def parse(self, response):
        """Entry callback: fan out one new-house and one esf request per district."""
        sheng = '陕西'
        shi = '西安'
        base_url = 'https://xian.newhouse.fang.com'
        esf_base_url = 'https://xian.esf.fang.com'

        for area_index, (area_name, area_code) in enumerate(self.XIAN_AREAS.items()):
            # Assign each district a different synthetic crawl month.
            crawl_time = self.crawl_dates[area_index % len(self.crawl_dates)]

            # City-wide pages use the plain list URL; districts append a slug.
            new_house_url = f'{base_url}/house/s/'
            if area_code:
                new_house_url = f'{base_url}/house/s/_{area_code}/'

            esf_url = f'{esf_base_url}/house/s/'
            if area_code:
                esf_url = f'{esf_base_url}/house/s/_{area_code}/'

            print(f'开始爬取 {area_name} 区域，日期: {crawl_time}')

            # New-house listings for this district.
            yield scrapy.Request(
                url=new_house_url,
                callback=self.new_house_parse,
                meta={'info': (sheng, shi, area_name, crawl_time)}
            )

            # Second-hand (esf) listings for this district.
            yield scrapy.Request(
                url=esf_url,
                callback=self.esf_parse,
                meta={'info': (sheng, shi, area_name, crawl_time)}
            )

    def new_house_parse(self, response):
        """Parse one new-house list page and follow pagination."""
        sheng, shi, area, crawl_time = self._unpack_info(response.meta['info'])

        for li in response.xpath('//div[contains(@class, "nl_con")]/ul/li'):
            try:
                item_data = {
                    'sheng': sheng,
                    'shi': shi,
                    'area': area,
                    'crawl_time': crawl_time
                }

                # A listing without a name is unusable -- skip it.
                name = li.xpath('.//div[@class="nlcd_name"]/a/text()').get()
                if not name:
                    continue
                item_data['name'] = re.sub(r'\s', '', str(name))

                new_link = li.xpath('.//div[@class="nlcd_name"]/a/@href').get()
                if new_link:
                    item_data['new_link'] = response.urljoin(new_link)

                item_data['address'] = li.xpath('.//div[@class="address"]/a/@title').get() or '未知'

                # Keep only fragments that look like room counts ("X居").
                house_type_li = li.xpath('.//div[contains(@class, "house_type")]/a/text()').getall()
                house_type = [house for house in house_type_li if re.search('居', house)]
                item_data['house_type'] = '/'.join(house_type)

                item_data['is_sale'] = li.xpath('.//div[contains(@class,"fangyuan")]/span/text()').get() or '未知'
                tags = li.xpath('.//div[contains(@class,"fangyuan")]/a/text()').getall()
                item_data['tags'] = '/'.join(tags)

                # Concatenate every "<number>㎡" fragment found in the
                # house_type block (searching each fragment only once).
                area_fragments = []
                for fragment in li.xpath('.//div[contains(@class,"house_type")]/text()').getall():
                    match = re.search(r'(\d+\.?\d*)㎡', str(fragment))
                    if match:
                        area_fragments.append(match.group())
                item_data['house_area'] = ''.join(area_fragments) or '未知'

                decoration = li.xpath('.//div[contains(@class,"fangyuan")]/span[contains(text(),"装修")]/text()').get()
                item_data['decoration'] = decoration or '未知'

                building_type = li.xpath('.//div[contains(@class,"fangyuan")]/span[contains(text(),"类型")]/text()').get()
                item_data['building_type'] = building_type or '未知'

                # Strip whitespace and the "广告" (advert) marker from the price.
                prices = li.xpath('.//div[@class="nhouse_price"]//text()').getall()
                item_data['price'] = ''.join(re.sub(r'\s|广告', '', str(p)) for p in prices)

                opening_time = li.xpath('.//div[contains(@class,"fangyuan")]/span[contains(text(),"开盘")]/text()').get()
                item_data['opening_time'] = opening_time or '未知'

                item = NewHouseItem(**item_data)

                # Surroundings are inferred from the listing name and tags.
                item['surrounding'] = self.extract_surrounding(item.get('name', '') or '', item.get('tags', '') or '')

                # .get() seeds districts that were not pre-created in
                # __init__ (e.g. the '未知' fallback area).
                self.new_house_count[area] = self.new_house_count.get(area, 0) + 1

                yield item
            except Exception as e:
                print(f"新房数据提取错误: {e}")
                continue

        next_url = response.xpath('//a[contains(text(),"下一页")]/@href').get()
        current_page = self._current_page(response)

        # Using .get() avoids a KeyError when a page for an unseeded
        # district produced no items at all.
        # NOTE(review): the `or` keeps paginating past the per-district
        # target until half of max_pages -- confirm that is intended.
        if next_url and (self.new_house_count.get(area, 0) < self.target_count_per_area
                         or current_page < self.max_pages * 0.5):
            next_url = self._absolutize(response, next_url, 'https://xian.newhouse.fang.com')
            print(f'继续爬取下一页，当前页数: {current_page}, 已爬取数量: {self.new_house_count.get(area, 0)}')
            yield scrapy.Request(
                url=next_url,
                callback=self.new_house_parse,
                meta={'info': response.meta['info']}
            )
        else:
            print(f"区域 {area} 新房爬取完成: 共{current_page}页, {self.new_house_count.get(area, 0)}条数据")

    def esf_parse(self, response):
        """Parse one second-hand-house (esf) list page and follow pagination."""
        sheng, shi, area, crawl_time = self._unpack_info(response.meta['info'])

        dls = response.xpath('//div[contains(@class,"shop_list")]/dl')

        print(f'开始处理 {area} 区域二手房数据')
        for dl in dls:
            try:
                item = EsfItem()
                item['sheng'] = sheng
                item['shi'] = shi
                item['area'] = area  # keep the district on every item
                item['crawl_time'] = crawl_time

                item['title'] = dl.xpath('.//span[@class="tit_shop"]/text()').get() or ''

                esf_link = dl.xpath('.//h4[@class="clearfix"]/a/@href').get()
                item['esf_link'] = response.urljoin(esf_link) if esf_link else ''

                # The first <p> packs layout/size/floor/direction/year into
                # '|'-separated fragments; classify each by its marker char.
                tel_shop = dl.xpath('./dd[1]/p[1]//text()').getall()
                tel_shop = [re.sub(r'\s|\|', '', d) for d in tel_shop]
                for shop in tel_shop:
                    if '厅' in shop:
                        item['house_type'] = shop
                    elif '㎡' in shop:
                        # house_area, not 'area', to avoid the district field
                        item['house_area'] = shop
                    elif '层' in shop:
                        item['floor'] = shop
                    elif '向' in shop:
                        item['direction'] = shop
                    elif '年建' in shop:
                        item['year'] = shop

                # Compound / address / agent info.
                item['name'] = dl.xpath('./dd[1]/p[2]/a/@title').get() or ''
                item['address'] = dl.xpath('./dd[1]/p[2]/span/text()').get() or ''
                item['people'] = dl.xpath('./dd[1]/p[1]/span/a/text()').get() or ''

                # Total price and unit price.
                price = dl.xpath('./dd[2]/span[1]//text()').getall()
                item['price'] = ''.join(price) if price else ''
                item['unit_price'] = dl.xpath('./dd[2]/span[2]/text()').get() or ''

                decoration_text = dl.xpath('./dd[1]/p[1]//text()[contains(.,"装修")]').get()
                item['decoration'] = decoration_text or '未知'

                property_info = dl.xpath('./dd[1]/p[1]//text()[contains(.,"产权")]').get()
                item['property_right'] = property_info or '未知'

                structure_info = dl.xpath('./dd[1]/p[1]//text()[contains(.,"结构")]').get()
                item['building_structure'] = structure_info or '未知'

                # Surroundings are inferred from the title and compound name.
                item['surrounding'] = self.extract_surrounding(item.get('title', '') or '', item.get('name', '') or '')

                self.esf_count[area] = self.esf_count.get(area, 0) + 1

                yield item
            except Exception as e:
                print(f'处理二手房数据时出错: {e}')
                # Count failures too so pagination cannot loop forever on
                # a page full of malformed entries.
                self.esf_count[area] = self.esf_count.get(area, 0) + 1

        next_url = response.xpath('//a[contains(text(),"下一页")]/@href').get()
        current_page = self._current_page(response)

        # Same pagination rule and KeyError guard as new_house_parse.
        if next_url and (self.esf_count.get(area, 0) < self.target_count_per_area
                         or current_page < self.max_pages * 0.5):
            next_url = self._absolutize(response, next_url, 'https://xian.esf.fang.com')
            print(f'继续爬取下一页，当前页数: {current_page}, 已爬取数量: {self.esf_count.get(area, 0)}')
            yield scrapy.Request(
                url=next_url,
                callback=self.esf_parse,
                meta={'info': response.meta['info']}
            )
        else:
            print(f"区域 {area} 二手房爬取完成: 共{current_page}页, {self.esf_count.get(area, 0)}条数据")

        # Progress report: 80% of the overall target counts as done enough.
        total_new = sum(self.new_house_count.values())
        total_esf = sum(self.esf_count.values())
        if total_new + total_esf >= self.target_count_per_area * len(self.XIAN_AREAS) * 0.8:
            print(f"已达到总体目标数据量的80%，共 {total_new + total_esf} 条数据")
