# -*- coding: utf-8 -*-
import json
import os
from datetime import datetime

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


class FangtianxiaPipeline(object):
    """Scrapy item pipeline that streams Xi'an house-price items to JSON files.

    Every item is written twice:
      * to a per-area file  ``xian_<area>_<type>.json``  and
      * to a global file    ``xian_new_house.json`` / ``xian_esf_house.json``
    inside the ``xian_house_price_data`` output directory.

    Files are written as streaming JSON arrays: ``[`` on open, comma-separated
    objects per item, ``]`` on ``close_spider``.  An empty file therefore still
    ends up as the valid JSON document ``[]``.
    """

    def __init__(self):
        # Output directory for all generated JSON files.
        self.output_dir = 'xian_house_price_data'
        os.makedirs(self.output_dir, exist_ok=True)

        # Per-file state, keyed by '<area>_<type>' or 'global_<type>'.
        self.file_handlers = {}  # key -> open file object
        self.first_items = {}    # key -> True until the first item is written
        self.counts = {}         # key -> number of items written so far

        # Xi'an districts plus catch-all buckets ('全市' = whole city,
        # '未知' = unknown), and the two item types (new / second-hand).
        self.xian_areas = ['高新', '曲江', '雁塔', '莲湖', '碑林', '未央', '长安', '灞桥', '经开', '浐灞', '全市', '未知']
        self.file_types = ['new_house', 'esf_house']

        # Open every possible output file up front.
        self._init_all_files()

    def _open_file(self, key, file_path, file_name):
        """Open *file_path* for writing and register its state under *key*.

        Mode 'w' truncates any file left over from a previous run, so no
        explicit os.remove() is needed.  Writes the opening '[' of the
        JSON array immediately.
        """
        try:
            handler = open(file_path, 'w', encoding='utf-8')
            handler.write('[')
            self.file_handlers[key] = handler
            self.first_items[key] = True
            self.counts[key] = 0
            print(f"已初始化文件: {file_name}")
        except OSError as e:
            # Report but keep going: a single unwritable file should not
            # abort the whole crawl.
            print(f"初始化文件 {file_name} 时出错: {e}")

    def _init_all_files(self):
        """Open one JSON file per (area, type) pair, plus the global files."""
        for area in self.xian_areas:
            for file_type in self.file_types:
                # File name format: xian_<area>_<type>.json
                file_name = f'xian_{area}_{file_type}.json'
                file_path = os.path.join(self.output_dir, file_name)
                self._open_file(f'{area}_{file_type}', file_path, file_name)
        # Also open the global (not area-split) files kept for backward
        # compatibility with the old, single-file processing style.
        self._init_global_files()

    def _init_global_files(self):
        """Open the two global files that aggregate items across all areas."""
        self._open_file(
            'global_new_house',
            os.path.join(self.output_dir, 'xian_new_house.json'),
            'xian_new_house.json',
        )
        self._open_file(
            'global_esf_house',
            os.path.join(self.output_dir, 'xian_esf_house.json'),
            'xian_esf_house.json',
        )

    def _write_item(self, key, item, progress_label=None):
        """Append *item* as one JSON object to the array file registered under *key*.

        Silently skips keys whose file failed to open.  When *progress_label*
        is given, prints a progress line every 10 items.
        """
        if key not in self.file_handlers:
            return
        handler = self.file_handlers[key]
        if self.first_items[key]:
            self.first_items[key] = False
        else:
            handler.write(',')  # separator between array elements
        json.dump(dict(item), handler, ensure_ascii=False, indent=2)
        handler.flush()
        self.counts[key] += 1
        if progress_label and self.counts[key] % 10 == 0:
            print(f"{progress_label} 已保存 {self.counts[key]} 条数据")

    def process_item(self, item, spider):
        """Write *item* to its per-area file and the matching global file.

        Fills in default ``crawl_time`` (YYYY-MM) and ``area`` ('未知')
        fields when missing, then returns the item unchanged so later
        pipelines can keep processing it.
        """
        if 'crawl_time' not in item:
            item['crawl_time'] = datetime.now().strftime('%Y-%m')
        if 'area' not in item:
            item['area'] = '未知'

        # Items carrying a 'new_link' field come from the new-house spider;
        # everything else is treated as second-hand ('esf') housing.
        file_type = 'new_house' if 'new_link' in item else 'esf_house'
        area = item.get('area', '未知')

        try:
            self._write_item(
                f'{area}_{file_type}',
                item,
                progress_label=f"{area} {file_type.replace('_', ' ')}",
            )
            # Mirror the item into the global file as well.
            self._write_item(f'global_{file_type}', item)
        except Exception as e:
            # Best-effort pipeline: log the failure but never drop the item.
            print(f"写入{area}区域{file_type}数据时出错: {e}")

        return item

    def close_spider(self, spider):
        """Terminate every JSON array, close the files and print statistics."""
        total_count = 0
        area_stats = {}

        for key, handler in self.file_handlers.items():
            try:
                handler.write(']')  # close the JSON array
                handler.close()

                count = self.counts.get(key, 0)
                if key.startswith('global_'):
                    # Global files duplicate the area files; counting them
                    # into total_count would double every item (this was a
                    # bug in the previous version).
                    pass
                else:
                    total_count += count
                    # Key format is '<area>_<type>'; area names contain no '_'.
                    area, _, file_type = key.partition('_')
                    area_stats.setdefault(area, {})[file_type] = count

                print(f"已关闭文件 {key}，共保存 {count} 条数据")
            except OSError as e:
                print(f"关闭文件 {key} 时出错: {e}")

        # Per-area breakdown.
        print("\n各区域数据统计:")
        for area, stats in area_stats.items():
            area_total = sum(stats.values())
            print(f"{area} 区域: 共 {area_total} 条数据")
            for file_type, count in stats.items():
                print(f"  - {file_type.replace('_', ' ')}: {count} 条")

        # Global breakdown (same items, aggregated across areas).
        global_new = self.counts.get('global_new_house', 0)
        global_esf = self.counts.get('global_esf_house', 0)
        print(f"\n全局数据统计:")
        print(f"新房: {global_new} 条")
        print(f"二手房: {global_esf} 条")
        print(f"总计: {global_new + global_esf} 条")

        print(f"\n爬取完成！共保存 {total_count} 条房价数据到xian_house_price_data目录中。")
