import json
import os
import re
import csv
from datetime import datetime

# Directory that all generated CSV files are written to.
output_dir = 'xian_house_price_csv'
# exist_ok=True is idempotent and avoids the check-then-create race of
# the former `if not os.path.exists(...)` guard.
os.makedirs(output_dir, exist_ok=True)

# Accumulator for rows loaded from every input JSON file, keyed by
# listing category: 'new_house' (new developments) and 'esf_house'
# (second-hand listings).
merged_data = {
    'new_house': [],
    'esf_house': [],
}


def detect_field_type(value):
    """Infer a SQL column type for a single JSON-derived value.

    Args:
        value: Any value loaded from JSON (int, float, str, None, ...).

    Returns:
        str: 'INT' for integers (including negative integer strings such
        as '-5'), 'DECIMAL(10,2)' for floats and decimal strings, and a
        'VARCHAR(n)' sized from the string length (capped at 255) for
        everything else.
    """
    # NOTE: bool is a subclass of int, so True/False classify as 'INT';
    # this preserves the original behavior.
    if isinstance(value, int):
        return 'INT'
    if isinstance(value, float):
        return 'DECIMAL(10,2)'
    if isinstance(value, str):
        # int() handles signed integer strings like '-5' that
        # str.isdigit() would miss (the old code misfiled them as DECIMAL).
        try:
            int(value)
            return 'INT'
        except ValueError:
            pass
        try:
            float(value)
            return 'DECIMAL(10,2)'
        except ValueError:
            # Double the observed length for headroom and cap at 255;
            # the max(..., 1) avoids emitting the invalid VARCHAR(0)
            # for an empty string.
            max_length = min(max(len(value) * 2, 1), 255)
            return f'VARCHAR({max_length})'
    return 'VARCHAR(255)'


def process_json_to_csv(json_file, file_type, area=None):
    """Load one JSON file and stage its records for the merged CSV export.

    Args:
        json_file: Path to a JSON file containing a list of record dicts.
        file_type: 'new' for new-house data; anything else is treated as
            second-hand ('esf_house') data.
        area: Optional district label; accepted for the caller's benefit
            but not used here.

    Returns:
        dict with 'fieldnames' and 'file_type' on success, or None when
        the file is empty or could not be processed.
    """
    try:
        with open(json_file, 'r', encoding='utf-8') as f:
            records = json.load(f)

        if not records:
            print(f"{json_file} 为空，跳过处理")
            return None

        # Stage rows into the module-level accumulator for write_merged_csv.
        bucket = 'new_house' if file_type == 'new' else 'esf_house'
        merged_data[bucket].extend(records)
        print(f"已从 {json_file} 加载 {len(records)} 条{bucket.replace('_', ' ')}数据")

        # Field names come from the first record only; the merged writer
        # later re-derives the union of keys across all records anyway.
        return {
            'fieldnames': list(records[0].keys()),
            'file_type': file_type,
        }

    except Exception as e:
        # Best-effort: report and skip a bad file instead of aborting the run.
        print(f"处理 {json_file} 时出错: {e}")
        return None


# Database script generation has been removed; SQL files are no longer produced.


def get_json_files_by_area(input_dir):
    """Scan input_dir and bucket its JSON files by Xi'an district.

    Args:
        input_dir: Directory containing the crawled *.json files.

    Returns:
        (area_files, global_files): area_files maps a district name to
        {'new_house': [...], 'esf_house': [...]} path lists; global_files
        holds the same two lists for city-wide files (or files whose
        naming matches no known district).
    """
    area_files = {}
    global_files = {'new_house': [], 'esf_house': []}

    # Known Xi'an districts (plus the catch-alls 全市/未知) exactly as they
    # appear embedded in the file names.
    xian_areas = ['高新', '曲江', '雁塔', '莲湖', '碑林', '未央', '长安', '灞桥', '经开', '浐灞', '全市', '未知']

    for filename in os.listdir(input_dir):
        if not filename.endswith('.json'):
            continue
        file_path = os.path.join(input_dir, filename)

        # City-wide files look like xian_new_house*.json / xian_esf_house*.json
        # with no second 'xian_' marker inside the stem.
        is_global = (
            (filename.startswith('xian_new_house') and 'xian_' not in filename[4:-5])
            or (filename.startswith('xian_esf_house') and 'xian_' not in filename[4:-5])
        )
        if is_global:
            if 'new_house' in filename:
                global_files['new_house'].append(file_path)
            elif 'esf_house' in filename:
                global_files['esf_house'].append(file_path)
            continue

        # District-specific files: xian_<area>_new_house... or
        # xian_<area>_esf_house...
        matched = False
        for area in xian_areas:
            for kind in ('new_house', 'esf_house'):
                if f'xian_{area}_{kind}' in filename:
                    bucket = area_files.setdefault(
                        area, {'new_house': [], 'esf_house': []})
                    bucket[kind].append(file_path)
                    matched = True
                    break
            if matched:
                break

        # Unknown naming scheme but clearly house data: treat as global.
        if not matched:
            if 'new_house' in filename:
                global_files['new_house'].append(file_path)
            elif 'esf_house' in filename:
                global_files['esf_house'].append(file_path)

    return area_files, global_files


def _write_category_csv(data_key, label, csv_name, file_type):
    """Write one listing category from merged_data to a CSV file.

    Args:
        data_key: Key into merged_data ('new_house' or 'esf_house').
        label: Human-readable Chinese label used in progress messages.
        csv_name: Output file name inside output_dir.
        file_type: Short type tag ('new' or 'esf') echoed in the result.

    Returns:
        {'file_type': ..., 'fieldnames': [...]} on success, or None when
        the category has no data.
    """
    rows = merged_data[data_key]
    if not rows:
        return None

    csv_filepath = os.path.join(output_dir, csv_name)

    # Union of keys across every row, sorted for a stable column order
    # even when individual records are missing some fields.
    all_fieldnames = set()
    for item in rows:
        all_fieldnames.update(item.keys())
    fieldnames = sorted(all_fieldnames)

    print(f"\n开始写入{label}CSV文件，共 {len(rows)} 条数据")
    print(f"字段列表：{fieldnames}")

    # utf-8-sig writes a BOM so Excel opens the Chinese text correctly.
    with open(csv_filepath, 'w', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for item in rows:
            # Fill missing fields with '' so every row has every column.
            writer.writerow({field: item.get(field, '') for field in fieldnames})

    print(f"{label}数据已写入：{csv_filepath}")
    return {'file_type': file_type, 'fieldnames': fieldnames}


def write_merged_csv():
    """Write the accumulated merged_data out to per-category CSV files.

    Returns:
        list of {'file_type', 'fieldnames'} dicts, one entry per category
        that actually contained data.
    """
    # The two categories only differ in their labels/paths, so the heavy
    # lifting lives in one shared helper instead of two copied branches.
    categories = (
        ('new_house', '新房', 'xian_new_house.csv', 'new'),
        ('esf_house', '二手房', 'xian_esf_house.csv', 'esf'),
    )
    results = []
    for data_key, label, csv_name, file_type in categories:
        result = _write_category_csv(data_key, label, csv_name, file_type)
        if result:
            results.append(result)
    return results

def main():
    """Entry point: load every JSON input, merge, and export the CSV files."""
    print("开始处理房价数据...")

    # Discover input files, grouped by district plus city-wide files.
    area_files, global_files = get_json_files_by_area('xian_house_price_data')

    # Load district-level data into the shared merged_data accumulator.
    # (Per-file results are not needed; write_merged_csv re-derives fields.)
    print("\n处理按区域分类的数据：")
    for area, file_types in area_files.items():
        print(f"\n处理 {area} 区域数据：")

        if file_types.get('new_house'):
            print(f"  处理 {area} 区域新房数据...")
            for json_file in file_types['new_house']:
                process_json_to_csv(json_file, 'new', area)

        if file_types.get('esf_house'):
            print(f"  处理 {area} 区域二手房数据...")
            for json_file in file_types['esf_house']:
                process_json_to_csv(json_file, 'esf', area)

    # Load city-wide data (not tied to a single district).
    print("\n处理全局数据（不按区域分类）：")

    print("\n处理全局新房数据：")
    for json_file in global_files['new_house']:
        process_json_to_csv(json_file, 'new')

    print("\n处理全局二手房数据：")
    for json_file in global_files['esf_house']:
        process_json_to_csv(json_file, 'esf')

    # Flush everything accumulated in merged_data out to CSV files.
    write_merged_csv()

    print("\n数据处理完成！")
    print(f"CSV文件已保存至 {output_dir} 目录。")

    # Count the generated CSV files for the summary.
    csv_files = [f for f in os.listdir(output_dir) if f.endswith('.csv')]

    print(f"\n生成文件统计：")
    print(f"- CSV文件: {len(csv_files)} 个")

    print(f"\n生成的CSV文件列表：")
    for csv_file in sorted(csv_files):
        # Only report the two primary merged files.
        if csv_file in ['xian_new_house.csv', 'xian_esf_house.csv']:
            file_path = os.path.join(output_dir, csv_file)
            try:
                # Estimate the row count: total lines minus the header.
                with open(file_path, 'r', encoding='utf-8') as f:
                    data_count = max(0, len(f.readlines()) - 1)
                print(f"  - {csv_file}: 约 {data_count} 条数据")
            except (OSError, UnicodeDecodeError):
                # Was a bare `except:`; narrowed so programming errors
                # (KeyboardInterrupt, NameError, ...) are no longer swallowed.
                print(f"  - {csv_file}: 无法读取行数")

    # Summary of everything that was merged in memory.
    print(f"\n数据合并统计：")
    print(f"- 新房数据总数: {len(merged_data['new_house'])} 条")
    print(f"- 二手房数据总数: {len(merged_data['esf_house'])} 条")
    print(f"- 总数据量: {len(merged_data['new_house']) + len(merged_data['esf_house'])} 条")


# Run the full pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()