import os
import numpy as np
import pandas as pd
import nibabel as nib
from tqdm import tqdm
import gzip
import shutil

def is_valid_gzip(file_path):
    """Return True if *file_path* is a complete, readable gzip archive.

    The archive is streamed to the end in 1 MB chunks; a truncated or
    corrupted file raises before EOF is reached, which we report as False.
    """
    try:
        with gzip.open(file_path, 'rb') as fh:
            chunk = fh.read(1024 * 1024)
            while chunk:
                chunk = fh.read(1024 * 1024)
    except (gzip.BadGzipFile, EOFError):
        return False
    except Exception as e:
        print(f"文件检查错误: {e}")
        return False
    return True

def extract_and_clean_gzip(gz_path, nii_path, chunk_size=1024 * 1024):
    """Decompress ``gz_path`` to ``nii_path``, salvaging what it can.

    First tries a normal gzip extraction. If the archive is truncated or
    corrupted, falls back to a streaming zlib decompressor and keeps every
    byte recovered before the corruption point (a partial file).

    Returns ``nii_path`` on (possibly partial) success, or ``None`` if
    nothing could be recovered.
    """
    import zlib  # local import: only needed on the repair path

    try:
        with gzip.open(gz_path, 'rb') as f_in:
            with open(nii_path, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out, length=chunk_size)
        return nii_path
    except (EOFError, gzip.BadGzipFile) as e:
        print(f"标准解压失败: {e}, 尝试修复...")
        try:
            # BUGFIX: the original used the nonexistent gzip.decompressobj /
            # gzip.Error and referenced a possibly-unbound `decompressed`.
            # wbits=16+MAX_WBITS makes zlib expect a gzip header/trailer.
            decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
            with open(gz_path, 'rb') as f_in, open(nii_path, 'wb') as f_out:
                while True:
                    chunk = f_in.read(chunk_size)
                    if not chunk:
                        break
                    try:
                        f_out.write(decompressor.decompress(chunk))
                    except zlib.error:
                        # Hit the corrupted region: keep what was already
                        # written and stop.
                        break
            print(f"部分解压完成，创建部分修复的文件: {nii_path}")
            return nii_path
        except Exception as e:
            print(f"文件修复失败: {e}")
            return None
    except Exception as e:
        print(f"解压过程中未知错误: {e}")
        return None

def safe_load_nifti(nii_gz_path):
    """Load a NIfTI image, attempting a repair if the gzip wrapper is corrupt.

    Returns a nibabel image on success, or ``None`` when the file cannot be
    loaded or repaired. For repaired files the voxel data is pre-loaded into
    the image's cache before the temporary file is removed.
    """
    if is_valid_gzip(nii_gz_path):
        return nib.load(nii_gz_path)

    print(f"文件损坏: {nii_gz_path}")

    # Attempt to salvage the archive into a temporary uncompressed .nii file.
    temp_nii_path = nii_gz_path.replace('.nii.gz', '_temp.nii')
    result = extract_and_clean_gzip(nii_gz_path, temp_nii_path)

    if result is None:
        print(f"无法修复文件: {nii_gz_path}")
        return None

    print(f"使用修复后的临时文件: {temp_nii_path}")
    try:
        img = nib.load(temp_nii_path)
        # BUGFIX: nib.load is lazy (array proxy reads from disk on first
        # access). The original deleted the temp file in `finally`, so any
        # later img.get_fdata() failed. Force the data into the image's
        # internal cache while the file still exists.
        img.get_fdata()
        return img
    finally:
        # Clean up the temporary file; ignore failures (best-effort).
        try:
            os.remove(temp_nii_path)
        except OSError:
            pass
def _crop_centered_block(volume, center, patch_size, dtype):
    """Crop a ``patch_size``³ cube centered at ``center`` from ``volume``.

    Regions of the cube that fall outside the volume are zero-padded, so the
    returned array always has shape (patch_size, patch_size, patch_size).
    """
    half_size = patch_size // 2
    ranges = []
    padded_regions = []
    for dim, size in enumerate(volume.shape):
        start = center[dim] - half_size
        end = center[dim] + half_size

        pad_start = max(0, -start)      # voxels missing before the volume
        pad_end = max(0, end - size)    # voxels missing after the volume

        ranges.append(slice(max(0, start), min(size, end)))
        padded_regions.append((pad_start, patch_size - pad_end))

    block = np.zeros((patch_size, patch_size, patch_size), dtype=dtype)
    block[
        padded_regions[0][0]:padded_regions[0][1],
        padded_regions[1][0]:padded_regions[1][1],
        padded_regions[2][0]:padded_regions[2][1]
    ] = volume[tuple(ranges)]
    return block


def cut_ribfrac_patches(original_data_dir, data_dir):
    """Extract 64³ patches around annotated rib fractures from RibFrac scans.

    Reads CT volumes, segmentation labels and the info CSV from
    ``original_data_dir`` (test split layout), saves one CT patch and one
    binary mask patch per fracture as .npy files under ``data_dir``
    (images/, labels/), plus a metadata CSV and processing logs.

    Side effects only; returns ``None``.
    """
    image_dir = os.path.join(original_data_dir, 'ribfrac-test-images')  # CT volume directory
    label_dir = os.path.join(original_data_dir, 'ribfrac-test-labels')  # label volume directory
    csv_path = os.path.join(original_data_dir, 'ribfrac-test-info.csv')  # metadata CSV
    output_dir = os.path.join(data_dir)

    os.makedirs(os.path.join(output_dir, 'images'), exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'labels'), exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'metadata'), exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'logs'), exist_ok=True)

    df = pd.read_csv(csv_path)
    # Segmentation keeps code -1 as well; classification would use 1-4 only.
    # NOTE(review): code 0 rows (background) are filtered out here, which
    # makes the negative-sample branch below unreachable — confirm intent.
    valid_df = df[df['label_code'].isin([1, 2, 3, 4, -1])]

    # public_id -> lists of fracture label ids and their class codes
    fracture_dict = {}
    for pid, group in valid_df.groupby('public_id'):
        fracture_dict[pid] = {
            'label_ids': group['label_id'].tolist(),
            'label_codes': group['label_code'].tolist()
        }

    log_file = os.path.join(output_dir, 'logs', 'processing_log.txt')
    with open(log_file, 'w', encoding='utf-8') as log:
        log.write("Processing Log\n")
        log.write("=" * 50 + "\n")

    def _log(msg):
        # Append one line to the processing log (opened per write, as before).
        with open(log_file, 'a') as log:
            log.write(msg + "\n")

    patch_info = []          # one metadata row per saved patch
    patch_size = 64
    skipped_files = []       # paths skipped due to missing/corrupt data
    BONE_THRESHOLD = 150     # intensity threshold used to pick bone voxels

    for public_id, frac_info in tqdm(fracture_dict.items(), desc='Processing CT scans'):
        img_path = os.path.join(image_dir, f'{public_id}-image.nii.gz')
        label_path = os.path.join(label_dir, f'{public_id}-label.nii.gz')

        if not os.path.exists(img_path):
            msg = f"CT文件不存在: {img_path}"
            print(msg)
            _log(msg)
            skipped_files.append(img_path)
            continue

        if not os.path.exists(label_path):
            msg = f"标签文件不存在: {label_path}"
            print(msg)
            _log(msg)
            skipped_files.append(label_path)
            continue

        try:
            ct_img = safe_load_nifti(img_path)
            if ct_img is None:
                msg = f"无法加载CT文件: {img_path}"
                print(msg)
                _log(msg)
                skipped_files.append(img_path)
                continue

            ct_data = ct_img.get_fdata().astype(np.float32)
        except Exception as e:
            msg = f"加载CT数据错误: {img_path}\n{str(e)}"
            print(msg)
            _log(msg)
            skipped_files.append(img_path)
            continue

        try:
            label_img = safe_load_nifti(label_path)
            if label_img is None:
                msg = f"无法加载标签文件: {label_path}"
                print(msg)
                _log(msg)
                skipped_files.append(label_path)
                continue

            label_data = label_img.get_fdata()
        except Exception as e:
            msg = f"加载标签数据错误: {label_path}\n{str(e)}"
            print(msg)
            _log(msg)
            skipped_files.append(label_path)
            continue

        current_fracture_count = 0

        # --- Positive patches: one 64^3 cube per annotated fracture --------
        for label_id, label_code in zip(frac_info['label_ids'], frac_info['label_codes']):
            fracture_mask = (label_data == label_id).astype(np.uint8)

            positions = np.argwhere(fracture_mask > 0)
            if len(positions) == 0:
                _log(f"骨折 {label_id} 没有发现体素: {public_id}")
                continue

            # Patch is centered on the fracture's voxel centroid.
            center = np.mean(positions, axis=0).astype(int)
            ct_block = _crop_centered_block(ct_data, center, patch_size, np.float32)
            mask_block = _crop_centered_block(fracture_mask, center, patch_size, np.uint8)

            base_name = f'{public_id}_{label_id}'
            ct_filename = os.path.join('images', f'{base_name}_ct.npy')
            mask_filename = os.path.join('labels', f'{base_name}_mask.npy')

            np.save(os.path.join(output_dir, ct_filename), ct_block)
            np.save(os.path.join(output_dir, mask_filename), mask_block)

            patch_info.append({
                'public_id': public_id,
                'label_id': label_id,
                'class_label': label_code,
                'center_x': center[0],
                'center_y': center[1],
                'center_z': center[2],
                'ct_path': ct_filename,
                'mask_path': mask_filename
            })

            current_fracture_count += 1

        # --- Negative sample: one random bone patch away from any fracture -
        # BUGFIX: negative_generated was never initialized in the original,
        # causing a NameError when no label_code == 0 row exists.
        negative_generated = False
        for label_id, label_code in zip(frac_info['label_ids'], frac_info['label_codes']):
            if label_code != 0:
                continue

            bone_mask = (ct_data > BONE_THRESHOLD)
            non_frac_mask = (label_data == 0)
            candidate_mask = bone_mask & non_frac_mask
            candidate_positions = np.argwhere(candidate_mask)

            if len(candidate_positions) == 0:
                msg = f"负样本 {label_id} 在 {public_id} 中找不到有效的骨骼位置"
                print(msg)
                _log(msg)
                continue

            random_idx = np.random.choice(len(candidate_positions))
            center = candidate_positions[random_idx].astype(int)

            ct_block = _crop_centered_block(ct_data, center, patch_size, np.float32)
            # Negative patches carry an all-zero mask by construction.
            mask_block = np.zeros((patch_size, patch_size, patch_size), dtype=np.uint8)

            base_name = f'{public_id}_negative_{label_id}'
            ct_filename = os.path.join('images', f'{base_name}_ct.npy')
            mask_filename = os.path.join('labels', f'{base_name}_mask.npy')

            np.save(os.path.join(output_dir, ct_filename), ct_block)
            np.save(os.path.join(output_dir, mask_filename), mask_block)

            patch_info.append({
                'public_id': public_id,
                'label_id': label_id,
                'class_label': label_code,  # 0 marks a negative sample
                'center_x': center[0],
                'center_y': center[1],
                'center_z': center[2],
                'ct_path': ct_filename,
                'mask_path': mask_filename
            })

            negative_generated = True
            break  # at most one negative patch per scan

        _log(f"处理ID {public_id}: 提取 {current_fracture_count} 个骨折块, "
             f"负样本 {'成功' if negative_generated else '失败'}")

    # BUGFIX: the original duplicated this entire epilogue (metadata write,
    # skipped-file dump, summary prints) a second time after the summary,
    # re-logging the last public_id; the duplicate has been removed.
    metadata_df = pd.DataFrame(patch_info)
    metadata_df.to_csv(os.path.join(output_dir, 'metadata', 'metadata.csv'), index=False)

    if skipped_files:
        skipped_file = os.path.join(output_dir, 'logs', 'skipped_files.txt')
        with open(skipped_file, 'w') as sf:
            sf.write("\n".join(skipped_files))
        print(f"{len(skipped_files)} 个文件被跳过，详情见: {skipped_file}")

    print(f"成功生成 {len(patch_info)} 个3D影像块，保存在 {output_dir} 目录")
    print(f"处理日志保存于: {log_file}")
