#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
图像复用数据集生成器
根据相似图片标注，在同一篇论文内进行图片复用和篡改操作
"""

import json
import os
import random
import shutil
from pathlib import Path
from typing import Dict, List, Tuple, Optional

import cv2
import numpy as np
from PIL import Image


# ==================== 篡改操作函数 ====================
# 从 ProcessImage.py 复制的篡改函数

def flip_image_horizontal(image):
    """Mirror the image around its vertical axis (left-right flip)."""
    mirrored = cv2.flip(image, 1)  # flipCode=1 -> horizontal flip
    return mirrored

def flip_image_vertical(image):
    """Mirror the image around its horizontal axis (top-bottom flip)."""
    mirrored = cv2.flip(image, 0)  # flipCode=0 -> vertical flip
    return mirrored

def rotate_image_180(image):
    """Rotate the image by 180 degrees."""
    turned = cv2.rotate(image, cv2.ROTATE_180)
    return turned

def process_brightness(image, alpha=1.2):
    """Adjust brightness by scaling the HSV value (V) channel by *alpha*."""
    as_hsv = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2HSV)
    as_hsv[..., 2] = cv2.convertScaleAbs(as_hsv[..., 2], alpha=alpha, beta=0)
    adjusted = cv2.cvtColor(as_hsv, cv2.COLOR_HSV2BGR)
    return adjusted

def process_contrast(image, alpha=1.2):
    """Adjust contrast by scaling the luma (Y) channel in YCrCb space."""
    as_ycrcb = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2YCrCb)
    as_ycrcb[..., 0] = cv2.convertScaleAbs(as_ycrcb[..., 0], alpha=alpha, beta=0)
    adjusted = cv2.cvtColor(as_ycrcb, cv2.COLOR_YCrCb2BGR)
    return adjusted

def process_saturation(image, alpha=1.2):
    """Adjust saturation by scaling the HSV saturation (S) channel."""
    as_hsv = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2HSV)
    as_hsv[..., 1] = cv2.convertScaleAbs(as_hsv[..., 1], alpha=alpha)
    adjusted = cv2.cvtColor(as_hsv, cv2.COLOR_HSV2BGR)
    return adjusted

def process_hue(image, beta=50):
    """Shift the image hue by *beta* with proper circular wrap-around.

    OpenCV stores 8-bit hue in [0, 180).  The previous
    ``convertScaleAbs``-based shift reflected/saturated at the range edges
    (e.g. hue 30 with beta=-50 became |−20| = 20 instead of wrapping to 160)
    and could push values past 179, outside the range HSV->BGR expects.
    Hue is an angle, so the shift is applied modulo 180 instead.

    Args:
        image: BGR uint8 image.
        beta: hue shift in OpenCV hue units (1 unit = 2 degrees).

    Returns:
        New BGR uint8 image with the hue channel shifted by *beta*.
    """
    hsv_image = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2HSV)
    # int16 intermediate avoids uint8 overflow before the modulo.
    shifted = (hsv_image[:, :, 0].astype(np.int16) + beta) % 180
    hsv_image[:, :, 0] = shifted.astype(np.uint8)
    return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)

def process_color_temperature(image, n=20):
    """Warm (n > 0) or cool (n < 0) the image.

    Red and green channels are shifted by ``n // 2`` and blue by ``-(n // 2)``,
    each clipped to [0, 255].  This matches the original per-channel LUT
    implementation exactly — ``LUT[i] == clip(i + shift, 0, 255)`` — but the
    three 256-entry LUTs built in a Python loop (two of them identical) are
    replaced by a single vectorized clip-add.

    Args:
        image: BGR uint8 image.
        n: temperature strength; clamped to [-255, 255].

    Returns:
        New BGR uint8 image; the input array is not modified.
    """
    n = max(-255, min(255, n))
    level = n // 2  # floor division, matching the original LUT shift

    # Per-channel shift in BGR order: blue moves opposite to red/green.
    shift = np.array([-level, level, level], dtype=np.int16)
    shifted = image.astype(np.int16) + shift
    return np.clip(shifted, 0, 255).astype(np.uint8)


# Tampering operation registries, keyed by operation name.
# Geometric transforms are deterministic.
GEOMETRIC_OPERATIONS = {
    "flip_horizontal": flip_image_horizontal,
    "flip_vertical": flip_image_vertical,
    "rotate_180": rotate_image_180,
}

# Parameter adjustments draw a fresh random strength on EVERY call:
# the random.* calls live inside the lambdas, so each invocation of a
# registered operation produces a different (random) adjustment.
PARAMETER_OPERATIONS = {
    "brightness": lambda img: process_brightness(img, alpha=random.uniform(0.8, 1.5)),
    "contrast": lambda img: process_contrast(img, alpha=random.uniform(0.8, 1.5)),
    "saturation": lambda img: process_saturation(img, alpha=random.uniform(0.8, 1.5)),
    "hue": lambda img: process_hue(img, beta=random.randint(-50, 50)),
    "color_temperature": lambda img: process_color_temperature(img, n=random.randint(-30, 30)),
}


# ==================== Configuration ====================
# Input paths
REAL_PDF_ROOT = "/home/zhangbo/workspace/aigc/genome/dataset/real/real_pdf"  # root of the real-paper panel images
REAL_JSON_PATH = "/home/zhangbo/workspace/aigc/genome/dataset/real/real.json"  # base paper/page/figure/panel metadata
SIMILAR_IMAGES_JSONL = "/home/zhangbo/workspace/aigc/genome/dataset/scripts/generate_real/splicing_pages_20250903_192853.jsonl"  # similar-image annotations (one JSON object per line)

# Output paths
OUTPUT_ROOT = "/home/haozhuodi/iclr2026/image_duplication/111/data"  # tampered panel images go under <paper_id>_Duplication/
OUTPUT_JSON_PATH = "/home/haozhuodi/iclr2026/image_duplication/111/duplication_results.json"  # aggregated result JSON

# Tampering limits (per tampered panel)
MAX_GEOMETRIC_OPS = 2  # at most 2 geometric transforms
MAX_PARAMETER_OPS = 2  # at most 2 parameter adjustments


# ==================== 核心功能函数 ====================

def load_similar_images_data(jsonl_path: str) -> List[Dict]:
    """Read the similar-image annotations: one JSON object per non-blank line."""
    with open(jsonl_path, 'r', encoding='utf-8') as handle:
        return [json.loads(raw) for raw in map(str.strip, handle) if raw]

def load_real_json(json_path: str) -> Dict:
    """Deserialize the base dataset JSON file and return it as a dict."""
    with open(json_path, encoding='utf-8') as handle:
        payload = json.load(handle)
    return payload

def find_paper_by_id(papers: List[Dict], paper_id: int) -> Optional[Dict]:
    """Return the first paper whose 'paper_id' matches, or None if absent."""
    return next((entry for entry in papers if entry['paper_id'] == paper_id), None)

def find_page_by_index(paper: Dict, page_index: int) -> Optional[Dict]:
    """Return the first page whose 'page_index' matches, or None if absent."""
    return next((pg for pg in paper['pages'] if pg['page_index'] == page_index), None)

def get_panel_pairs(panels: List[Dict], panel_count: int) -> List[Tuple[int, int]]:
    """Build consecutive panel index pairs: (0,1), (2,3), (4,5).

    The number of pairs is panel_count // 2, capped at 3; a pair is kept
    only when its second index actually exists in *panels*.
    """
    pair_total = min(3, panel_count // 2)
    return [
        (2 * k, 2 * k + 1)
        for k in range(pair_total)
        if 2 * k + 1 < len(panels)
    ]

def apply_tampering_operations(image: np.ndarray) -> Tuple[np.ndarray, List[str]]:
    """Apply a random subset of tampering operations to a copy of *image*.

    Draws 0..MAX_GEOMETRIC_OPS geometric transforms first, then
    0..MAX_PARAMETER_OPS parameter adjustments, applying each in the
    (random) order sampled.  Returns the tampered image and the list of
    applied operation names.
    """
    result = image.copy()
    applied: List[str] = []

    # Same two-phase draw as before: count first, then the sample itself.
    for registry, cap in ((GEOMETRIC_OPERATIONS, MAX_GEOMETRIC_OPS),
                          (PARAMETER_OPERATIONS, MAX_PARAMETER_OPS)):
        candidates = list(registry.keys())
        how_many = random.randint(0, min(cap, len(candidates)))
        for op_name in random.sample(candidates, how_many):
            result = registry[op_name](result)
            applied.append(op_name)

    return result, applied

def create_output_directory_structure(paper_id: int) -> str:
    """Create <OUTPUT_ROOT>/<paper_id>_Duplication/panel and return the paper dir."""
    paper_dir = os.path.join(OUTPUT_ROOT, f"{paper_id}_Duplication")
    os.makedirs(os.path.join(paper_dir, "panel"), exist_ok=True)
    return paper_dir

def copy_and_tamper_image(source_path: str, target_path: str,
                          figure_id: str, panel_id: str) -> Tuple[str, List[str]]:
    """Read the source panel image, tamper it, and write it to *target_path*.

    figure_id and panel_id are accepted for interface compatibility but are
    not used by the current implementation.

    Returns:
        (target_path, list of applied operation names).
    Raises:
        ValueError: when the source image cannot be read/decoded.
    """
    # Make sure the directory for the output file exists.
    os.makedirs(os.path.dirname(target_path), exist_ok=True)

    loaded = cv2.imread(source_path)
    if loaded is None:
        raise ValueError(f"无法读取图像: {source_path}")

    tampered, applied = apply_tampering_operations(loaded)
    cv2.imwrite(target_path, tampered)

    return target_path, applied

def update_panel_issues(panel: Dict, operations: List[str],
                        original_panel_id: str, tampered_panel_id: str) -> Dict:
    """Attach a duplication-forgery issue record to *panel*.

    Overwrites panel['panel_level_issues'] in place and returns the same
    panel dict for convenience.
    """
    evidence = {
        "duplication_method": "intra_paper_reuse",
        "original_panel_id": original_panel_id,
        "tampered_panel_id": tampered_panel_id,
        "tampering_operations": operations,
        "tampering_count": len(operations),
    }
    issue_record = {
        "issue_id": "duplication_001",
        "scope": "within",
        "issue_type": "forgery",
        "issue_subtype": "image_duplication",
        "evidence": evidence,
    }
    panel["panel_level_issues"] = {"has_issue": True, "issues": [issue_record]}
    return panel

def process_single_paper(paper_id: int, similar_data: Dict, real_data: Dict) -> Optional[Dict]:
    """Duplicate-and-tamper panel pairs for one annotation record of a paper.

    For the page named by similar_data['page_index_1'], pairs consecutive
    panels of the page's FIRST figure, saves panel A's image (randomly
    tampered) under panel B's id, and annotates a copy of panel B with the
    duplication issue metadata.

    Returns a copy of the paper dict with the tampered panels swapped in, or
    None when the paper/page/figures/pairs cannot be resolved.

    NOTE(review): paper.copy() below is a shallow copy, so the nested
    page/figure structures stay shared with real_data and the update loop
    mutates them in place — confirm that is acceptable for later callers
    that reuse real_data.
    """
    print(f"处理论文 {paper_id}...")

    # Locate the paper record in the base dataset.
    paper = find_paper_by_id(real_data['papers'], paper_id)
    if not paper:
        print(f"未找到论文 {paper_id}")
        return None

    # Locate the annotated page within that paper.
    page_index = similar_data['page_index_1']
    page = find_page_by_index(paper, page_index)
    if not page:
        print(f"未找到论文 {paper_id} 的第 {page_index} 页")
        return None

    # Bail out when the page carries no figures at all.
    if not page['figures']:
        print(f"论文 {paper_id} 第 {page_index} 页没有图片")
        return None

    # Only the first figure on the page is considered; its panels are paired.
    figure = page['figures'][0]
    panels = figure['panels']
    panel_count = similar_data['panel_count']

    # Build (panel_a, panel_b) index pairs: (0,1), (2,3), (4,5), capped at 3.
    pairs = get_panel_pairs(panels, panel_count)
    if not pairs:
        print(f"论文 {paper_id} 没有可配对的图片")
        return None

    print(f"找到 {len(pairs)} 对图片进行配对")

    # Prepare <OUTPUT_ROOT>/<paper_id>_Duplication/panel.
    output_dir = create_output_directory_structure(paper_id)

    # Tamper each pair: panel A's image becomes panel B's tampered image.
    processed_pairs = []
    for i, (panel_a_idx, panel_b_idx) in enumerate(pairs):
        panel_a = panels[panel_a_idx]
        panel_b = panels[panel_b_idx]

        print(f"  处理配对 {i+1}: {panel_a['panel_id']} -> {panel_b['panel_id']}")

        # Panel paths in the JSON appear to carry a 'dataset/real/real_pdf/'
        # prefix that must be stripped before joining with REAL_PDF_ROOT —
        # TODO confirm against real.json.
        source_path = os.path.join(REAL_PDF_ROOT, panel_a['path'].replace('dataset/real/real_pdf/', ''))
        target_dir = os.path.join(output_dir, "panel", figure['figure_id'])
        os.makedirs(target_dir, exist_ok=True)
        target_path = os.path.join(target_dir, f"{panel_b['panel_id']}_tampered.png")

        try:
            # Copy panel A's image and apply random tampering operations.
            tampered_path, operations = copy_and_tamper_image(
                source_path, target_path, figure['figure_id'], panel_b['panel_id']
            )

            # Record the duplication issue on a copy of panel B.
            updated_panel_b = update_panel_issues(
                panel_b.copy(), operations, panel_a['panel_id'], panel_b['panel_id']
            )

            # Point panel B's path at the tampered image (output-relative).
            updated_panel_b['path'] = os.path.join(
                f"{paper_id}_Duplication", "panel", figure['figure_id'], 
                f"{panel_b['panel_id']}_tampered.png"
            )

            processed_pairs.append({
                'original_panel': panel_a,
                'target_panel': updated_panel_b,
                'operations': operations,
                'tampered_path': tampered_path
            })

            print(f"    应用操作: {', '.join(operations)}")

        except Exception as e:
            # Best effort: skip this pair on any failure and continue.
            print(f"    处理失败: {e}")
            continue

    if not processed_pairs:
        print(f"论文 {paper_id} 没有成功处理的配对")
        return None

    # Shallow copy of the paper record (nested structures remain shared).
    output_paper = paper.copy()

    # Swap each annotated panel into the (shared) figure structures.
    for pair in processed_pairs:
        target_panel = pair['target_panel']
        # Find the matching panel slot by id and replace it.
        for page in output_paper['pages']:
            if page['page_index'] == page_index:
                for fig in page['figures']:
                    if fig['figure_id'] == figure['figure_id']:
                        for i, panel in enumerate(fig['panels']):
                            if panel['panel_id'] == target_panel['panel_id']:
                                fig['panels'][i] = target_panel
                                break

    print(f"论文 {paper_id} 处理完成，成功处理 {len(processed_pairs)} 对图片")
    return output_paper

def merge_paper_results(paper_result1: Dict, paper_result2: Dict) -> Dict:
    """Merge per-page processing results for the same paper.

    Pages from *paper_result2* are folded into a shallow copy of
    *paper_result1*: new pages/figures are appended, and panels of an
    already-known figure are appended only when their panel_id is new.
    Note the copy is shallow, so nested lists are shared with the first
    argument and extended in place.
    """
    merged = paper_result1.copy()

    for incoming_page in paper_result2['pages']:
        # Locate an already-merged page with the same index, if any.
        page_slot = next(
            (idx for idx, pg in enumerate(merged['pages'])
             if pg['page_index'] == incoming_page['page_index']),
            None,
        )
        if page_slot is None:
            merged['pages'].append(incoming_page)
            continue

        for incoming_fig in incoming_page['figures']:
            # Locate an already-merged figure with the same id, if any.
            fig_slot = next(
                (idx for idx, fg in enumerate(merged['pages'][page_slot]['figures'])
                 if fg['figure_id'] == incoming_fig['figure_id']),
                None,
            )
            if fig_slot is None:
                merged['pages'][page_slot]['figures'].append(incoming_fig)
                continue

            # Merge panels, skipping duplicates by panel_id.
            known_panels = merged['pages'][page_slot]['figures'][fig_slot]['panels']
            known_ids = {p['panel_id'] for p in known_panels}
            for candidate in incoming_fig['panels']:
                if candidate['panel_id'] not in known_ids:
                    known_panels.append(candidate)
                    known_ids.add(candidate['panel_id'])

    return merged

def main():
    """Entry point: build the image-duplication dataset end to end.

    Loads the similar-image annotations and the base paper JSON, groups the
    annotation records by paper id, tampers the panel pairs for each paper,
    merges multi-page results of the same paper, and writes the aggregated
    result JSON plus a summary.
    """
    print("开始生成图像复用数据集...")

    # Make sure the image output root exists.
    os.makedirs(OUTPUT_ROOT, exist_ok=True)

    # Load input data.
    print("加载相似图片标注数据...")
    similar_data_list = load_similar_images_data(SIMILAR_IMAGES_JSONL)
    print(f"加载了 {len(similar_data_list)} 条相似图片记录")

    print("加载基础JSON数据...")
    real_data = load_real_json(REAL_JSON_PATH)
    print(f"加载了 {len(real_data['papers'])} 篇论文")

    # Group records by paper_id so multiple records for the same PDF do not
    # overwrite each other's results.
    paper_groups = {}
    for similar_data in similar_data_list:
        pdf_path = similar_data['pdf_path']
        # Paper id is the PDF basename without the .pdf extension; assumes
        # the basename is numeric — TODO confirm against the annotations.
        paper_id = int(os.path.basename(pdf_path).replace('.pdf', ''))
        if paper_id not in paper_groups:
            paper_groups[paper_id] = []
        paper_groups[paper_id].append(similar_data)

    print(f"发现 {len(paper_groups)} 篇不同的论文需要处理")

    # Process each paper group.
    processed_papers = []
    total_processed = 0

    for paper_id, similar_data_list_for_paper in paper_groups.items():
        print(f"\n处理论文 {paper_id}，包含 {len(similar_data_list_for_paper)} 条记录")

        try:
            # Accumulate one merged result per paper.
            paper_result = None

            for similar_data in similar_data_list_for_paper:
                result = process_single_paper(paper_id, similar_data, real_data)
                if result:
                    if paper_result is None:
                        paper_result = result
                    else:
                        # Fold in results from additional pages of the paper.
                        paper_result = merge_paper_results(paper_result, result)

            if paper_result:
                processed_papers.append(paper_result)
                total_processed += 1
                print(f"✓ 论文 {paper_id} 处理完成")
            else:
                print(f"✗ 论文 {paper_id} 处理失败")

        except Exception as e:
            # Keep going on per-paper failures; the error is only printed.
            print(f"✗ 处理论文 {paper_id} 时出错: {e}")
            continue

    # Save the aggregated results.
    # NOTE(review): processing_rate compares processed *papers* against
    # similar *records* — different units; confirm this ratio is intended.
    output_data = {
        "papers": processed_papers,
        "summary": {
            "total_processed_papers": total_processed,
            "total_similar_records": len(similar_data_list),
            "processing_rate": f"{total_processed}/{len(similar_data_list)}"
        }
    }

    with open(OUTPUT_JSON_PATH, 'w', encoding='utf-8') as f:
        json.dump(output_data, f, ensure_ascii=False, indent=2)

    print(f"\n处理完成！")
    print(f"成功处理 {total_processed} 篇论文")
    print(f"结果保存到: {OUTPUT_JSON_PATH}")
    print(f"图片保存到: {OUTPUT_ROOT}")

# Script entry point: guard so importing this module runs no side effects.
if __name__ == "__main__":
    main()
