#!/usr/bin/env python3
"""
Android导出脚本 - 将PiDiNet模型转换为Android可用格式
"""

import json
import os
import sys

import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image

class PiDiNetBlock(nn.Module):
    """Depthwise-separable residual block used by PiDiNet.

    A 3x3 depthwise convolution followed by a 1x1 pointwise convolution,
    wrapped in a residual connection. When the stride or channel count
    changes, the identity path is replaced by a 1x1 projection conv.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(PiDiNetBlock, self).__init__()
        # Depthwise 3x3: one filter per input channel (groups=in_channels).
        self.conv1 = nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels, bias=False)
        # Pointwise 1x1: mixes channels and sets the output width.
        self.conv2 = nn.Conv2d(in_channels, out_channels, 1, bias=False)

        # A projection shortcut is only needed when the residual shapes
        # would otherwise mismatch (spatial or channel-wise).
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Conv2d(in_channels, out_channels, 1, stride=stride, bias=True)
        else:
            self.shortcut = None

    def forward(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        return self.conv2(self.conv1(x)) + residual

class PiDiNet(nn.Module):
    """Lightweight PiDiNet edge detector.

    Attribute names (block1_1 ... block4_4, conv_reduces, classifier) are
    chosen to line up with the pretrained checkpoint's state_dict keys,
    so they must not be renamed.
    """

    def __init__(self):
        super(PiDiNet, self).__init__()

        # Stem: 3 -> 20 channels, halves the spatial resolution.
        self.init_block = nn.Conv2d(3, 20, kernel_size=3, stride=2, padding=1, bias=False)

        # Stage 1: 20 channels at stem resolution.
        self.block1_1 = PiDiNetBlock(20, 20, stride=1)
        self.block1_2 = PiDiNetBlock(20, 20, stride=1)
        self.block1_3 = PiDiNetBlock(20, 20, stride=1)

        # Stage 2: widen to 40 channels, downsample by 2.
        self.block2_1 = PiDiNetBlock(20, 40, stride=2)
        self.block2_2 = PiDiNetBlock(40, 40, stride=1)
        self.block2_3 = PiDiNetBlock(40, 40, stride=1)
        self.block2_4 = PiDiNetBlock(40, 40, stride=1)

        # Stage 3: widen to 80 channels, downsample by 2.
        self.block3_1 = PiDiNetBlock(40, 80, stride=2)
        self.block3_2 = PiDiNetBlock(80, 80, stride=1)
        self.block3_3 = PiDiNetBlock(80, 80, stride=1)
        self.block3_4 = PiDiNetBlock(80, 80, stride=1)

        # Stage 4: stays at 80 channels, downsample by 2.
        self.block4_1 = PiDiNetBlock(80, 80, stride=2)
        self.block4_2 = PiDiNetBlock(80, 80, stride=1)
        self.block4_3 = PiDiNetBlock(80, 80, stride=1)
        self.block4_4 = PiDiNetBlock(80, 80, stride=1)

        # Per-stage 1x1 reductions to single-channel edge maps.
        self.conv_reduces = nn.ModuleList([
            nn.Sequential(nn.Conv2d(20, 1, 1, bias=True)),  # stage 1: 20 -> 1
            nn.Sequential(nn.Conv2d(40, 1, 1, bias=True)),  # stage 2: 40 -> 1
            nn.Sequential(nn.Conv2d(80, 1, 1, bias=True)),  # stage 3: 80 -> 1
            nn.Sequential(nn.Conv2d(80, 1, 1, bias=True)),  # stage 4: 80 -> 1
        ])

        # Fuses the four stage maps into the final single-channel output.
        self.classifier = nn.Conv2d(4, 1, 1, bias=True)

    def forward(self, x):
        _, _, in_h, in_w = x.shape

        # Stem feature extraction.
        feat = self.init_block(x)

        # Run the four stages sequentially, keeping each stage's output
        # for multi-scale fusion afterwards.
        stages = (
            (self.block1_1, self.block1_2, self.block1_3),
            (self.block2_1, self.block2_2, self.block2_3, self.block2_4),
            (self.block3_1, self.block3_2, self.block3_3, self.block3_4),
            (self.block4_1, self.block4_2, self.block4_3, self.block4_4),
        )
        stage_feats = []
        for stage in stages:
            for block in stage:
                feat = block(feat)
            stage_feats.append(feat)

        # Reduce each stage to one channel and resample to half the input size.
        half_size = (in_h // 2, in_w // 2)
        edge_maps = [
            torch.nn.functional.interpolate(
                reduce(stage_feat), size=half_size, mode='bilinear', align_corners=False
            )
            for reduce, stage_feat in zip(self.conv_reduces, stage_feats)
        ]

        # Fuse the four scales, then upsample back to the input resolution.
        fused = self.classifier(torch.cat(edge_maps, dim=1))
        full = torch.nn.functional.interpolate(fused, size=(in_h, in_w), mode='bilinear', align_corners=False)

        return torch.sigmoid(full)

def remove_module_prefix(state_dict):
    """Strip a leading 'module.' (added by DataParallel wrappers) from every key."""
    prefix = 'module.'
    return {
        (key[len(prefix):] if key.startswith(prefix) else key): value
        for key, value in state_dict.items()
    }

class AndroidModelExporter:
    """Exports PiDiNet into Android-ready artifacts.

    Produces: an ONNX model (and optionally a TFLite model), a JSON config
    describing pre/post-processing, and Java/Gradle integration code written
    out verbatim from embedded string templates.
    """

    def __init__(self, model_path):
        # Path to the pretrained PiDiNet checkpoint (.pth).
        self.model_path = model_path
        self.device = torch.device('cpu')  # Android inference runs on the CPU

    def export_onnx_model(self, output_path="models/pidinet_mobile.onnx"):
        """Export the model to ONNX; return the output path, or None on failure."""
        print("正在导出ONNX模型...")

        # Create the output directory if needed.
        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        # Build the model and load the checkpoint weights.
        model = PiDiNet()
        checkpoint = torch.load(self.model_path, map_location=self.device)

        # Checkpoints may wrap the weights in a 'state_dict' entry or be the
        # raw state dict; strict=False tolerates key mismatches either way.
        if 'state_dict' in checkpoint:
            state_dict = remove_module_prefix(checkpoint['state_dict'])
            model.load_state_dict(state_dict, strict=False)
        else:
            state_dict = remove_module_prefix(checkpoint)
            model.load_state_dict(state_dict, strict=False)

        model.eval()

        # Example input (batch_size=1, channels=3, height=512, width=512).
        dummy_input = torch.randn(1, 3, 512, 512)

        try:
            # Export the ONNX graph; only the batch dimension is dynamic.
            torch.onnx.export(
                model,
                dummy_input,
                output_path,
                export_params=True,
                opset_version=11,
                do_constant_folding=True,
                input_names=['input'],
                output_names=['output'],
                dynamic_axes={
                    'input': {0: 'batch_size'},
                    'output': {0: 'batch_size'}
                }
            )

            print(f"✅ ONNX模型已保存: {output_path}")

            # Report the exported model size.
            model_size = os.path.getsize(output_path) / (1024 * 1024)
            print(f"   模型大小: {model_size:.1f} MB")

            return output_path

        except Exception as e:
            print(f"❌ ONNX导出失败: {str(e)}")
            return None

    def export_tflite_model(self, onnx_path, output_path="models/pidinet_mobile.tflite"):
        """Convert the ONNX model to TensorFlow Lite (optional step).

        Returns the TFLite path, or None when onnx2tf/tensorflow are not
        installed or the conversion fails.
        """
        try:
            import onnx2tf
            import tensorflow as tf

            print("正在导出TFLite模型...")

            # Create the output directory if needed.
            os.makedirs(os.path.dirname(output_path), exist_ok=True)

            # ONNX -> TensorFlow SavedModel (written to a temporary directory).
            temp_tf_dir = "temp_tf_model"
            onnx2tf.convert(
                input_onnx_file_path=onnx_path,
                output_folder_path=temp_tf_dir
            )

            # SavedModel -> TFLite with default optimizations.
            converter = tf.lite.TFLiteConverter.from_saved_model(temp_tf_dir)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            tflite_model = converter.convert()

            # Write the TFLite flatbuffer.
            with open(output_path, 'wb') as f:
                f.write(tflite_model)

            print(f"✅ TFLite模型已导出: {output_path}")

            # Clean up the temporary SavedModel directory.
            import shutil
            shutil.rmtree(temp_tf_dir, ignore_errors=True)

            return output_path

        except ImportError:
            print("⚠️ TFLite导出需要onnx2tf，跳过TFLite导出")
            return None
        except Exception as e:
            print(f"❌ TFLite导出失败: {str(e)}")
            return None

    def generate_model_config(self, output_path="models/model_config.json"):
        """Write the model configuration JSON consumed by the Android side."""
        print("正在生成模型配置...")

        # Values here must stay in sync with the Java template below
        # (input size 512 and ImageNet mean/std).
        config = {
            "model_info": {
                "name": "PiDiNet-Tiny",
                "version": "1.0",
                "description": "Lightweight document edge detection model",
                "input_size": [512, 512],
                "input_channels": 3,
                "output_channels": 1
            },
            "preprocessing": {
                "resize_method": "bilinear",
                "normalize": {
                    "mean": [0.485, 0.456, 0.406],
                    "std": [0.229, 0.224, 0.225]
                }
            },
            "postprocessing": {
                "threshold": 0.5,
                "min_contour_area": 1000,
                "epsilon_factor": 0.02
            },
            "android_settings": {
                "onnx_runtime_version": "1.12.1",
                "execution_provider": "CPUExecutionProvider",
                "inter_op_num_threads": 1,
                "intra_op_num_threads": 1
            }
        }

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2, ensure_ascii=False)

        print(f"✅ 配置文件已生成: {output_path}")
        return output_path

    def generate_android_code(self):
        """Write Java integration code and a Gradle snippet into ./android/.

        The Java/Gradle sources below are string templates emitted verbatim;
        they are not compiled or validated by this script.
        """
        print("正在生成Android代码...")

        # Create the Android code output directory.
        android_dir = "android"
        os.makedirs(android_dir, exist_ok=True)

        # Core detector class template (emitted as-is).
        detector_java = '''package com.yourapp.pidinet;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Point;
import ai.onnxruntime.*;
import java.nio.FloatBuffer;
import java.util.*;

public class DocumentEdgeDetector {
    private static final String MODEL_NAME = "pidinet_mobile.onnx";
    private static final int INPUT_SIZE = 512;
    private static final float[] MEAN = {0.485f, 0.456f, 0.406f};
    private static final float[] STD = {0.229f, 0.224f, 0.225f};
    
    private OrtEnvironment env;
    private OrtSession session;
    private Context context;
    
    public DocumentEdgeDetector(Context context) throws OrtException {
        this.context = context;
        initializeModel();
    }
    
    private void initializeModel() throws OrtException {
        env = OrtEnvironment.getEnvironment();
        OrtSession.SessionOptions opts = new OrtSession.SessionOptions();
        opts.addCPU(false);
        
        // 从assets加载模型
        byte[] modelBytes = loadModelFromAssets();
        session = env.createSession(modelBytes, opts);
    }
    
    private byte[] loadModelFromAssets() {
        try {
            java.io.InputStream is = context.getAssets().open(MODEL_NAME);
            byte[] buffer = new byte[is.available()];
            is.read(buffer);
            is.close();
            return buffer;
        } catch (Exception e) {
            throw new RuntimeException("Failed to load model from assets", e);
        }
    }
    
    public DetectionResult detectDocument(Bitmap bitmap) {
        try {
            // 预处理
            float[][][] input = preprocessImage(bitmap);
            
            // 创建输入tensor
            long[] inputShape = {1, 3, INPUT_SIZE, INPUT_SIZE};
            OnnxTensor inputTensor = OnnxTensor.createTensor(env, input);
            
            // 推理
            Map<String, OnnxTensor> inputs = Collections.singletonMap("input", inputTensor);
            OrtSession.Result result = session.run(inputs);
            
            // 获取输出
            float[][][][] output = (float[][][][]) result.get(0).getValue();
            
            // 后处理
            return postprocessOutput(output[0][0], bitmap.getWidth(), bitmap.getHeight());
            
        } catch (Exception e) {
            e.printStackTrace();
            return new DetectionResult(false, null, null);
        }
    }
    
    private float[][][] preprocessImage(Bitmap bitmap) {
        // 调整大小到512x512
        Bitmap resized = Bitmap.createScaledBitmap(bitmap, INPUT_SIZE, INPUT_SIZE, true);
        
        float[][][] input = new float[3][INPUT_SIZE][INPUT_SIZE];
        
        for (int y = 0; y < INPUT_SIZE; y++) {
            for (int x = 0; x < INPUT_SIZE; x++) {
                int pixel = resized.getPixel(x, y);
                
                // 提取RGB并归一化
                float r = ((pixel >> 16) & 0xFF) / 255.0f;
                float g = ((pixel >> 8) & 0xFF) / 255.0f;
                float b = (pixel & 0xFF) / 255.0f;
                
                // 应用ImageNet标准化
                input[0][y][x] = (r - MEAN[0]) / STD[0];
                input[1][y][x] = (g - MEAN[1]) / STD[1];
                input[2][y][x] = (b - MEAN[2]) / STD[2];
            }
        }
        
        return input;
    }
    
    private DetectionResult postprocessOutput(float[][] edgeMap, int origWidth, int origHeight) {
        // 这里实现文档角点检测算法
        // 简化版本，实际使用中需要更复杂的轮廓检测
        
        List<Point> corners = new ArrayList<>();
        // TODO: 实现完整的轮廓检测和四边形拟合
        
        boolean hasDocument = corners.size() == 4;
        return new DetectionResult(hasDocument, corners, edgeMap);
    }
    
    public void close() {
        try {
            if (session != null) session.close();
            if (env != null) env.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    
    public static class DetectionResult {
        public final boolean hasDocument;
        public final List<Point> corners;
        public final float[][] edgeMap;
        
        DetectionResult(boolean hasDocument, List<Point> corners, float[][] edgeMap) {
            this.hasDocument = hasDocument;
            this.corners = corners;
            this.edgeMap = edgeMap;
        }
    }
}'''

        # Write the core detector class.
        with open(os.path.join(android_dir, "DocumentEdgeDetector.java"), 'w', encoding='utf-8') as f:
            f.write(detector_java)

        # Usage example template (emitted as-is).
        main_activity_java = '''package com.yourapp.pidinet;

import android.Manifest;
import android.content.pm.PackageManager;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.Point;
import android.os.Bundle;
import android.widget.ImageView;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import androidx.camera.core.*;
import androidx.camera.lifecycle.ProcessCameraProvider;
import androidx.camera.view.PreviewView;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import com.google.common.util.concurrent.ListenableFuture;

public class MainActivity extends AppCompatActivity {
    private static final int CAMERA_PERMISSION_CODE = 100;
    
    private PreviewView previewView;
    private ImageView overlayView;
    private DocumentEdgeDetector detector;
    
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        
        previewView = findViewById(R.id.preview_view);
        overlayView = findViewById(R.id.overlay_view);
        
        try {
            detector = new DocumentEdgeDetector(this);
        } catch (Exception e) {
            e.printStackTrace();
            finish();
            return;
        }
        
        if (checkCameraPermission()) {
            startCamera();
        } else {
            requestCameraPermission();
        }
    }
    
    private boolean checkCameraPermission() {
        return ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA) 
                == PackageManager.PERMISSION_GRANTED;
    }
    
    private void requestCameraPermission() {
        ActivityCompat.requestPermissions(this, 
                new String[]{Manifest.permission.CAMERA}, 
                CAMERA_PERMISSION_CODE);
    }
    
    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, 
                                         @NonNull int[] grantResults) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        if (requestCode == CAMERA_PERMISSION_CODE) {
            if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                startCamera();
            } else {
                finish();
            }
        }
    }
    
    private void startCamera() {
        ListenableFuture<ProcessCameraProvider> cameraProviderFuture = 
                ProcessCameraProvider.getInstance(this);
        
        cameraProviderFuture.addListener(() -> {
            try {
                ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
                bindPreview(cameraProvider);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }, ContextCompat.getMainExecutor(this));
    }
    
    private void bindPreview(@NonNull ProcessCameraProvider cameraProvider) {
        Preview preview = new Preview.Builder().build();
        CameraSelector cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA;
        
        ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .build();
        
        imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), image -> {
            // 转换为Bitmap并检测
            Bitmap bitmap = imageProxyToBitmap(image);
            DocumentEdgeDetector.DetectionResult result = detector.detectDocument(bitmap);
            
            // 在UI线程更新叠加层
            runOnUiThread(() -> updateOverlay(result));
            
            image.close();
        });
        
        preview.setSurfaceProvider(previewView.getSurfaceProvider());
        
        cameraProvider.unbindAll();
        cameraProvider.bindToLifecycle(this, cameraSelector, preview, imageAnalysis);
    }
    
    private Bitmap imageProxyToBitmap(ImageProxy image) {
        // 这里需要实现ImageProxy到Bitmap的转换
        // 简化版本，实际实现需要处理不同的图像格式
        return null; // TODO: 实现转换逻辑
    }
    
    private void updateOverlay(DocumentEdgeDetector.DetectionResult result) {
        if (result.hasDocument && result.corners != null) {
            // 创建叠加图像显示检测到的边框
            Bitmap overlayBitmap = Bitmap.createBitmap(
                    overlayView.getWidth(), overlayView.getHeight(), Bitmap.Config.ARGB_8888);
            Canvas canvas = new Canvas(overlayBitmap);
            
            Paint paint = new Paint();
            paint.setColor(android.graphics.Color.GREEN);
            paint.setStrokeWidth(5f);
            paint.setStyle(Paint.Style.STROKE);
            
            // 绘制检测到的四边形
            if (result.corners.size() == 4) {
                for (int i = 0; i < 4; i++) {
                    Point start = result.corners.get(i);
                    Point end = result.corners.get((i + 1) % 4);
                    canvas.drawLine(start.x, start.y, end.x, end.y, paint);
                }
            }
            
            overlayView.setImageBitmap(overlayBitmap);
        } else {
            overlayView.setImageBitmap(null);
        }
    }
    
    @Override
    protected void onDestroy() {
        super.onDestroy();
        if (detector != null) {
            detector.close();
        }
    }
}'''

        # Write the MainActivity example.
        with open(os.path.join(android_dir, "MainActivity.java"), 'w', encoding='utf-8') as f:
            f.write(main_activity_java)

        # Gradle dependency configuration template (emitted as-is).
        dependencies_gradle = '''// 在app/build.gradle中添加以下依赖

dependencies {
    // ONNX Runtime for Android
    implementation 'com.microsoft.onnxruntime:onnxruntime-android:1.12.1'
    
    // 相机功能
    def camerax_version = "1.2.3"
    implementation "androidx.camera:camera-core:${camerax_version}"
    implementation "androidx.camera:camera-camera2:${camerax_version}"
    implementation "androidx.camera:camera-lifecycle:${camerax_version}"
    implementation "androidx.camera:camera-view:${camerax_version}"
    
    // 其他标准依赖
    implementation 'androidx.appcompat:appcompat:1.6.1'
    implementation 'com.google.android.material:material:1.9.0'
    implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
}

android {
    compileSdk 33
    
    defaultConfig {
        minSdk 21
        targetSdk 33
    }
    
    // 防止ONNX Runtime库冲突
    packagingOptions {
        pickFirst '**/libc++_shared.so'
        pickFirst '**/libjsc.so'
    }
    
    compileOptions {
        sourceCompatibility JavaVersion.VERSION_1_8
        targetCompatibility JavaVersion.VERSION_1_8
    }
}'''

        # Write the dependency configuration.
        with open(os.path.join(android_dir, "dependencies.gradle"), 'w', encoding='utf-8') as f:
            f.write(dependencies_gradle)

        print(f"✅ Android代码已生成: {android_dir}/")
        print("   📁 生成的文件:")
        print("   ├── DocumentEdgeDetector.java  # 核心检测类")
        print("   ├── MainActivity.java          # 使用示例")
        print("   └── dependencies.gradle        # 依赖配置")

        return android_dir

    def export_all(self):
        """Run the full Android export pipeline.

        Returns a dict of produced artifacts keyed by kind
        ('onnx', 'tflite', 'config', 'android_code'), or None when the
        mandatory ONNX export fails.
        """
        print("🚀 开始导出Android产物...")

        results = {}

        # 1. Export the ONNX model (mandatory — everything else depends on it).
        onnx_path = self.export_onnx_model()
        if onnx_path:
            results['onnx'] = onnx_path
        else:
            print("❌ ONNX导出失败，无法继续")
            return None

        # 2. Export the TFLite model (optional, best-effort).
        tflite_path = self.export_tflite_model(onnx_path)
        if tflite_path:
            results['tflite'] = tflite_path

        # 3. Generate the configuration file.
        config_path = self.generate_model_config()
        if config_path:
            results['config'] = config_path

        # 4. Generate the Android integration code.
        android_dir = self.generate_android_code()
        if android_dir:
            results['android_code'] = android_dir

        print("\n🎉 Android产物导出完成!")
        print("\n📁 生成的文件:")
        for key, path in results.items():
            if key == 'android_code':
                print(f"├── {path}/                    # Android集成代码")
            else:
                print(f"├── {path}")

        print("\n📖 下一步:")
        print("1. 将 models/ 目录中的文件复制到Android项目的assets目录")
        print("2. 将 android/ 目录中的Java代码集成到您的项目")
        print("3. 添加dependencies.gradle中的依赖到您的build.gradle")
        print("4. 参考README_Android集成指南.md进行详细配置")

        return results

# Command-line interface.
if __name__ == "__main__":
    model_path = "table5_pidinet-tiny-l.pth"

    if not os.path.exists(model_path):
        print(f"❌ 模型文件不存在: {model_path}")
        # sys.exit instead of the `exit` builtin: `exit` is injected by the
        # `site` module and is not guaranteed outside interactive sessions.
        sys.exit(1)

    try:
        exporter = AndroidModelExporter(model_path)
        results = exporter.export_all()

        if results:
            print(f"\n✅ 导出成功! 共生成 {len(results)} 个产物")
        else:
            print("\n❌ 导出失败")
            sys.exit(1)

    except Exception as e:
        print(f"\n❌ 导出过程中发生错误: {str(e)}")
        sys.exit(1)