package com.tus.store;

import com.tus.model.UploadInfo;
import com.tus.util.FileUtil;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;

import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
public class SQLiteMetadataStore implements MetadataStore {
    private final String dbUrl;
    private final ObjectMapper objectMapper;
    // Cache of uploads keyed by uploadId. NOTE: cached UploadInfo instances are
    // mutable and are returned directly by findByUploadId, so callers share state
    // with this cache; the mutator methods below keep entries in sync with the DB.
    private final Map<String, UploadInfo> cache;

    /**
     * Opens (creating if necessary) the SQLite database at {@code dbPath},
     * ensures the schema exists, and pre-loads all not-yet-finished uploads
     * into the in-memory cache so in-flight state survives a restart.
     *
     * @param dbPath filesystem path of the SQLite database file
     * @throws SQLException if the database cannot be opened or initialized
     */
    public SQLiteMetadataStore(String dbPath) throws SQLException {

        FileUtil.createFileIfNotExists(dbPath);
        this.dbUrl = "jdbc:sqlite:" + dbPath;
        this.objectMapper = new ObjectMapper();
        this.cache = new ConcurrentHashMap<>();

        initDatabase();
        loadCache(); // restore active uploads into memory on startup
    }

    /**
     * Creates the {@code uploads} table if it does not already exist.
     *
     * @throws SQLException if the DDL statement fails
     */
    private void initDatabase() throws SQLException {
        String createTableSQL = "CREATE TABLE IF NOT EXISTS uploads (\n" + "                    upload_id TEXT PRIMARY KEY,\n" + "                    file_name TEXT NOT NULL,\n" + "                    file_size INTEGER NOT NULL,\n" + "                    offset_val INTEGER DEFAULT 0,\n" + "                    file_path TEXT NOT NULL,\n" + "                    created_at INTEGER NOT NULL,\n" + "                    expires_at INTEGER NOT NULL,\n" + "                    checksum TEXT,\n" + "                    checksum_algorithm TEXT,\n" + "                    is_parallel BOOLEAN NOT NULL DEFAULT 0,\n" + "                    is_completed BOOLEAN NOT NULL DEFAULT 0,\n" + "                    is_verified BOOLEAN NOT NULL DEFAULT 0,\n" + "                    completed_chunks TEXT DEFAULT '[]',           -- JSON array of paths\n" + "                    chunk_offsets TEXT DEFAULT '{}'               -- JSON map: path -> offset\n" + "                )";

        try (Connection conn = getConnection(); Statement stmt = conn.createStatement()) {
            stmt.execute(createTableSQL);
        }
    }

    /**
     * Opens a fresh JDBC connection. One short-lived connection is used per
     * operation; SQLite handles this cheaply for a local file database.
     */
    private Connection getConnection() throws SQLException {
        return DriverManager.getConnection(dbUrl);
    }

    /**
     * Persists a new upload task and adds it to the in-memory cache.
     * Synchronized to serialize concurrent inserts of new uploads.
     *
     * @param info upload metadata to persist; the same (mutable) instance is
     *             placed in the cache
     * @throws RuntimeException wrapping the underlying {@link SQLException} or
     *                          JSON-serialization {@link IOException}
     */
    public synchronized void saveUpload(UploadInfo info) {
        String sql = "INSERT INTO uploads (\n" + "                upload_id, file_name, file_size, offset_val, file_path,\n" + "                created_at, expires_at, checksum, checksum_algorithm,\n" + "                is_parallel, is_completed, is_verified,\n" + "                completed_chunks, chunk_offsets\n" + "            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, info.getUploadId());
            pstmt.setString(2, info.getFileName());
            pstmt.setLong(3, info.getFileSize());
            pstmt.setLong(4, info.getOffset());
            pstmt.setString(5, info.getFilePath());
            pstmt.setLong(6, info.getCreatedAt());
            pstmt.setLong(7, info.getExpiresAt());
            pstmt.setString(8, info.getChecksum());
            pstmt.setString(9, info.getChecksumAlgorithm());
            pstmt.setBoolean(10, info.isParallelUpload());
            pstmt.setBoolean(11, info.isCompleted());
            pstmt.setBoolean(12, info.isVerified());

            // Serialize the chunk list and offset map as JSON text columns.
            pstmt.setString(13, toJson(info.getCompletedChunks()));
            pstmt.setString(14, toJson(info.getChunkOffsets()));

            pstmt.executeUpdate();
            cache.put(info.getUploadId(), info);

        } catch (SQLException | IOException e) {
            throw new RuntimeException("Failed to save upload: " + info.getUploadId(), e);
        }
    }

    /**
     * Looks up an upload by id, consulting the cache first and falling back to
     * the database (caching the result on a hit).
     *
     * @param uploadId upload identifier
     * @return the cached or freshly loaded {@link UploadInfo}, or {@code null}
     *         if no row exists. The returned instance is the cache's own
     *         mutable object — callers share state with the cache.
     * @throws RuntimeException wrapping query or JSON-deserialization failures
     */
    public UploadInfo findByUploadId(String uploadId) {
        UploadInfo cached = cache.get(uploadId);
        if (cached != null) return cached;

        String sql = "SELECT * FROM uploads WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, uploadId);
            try (ResultSet rs = pstmt.executeQuery()) {
                if (rs.next()) {
                    UploadInfo info = fromResultSet(rs);
                    cache.put(uploadId, info);
                    return info;
                }
            }
        } catch (SQLException | IOException e) {
            throw new RuntimeException("Query failed for upload_id: " + uploadId, e);
        }
        return null;
    }

    /**
     * Updates the byte offset for a sequential (non-parallel) upload, keeping
     * any cached entry in sync.
     *
     * @param uploadId upload identifier
     * @param offset   new absolute offset in bytes
     * @throws RuntimeException wrapping the underlying {@link SQLException}
     */
    public void updateOffset(String uploadId, long offset) {
        String sql = "UPDATE uploads SET offset_val = ? WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setLong(1, offset);
            pstmt.setString(2, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                // No matching row: likely an unknown or already-deleted uploadId.
                log.warn("updateOffset matched no row for upload_id={}", uploadId);
            }

            UploadInfo info = cache.get(uploadId);
            if (info != null) info.setOffset(offset);

        } catch (SQLException e) {
            throw new RuntimeException("Failed to update offset for: " + uploadId, e);
        }
    }

    /**
     * Updates the completed-chunk list and per-chunk offset map for a parallel
     * upload, keeping any cached entry in sync (with defensive copies).
     *
     * @param uploadId upload identifier
     * @param chunks   paths of completed chunk files
     * @param offsets  map of chunk path to its offset in the final file
     * @throws RuntimeException wrapping SQL or JSON-serialization failures
     */
    public void updateChunks(String uploadId, List<String> chunks, Map<String, Long> offsets) {
        String sql = "UPDATE uploads SET completed_chunks = ?, chunk_offsets = ? WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, toJson(chunks));
            pstmt.setString(2, toJson(offsets));
            pstmt.setString(3, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                log.warn("updateChunks matched no row for upload_id={}", uploadId);
            }

            UploadInfo info = cache.get(uploadId);
            if (info != null) {
                // Defensive copies so later caller-side mutation cannot corrupt the cache.
                info.setCompletedChunks(new ArrayList<>(chunks));
                info.setChunkOffsets(new HashMap<>(offsets));
            }

        } catch (SQLException | IOException e) {
            throw new RuntimeException("Failed to update chunks for: " + uploadId, e);
        }
    }

    /**
     * Marks an upload as completed and records the final assembled file path.
     *
     * @param uploadId  upload identifier
     * @param finalPath path of the fully assembled file
     * @throws RuntimeException wrapping the underlying {@link SQLException}
     */
    public void markAsCompleted(String uploadId, String finalPath) {
        String sql = "UPDATE uploads SET is_completed = 1, file_path = ? WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, finalPath);
            pstmt.setString(2, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                log.warn("markAsCompleted matched no row for upload_id={}", uploadId);
            }

            UploadInfo info = cache.get(uploadId);
            if (info != null) {
                info.setCompleted(true);
                info.setFilePath(finalPath);
            }

        } catch (SQLException e) {
            throw new RuntimeException("Failed to mark as completed: " + uploadId, e);
        }
    }

    /**
     * Marks an upload as completed without changing its stored file path.
     *
     * @param uploadId upload identifier
     * @throws RuntimeException wrapping the underlying {@link SQLException}
     */
    public void markAsCompleted(String uploadId) {
        String sql = "UPDATE uploads SET is_completed = 1 WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                log.warn("markAsCompleted matched no row for upload_id={}", uploadId);
            }

            UploadInfo info = cache.get(uploadId);
            if (info != null) {
                info.setCompleted(true);
            }

        } catch (SQLException e) {
            throw new RuntimeException("Failed to mark as completed: " + uploadId, e);
        }
    }

    /**
     * Marks an upload's checksum verification as passed.
     *
     * @param uploadId upload identifier
     * @throws RuntimeException wrapping the underlying {@link SQLException}
     */
    public void markAsVerified(String uploadId) {
        String sql = "UPDATE uploads SET is_verified = 1 WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                log.warn("markAsVerified matched no row for upload_id={}", uploadId);
            }

            UploadInfo info = cache.get(uploadId);
            if (info != null) {
                info.setVerified(true);
            }

        } catch (SQLException e) {
            throw new RuntimeException("Failed to mark as verified: " + uploadId, e);
        }
    }

    /**
     * Deletes an upload record from both the database and the cache
     * (invoked during cleanup of expired/finished uploads).
     *
     * @param uploadId upload identifier
     * @throws RuntimeException wrapping the underlying {@link SQLException}
     */
    public void deleteUpload(String uploadId) {
        String sql = "DELETE FROM uploads WHERE upload_id = ?";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setString(1, uploadId);
            int rows = pstmt.executeUpdate();
            if (rows == 0) {
                log.warn("deleteUpload matched no row for upload_id={}", uploadId);
            }
            cache.remove(uploadId);

        } catch (SQLException e) {
            throw new RuntimeException("Failed to delete upload: " + uploadId, e);
        }
    }

    /**
     * Builds an {@link UploadInfo} from the current row of {@code rs},
     * deserializing the JSON-encoded chunk columns (defaulting to empty
     * collections when the stored JSON is null/empty).
     *
     * @throws SQLException on column access failure
     * @throws IOException  on JSON parse failure
     */
    private UploadInfo fromResultSet(ResultSet rs) throws SQLException, IOException {
        UploadInfo info = new UploadInfo();
        info.setUploadId(rs.getString("upload_id"));
        info.setFileName(rs.getString("file_name"));
        info.setFileSize(rs.getLong("file_size"));
        info.setOffset(rs.getLong("offset_val"));
        info.setFilePath(rs.getString("file_path"));
        info.setCreatedAt(rs.getLong("created_at"));
        info.setExpiresAt(rs.getLong("expires_at"));
        info.setChecksum(rs.getString("checksum"));
        info.setChecksumAlgorithm(rs.getString("checksum_algorithm"));
        info.setParallelUpload(rs.getBoolean("is_parallel"));
        info.setCompleted(rs.getBoolean("is_completed"));
        info.setVerified(rs.getBoolean("is_verified"));

        // Deserialize the JSON-encoded columns; never leave these fields null.
        String chunksJson = rs.getString("completed_chunks");
        List<String> chunks = fromJson(chunksJson, new TypeReference<List<String>>() {
        });
        info.setCompletedChunks(chunks != null ? chunks : new ArrayList<>());

        String offsetsJson = rs.getString("chunk_offsets");
        Map<String, Long> offsets = fromJson(offsetsJson, new TypeReference<Map<String, Long>>() {
        });
        info.setChunkOffsets(offsets != null ? offsets : new HashMap<>());

        return info;
    }

    /**
     * Serializes an object to a JSON string.
     */
    private String toJson(Object obj) throws IOException {
        return objectMapper.writeValueAsString(obj);
    }

    /**
     * Deserializes a JSON string to an object of the given type.
     *
     * @return {@code null} for null, blank, or literal {@code "null"} input
     */
    private <T> T fromJson(String json, TypeReference<T> typeRef) throws IOException {
        if (json == null || json.trim().isEmpty() || "null".equals(json)) {
            return null;
        }
        return objectMapper.readValue(json, typeRef);
    }

    /**
     * Loads all uploads that are not yet fully finished into the cache on
     * startup (used to recover state after a restart). Note the predicate
     * includes rows that are completed but not yet verified.
     *
     * @throws RuntimeException wrapping SQL or JSON failures
     */
    private void loadCache() {
        String sql = "SELECT * FROM uploads WHERE is_completed = 0 OR is_verified = 0";
        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql); ResultSet rs = pstmt.executeQuery()) {

            while (rs.next()) {
                UploadInfo info = fromResultSet(rs);
                cache.put(info.getUploadId(), info);
            }
            log.info("Loaded {} active uploads into memory", cache.size());
        } catch (SQLException | IOException e) {
            throw new RuntimeException("Failed to load uploads into cache", e);
        }
    }

    /**
     * Queries all uploads whose expiry timestamp is strictly before {@code now}.
     *
     * @param now current timestamp in milliseconds
     * @return list of expired {@link UploadInfo} records (possibly empty)
     * @throws RuntimeException wrapping SQL or JSON failures
     */
    public List<UploadInfo> findExpired(long now) {
        String sql = "SELECT * FROM uploads WHERE expires_at < ?";
        List<UploadInfo> expired = new ArrayList<>();

        try (Connection conn = getConnection(); PreparedStatement pstmt = conn.prepareStatement(sql)) {

            pstmt.setLong(1, now);
            try (ResultSet rs = pstmt.executeQuery()) {
                while (rs.next()) {
                    UploadInfo info = fromResultSet(rs);
                    expired.add(info);
                }
            }
            log.debug("Found {} expired uploads (now={})", expired.size(), now);
            return expired;

        } catch (SQLException | IOException e) {
            log.error("Failed to query expired uploads", e);
            throw new RuntimeException("Database query failed: findExpired", e);
        }
    }
}