const fs = require("fs");
const path = require("path");
const { pipeline } = require("stream/promises");
const Upload = require("../../models/upload");
const socketClientManager = require("../socketClient");

// Default configuration; any field can be overridden via the BigFileUpload
// constructor. Frozen so the shared defaults cannot be mutated at runtime
// (the constructor copies it with a spread, so freezing is safe).
const DEFAULT_CONFIG = Object.freeze({
  tempDir: path.join(__dirname, "../../../temp"),
  uploadDir: path.join(__dirname, "../../../uploads"),
  maxFileSize: 1024 * 1024 * 1024 * 1024, // 1TB
  chunkSize: 5 * 1024 * 1024, // 5MB
  cleanupInterval: 24 * 60 * 60 * 1000, // 24 hours
  allowedTypes: ["application/zip", "application/x-zip-compressed"],
});

/**
 * Chunked big-file upload manager.
 *
 * Responsibilities:
 *  - instant-upload check by file hash (checkFile)
 *  - resumable uploads via per-chunk status (getChunkStatus)
 *  - streaming individual chunks into a temp directory (handleChunk)
 *  - merging chunks into the final file and pushing progress/result
 *    events through the socket module (mergeChunks / _processMergeTask)
 *  - periodic cleanup of stale temp files (_cleanupTempFiles)
 */
class BigFileUpload {
  /**
   * @param {object} [config] Partial override of DEFAULT_CONFIG
   *   (tempDir, uploadDir, maxFileSize, chunkSize, cleanupInterval,
   *   allowedTypes).
   */
  constructor(config = {}) {
    this.config = { ...DEFAULT_CONFIG, ...config };
    this._ensureDirectories();
    this._startCleanupTimer();
  }

  // Make sure the temp and upload directories exist before any I/O happens.
  _ensureDirectories() {
    [this.config.tempDir, this.config.uploadDir].forEach((dir) => {
      if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir, { recursive: true });
        console.log(`Created directory: ${dir}`);
      }
    });
  }

  // Schedule periodic temp-file cleanup. The timer is unref()ed so it does
  // not by itself keep the Node process alive after all other work is done.
  _startCleanupTimer() {
    this.cleanupTimer = setInterval(
      () => this._cleanupTempFiles(),
      this.config.cleanupInterval
    );
    if (typeof this.cleanupTimer.unref === "function") {
      this.cleanupTimer.unref();
    }
    console.log("Started temporary file cleanup timer.");
  }

  /**
   * Remove temp files/directories older than cleanupInterval. Entries that
   * cannot be stat'ed are force-removed as a best effort. Deliberately never
   * throws: all errors are logged and swallowed so the timer keeps running.
   */
  async _cleanupTempFiles() {
    console.log("Running temporary file cleanup...");
    try {
      const files = await fs.promises.readdir(this.config.tempDir);
      const now = Date.now();

      for (const file of files) {
        const filePath = path.join(this.config.tempDir, file);
        try {
          const stats = await fs.promises.stat(filePath);
          const fileAge = now - stats.mtimeMs;

          if (fileAge > this.config.cleanupInterval) {
            await fs.promises.rm(filePath, { recursive: true, force: true });
            console.log(`Cleaned up temporary file/directory: ${filePath}`);
          }
        } catch (statError) {
          // If stat fails, it might be a partially created directory or
          // file — try to remove it anyway.
          console.warn(
            `Error stating file ${filePath} during cleanup, attempting removal:`,
            statError
          );
          try {
            await fs.promises.rm(filePath, { recursive: true, force: true });
            console.log(
              `Forcefully cleaned up problematic temporary file/directory: ${filePath}`
            );
          } catch (rmError) {
            console.error(
              `Failed to forcefully remove problematic temporary file/directory ${filePath}:`,
              rmError
            );
          }
        }
      }
    } catch (error) {
      console.error("Error during temporary file cleanup:", error);
    }
  }

  /**
   * Instant-upload check: has a file with this hash already completed?
   * @param {string} fileHash
   * @returns {Promise<{exists: boolean, url?: string, originalName?: string,
   *   size?: number, mimeType?: string}>}
   * @throws {Error} if fileHash is missing or the DB lookup fails.
   */
  async checkFile(fileHash) {
    if (!fileHash) {
      throw new Error("File hash is required for checking file existence.");
    }
    try {
      const upload = await Upload.findOne({ fileHash, status: "completed" });
      if (upload) {
        return {
          exists: true,
          url: upload.path,
          originalName: upload.originalName,
          size: upload.size,
          mimeType: upload.mimeType,
        };
      }
      return { exists: false };
    } catch (error) {
      console.error(
        `Error checking file existence for hash ${fileHash}:`,
        error
      );
      throw new Error(
        "Failed to check file existence. Please try again later."
      );
    }
  }

  /**
   * List the chunk indices already stored for an upload, so the client can
   * resume instead of re-sending everything.
   * @param {string} identifier Upload identifier ("<fileHash>-...").
   * @returns {Promise<{uploaded: number[]}>}
   * @throws {Error} if identifier is missing or the directory scan fails.
   */
  async getChunkStatus(identifier) {
    if (!identifier) {
      throw new Error("Identifier is required for getting chunk status.");
    }
    try {
      const chunkDir = path.join(this.config.tempDir, identifier);
      if (!fs.existsSync(chunkDir)) {
        return { uploaded: [] };
      }
      const files = await fs.promises.readdir(chunkDir);
      // Chunk files are named "<index>.chunk"; keep only valid indices.
      const uploaded = files
        .map((file) => Number.parseInt(file.split(".")[0], 10))
        .filter((num) => !Number.isNaN(num));
      return { uploaded };
    } catch (error) {
      console.error(
        `Error getting chunk status for identifier ${identifier}:`,
        error
      );
      throw new Error("Failed to get chunk status. Please try again later.");
    }
  }

  /**
   * Persist one chunk to the temp directory and broadcast upload progress.
   * Creates (or re-pends) the Upload DB record on first contact.
   * @param {import('stream').Readable} chunkStream Chunk payload stream.
   * @param {number} chunkIndex Zero-based chunk index.
   * @param {string} identifier Upload identifier ("<fileHash>-...").
   * @param {number} totalChunks Total number of chunks for this file.
   * @param {{fileName: string, fileSize: number, mimeType: string}} fileInfo
   * @returns {Promise<{success: true, progress: number}>}
   * @throws {Error} on invalid parameters or any write/DB failure.
   */
  async handleChunk(
    chunkStream,
    chunkIndex,
    identifier,
    totalChunks,
    fileInfo
  ) {
    if (
      !chunkStream ||
      typeof chunkIndex !== "number" ||
      !identifier ||
      typeof totalChunks !== "number" ||
      !fileInfo
    ) {
      throw new Error("Invalid parameters for handling chunk.");
    }
    const { fileName, fileSize, mimeType } = fileInfo;
    if (!fileName || typeof fileSize !== "number" || !mimeType) {
      throw new Error("Invalid fileInfo for handling chunk.");
    }

    // Identifier is assumed to be "<fileHash>-<suffix>".
    const fileHash = identifier.split("-")[0];

    try {
      let upload = await Upload.findOne({ fileHash });
      if (!upload) {
        upload = new Upload({
          fileHash,
          originalName: fileName,
          path: `/uploads/${fileHash}${path.extname(fileName)}`,
          size: fileSize,
          mimeType: mimeType,
          status: "pending",
        });
        await upload.save();
      } else if (upload.status !== "pending") {
        // A completed or failed record exists but chunks are arriving again:
        // treat this as a fresh attempt and reset the record to 'pending'.
        // This may need more nuanced handling later.
        upload.status = "pending";
        upload.completedAt = null; // Reset completion time if any
        await upload.save();
      }

      const chunkDir = path.join(this.config.tempDir, identifier);
      if (!fs.existsSync(chunkDir)) {
        fs.mkdirSync(chunkDir, { recursive: true });
      }

      const chunkPath = path.join(chunkDir, `${chunkIndex}.chunk`);
      // Stream the chunk to disk instead of buffering it in memory.
      // pipeline() resolves when the write finishes and destroys both
      // streams on error, replacing the manual pipe/end/error wiring.
      await pipeline(chunkStream, fs.createWriteStream(chunkPath));

      const progress = Math.floor(((chunkIndex + 1) / totalChunks) * 100);
      socketClientManager.sendToModule("upload", {
        type: "progress",
        payload: {
          identifier,
          progress,
          chunkIndex,
          totalChunks,
        },
      });

      return { success: true, progress };
    } catch (error) {
      console.error(
        `Error handling chunk: index=${chunkIndex}, identifier=${identifier}, fileInfo=${JSON.stringify(
          fileInfo
        )}:`,
        error
      );
      // Attempt to update DB to failed status if an entry exists
      try {
        const existingUpload = await Upload.findOne({ fileHash });
        if (existingUpload && existingUpload.status !== "completed") {
          existingUpload.status = "failed";
          await existingUpload.save();
        }
      } catch (dbError) {
        console.error(
          `Failed to update upload status to 'failed' for ${fileHash} after chunk error:`,
          dbError
        );
      }
      throw new Error(`Failed to save chunk ${chunkIndex}. ${error.message}`);
    }
  }

  /**
   * Kick off an asynchronous merge of all uploaded chunks. Returns
   * immediately with a task id; progress and completion/failure are
   * reported over the socket module.
   * @param {string} identifier Upload identifier ("<fileHash>-...").
   * @param {string} fileName Original file name (used for the extension).
   * @param {number} totalChunks Expected number of chunks (> 0).
   * @returns {Promise<{taskId: string, status: string, message: string,
   *   fileHash: string, originalName: string, size: number, mimeType: string}>}
   * @throws {Error} on invalid parameters or if the task cannot be started.
   */
  async mergeChunks(identifier, fileName, totalChunks) {
    if (
      !identifier ||
      !fileName ||
      typeof totalChunks !== "number" ||
      totalChunks <= 0
    ) {
      throw new Error("Invalid parameters for merging chunks.");
    }

    const fileHash = identifier.split("-")[0];
    const taskId = `merge-${fileHash}-${Date.now()}`; // Add timestamp for uniqueness

    try {
      let upload = await Upload.findOne({ fileHash });
      if (!upload) {
        // This case should ideally not happen if handleChunk creates the record
        console.warn(
          `Upload record not found for fileHash ${fileHash} at merge time. Creating one.`
        );
        // We might not have all fileInfo here, this is a fallback;
        // size and mimeType might be missing or inaccurate.
        upload = new Upload({
          fileHash,
          originalName: fileName,
          path: `/uploads/${fileHash}${path.extname(fileName)}`,
          status: "pending",
        });
      } else {
        upload.status = "pending"; // Effectively "pending merge"
      }
      await upload.save();

      // Fire-and-forget: the merge runs in the background; failures update
      // the DB and notify clients through the socket module.
      this._processMergeTask(
        identifier,
        fileName,
        totalChunks,
        taskId,
        fileHash
      ).catch(async (processingError) => {
        console.error(
          `Merge task ${taskId} failed for ${identifier}:`,
          processingError
        );
        try {
          const failedUpload = await Upload.findOne({ fileHash });
          if (failedUpload) {
            failedUpload.status = "failed";
            await failedUpload.save();
          }
          socketClientManager.sendToModule("upload", {
            type: "error",
            payload: {
              taskId,
              identifier,
              error: `Merge processing failed: ${processingError.message}`,
            },
          });
        } catch (dbError) {
          console.error(
            `Failed to update upload status to 'failed' for ${fileHash} after merge processing error:`,
            dbError
          );
        }
      });

      return {
        taskId,
        status: "pending",
        message: "File merge task started. Monitor progress via WebSocket.",
        fileHash,
        originalName: upload.originalName,
        size: upload.size,
        mimeType: upload.mimeType,
      };
    } catch (error) {
      console.error(
        `Error initiating merge for identifier ${identifier}:`,
        error
      );
      // Attempt to update DB to failed status
      try {
        const existingUpload = await Upload.findOne({ fileHash });
        if (existingUpload && existingUpload.status !== "completed") {
          existingUpload.status = "failed";
          await existingUpload.save();
        }
      } catch (dbError) {
        console.error(
          `Failed to update upload status to 'failed' for ${fileHash} after merge initiation error:`,
          dbError
        );
      }
      throw new Error(`Failed to start merge task. ${error.message}`);
    }
  }

  /**
   * Background worker for mergeChunks: concatenates "<i>.chunk" files in
   * order into uploadDir, removes the chunk directory, marks the DB record
   * completed, and emits merge_* socket events. On failure it destroys the
   * write stream, deletes the partial file, and re-throws so the caller's
   * .catch() updates the DB and notifies clients.
   */
  async _processMergeTask(identifier, fileName, totalChunks, taskId, fileHash) {
    const chunkDir = path.join(this.config.tempDir, identifier);
    const finalPath = path.join(
      this.config.uploadDir,
      `${fileHash}${path.extname(fileName)}`
    );
    let writeStream;

    try {
      console.log(
        `Starting merge process for task ${taskId}, identifier ${identifier}`
      );
      socketClientManager.sendToModule("upload", {
        type: "merge_started",
        payload: {
          taskId,
          identifier,
        },
      });

      writeStream = fs.createWriteStream(finalPath);

      for (let i = 0; i < totalChunks; i++) {
        const chunkPath = path.join(chunkDir, `${i}.chunk`);
        if (!fs.existsSync(chunkPath)) {
          throw new Error(
            `Chunk ${i} not found for identifier ${identifier} at path ${chunkPath}`
          );
        }
        const chunkBuffer = await fs.promises.readFile(chunkPath);
        if (!writeStream.write(chunkBuffer)) {
          // Handle backpressure
          await new Promise((resolve) => writeStream.once("drain", resolve));
        }

        const progress = Math.floor(((i + 1) / totalChunks) * 100);
        socketClientManager.sendToModule("upload", {
          type: "merge_progress",
          payload: {
            taskId,
            identifier,
            progress,
          },
        });
      }

      await new Promise((resolve, reject) => {
        writeStream.end(async () => {
          try {
            await fs.promises.rm(chunkDir, { recursive: true, force: true });
            console.log(`Cleaned up chunk directory: ${chunkDir}`);
            resolve();
          } catch (cleanupError) {
            console.error(
              `Error cleaning up chunk directory ${chunkDir}:`,
              cleanupError
            );
            reject(cleanupError); // Propagate cleanup error if critical
          }
        });
        writeStream.on("error", (err) => {
          console.error(`WriteStream error during merge for ${taskId}:`, err);
          reject(err);
        });
      });

      const upload = await Upload.findOneAndUpdate(
        { fileHash },
        {
          status: "completed",
          completedAt: new Date(),
          path: `/uploads/${fileHash}${path.extname(fileName)}`,
        },
        { new: true }
      );

      if (!upload) {
        // This should not happen if the record was created/updated before merge
        throw new Error(
          `Upload record not found for ${fileHash} after successful merge.`
        );
      }

      console.log(
        `Merge completed for task ${taskId}, file saved to ${finalPath}`
      );
      socketClientManager.sendToModule("upload", {
        type: "merge_completed",
        payload: {
          taskId,
          identifier,
          url: upload.path,
          fileHash: upload.fileHash,
          originalName: upload.originalName,
          size: upload.size,
          mimeType: upload.mimeType,
        },
      });
    } catch (error) {
      console.error(
        `Error during merge processing for task ${taskId} (${identifier}):`,
        error
      );
      // Ensure stream is closed if open
      if (writeStream && !writeStream.destroyed) {
        writeStream.destroy();
      }
      // Attempt to delete partially created file if merge failed mid-way
      if (fs.existsSync(finalPath)) {
        try {
          await fs.promises.unlink(finalPath);
          console.log(`Deleted partially merged file: ${finalPath}`);
        } catch (unlinkError) {
          console.error(
            `Failed to delete partially merged file ${finalPath}:`,
            unlinkError
          );
        }
      }
      // The global catch in mergeChunks will handle DB update and socket notification for this error.
      throw error; // Re-throw to be caught by the caller's .catch()
    }
  }

  /**
   * @param {string} mimeType
   * @returns {boolean} true if mimeType is in config.allowedTypes.
   */
  validateFileType(mimeType) {
    if (!mimeType) return false;
    return this.config.allowedTypes.includes(mimeType);
  }

  /**
   * @param {number} size File size in bytes.
   * @returns {boolean} true if size is a non-negative number within maxFileSize.
   */
  validateFileSize(size) {
    if (typeof size !== "number" || size < 0) return false;
    return size <= this.config.maxFileSize;
  }

  // Stop the cleanup timer (e.g. on app exit). Safe to call more than once.
  destroy() {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
      console.log("Stopped temporary file cleanup timer.");
    }
  }
}

// Singleton instance shared by the whole application.
const bigFileUploadInstance = new BigFileUpload();

// Graceful shutdown. NOTE: registering a SIGINT/SIGTERM listener suppresses
// Node's default termination behavior, so we must exit explicitly after
// stopping the cleanup timer — otherwise Ctrl+C would no longer stop the
// process. `once` is enough: the process exits on the first signal.
const shutdown = () => {
  bigFileUploadInstance.destroy();
  process.exit(0);
};
process.once("SIGINT", shutdown);
process.once("SIGTERM", shutdown);

module.exports = bigFileUploadInstance;
