import path from "path";
import fs from "fs";
import { fileURLToPath } from "url";
import pLimit from "p-limit";
import cliProgress from "cli-progress";
import { img } from "../crawlers/canvas-icon/types.js";

// Generate a random IPv4 address, avoiding private and reserved ranges.
export function getRandomIP() {
  // Valid ranges for the first octet; 127 (loopback) is excluded by construction.
  const firstOctetRanges: Array<[number, number]> = [
    [1, 126], // Class A (10.x.x.x filtered out below)
    [128, 191], // Class B (172.16-31.x.x filtered out below)
    [192, 223], // Class C (192.168.x.x filtered out below)
  ];

  const rand = (n: number) => Math.floor(Math.random() * n);

  let first: number;
  let second: number;

  // Re-roll until the first two octets fall outside the private ranges
  // 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16.
  do {
    const [lo, hi] = firstOctetRanges[rand(firstOctetRanges.length)];
    first = lo + rand(hi - lo + 1);
    second = rand(256);
  } while (
    first === 10 ||
    (first === 172 && second >= 16 && second <= 31) ||
    (first === 192 && second === 168)
  );

  return `${first}.${second}.${rand(256)}.${rand(256)}`;
}

// Pick a random desktop-browser User-Agent string.
export function getRandomUserAgent() {
  const userAgents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/57.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    // With a spider UA the site hides the next-page link, so it is kept disabled:
    // "User-Agent": "Mozilla/5.0 (compatible; Baiduspider-render/2.0; +http://www.baidu.com/search/spider.html)"
  ];
  const index = Math.floor(Math.random() * userAgents.length);
  return userAgents[index];
}

// Build a random Accept-Encoding header value: 1-4 unique encodings, comma-separated.
export function getRandomAcceptEncoding(): string {
  const pool = ["gzip", "deflate", "br", "zstd"];

  // How many encodings to include (at least one).
  const count = 1 + Math.floor(Math.random() * pool.length);

  // Draw `count` distinct encodings by removing picks from a working copy.
  const remaining = [...pool];
  const picked: string[] = [];
  while (picked.length < count) {
    const i = Math.floor(Math.random() * remaining.length);
    picked.push(remaining.splice(i, 1)[0]);
  }

  return picked.join(", ");
}

// Build a random Accept-Language header value with quality weights.
export function getRandomAcceptLanguage() {
  // Candidate language tags with their q-weights (extend as needed).
  const languages = [
    { code: "zh-CN", weight: 1.0 },
    { code: "en", weight: 0.9 },
    { code: "fr", weight: 0.8 },
    { code: "de", weight: 0.7 },
    { code: "es", weight: 0.6 },
  ];

  // How many languages to include (at least one).
  const count = 1 + Math.floor(Math.random() * languages.length);

  // Collect distinct picks, keyed by language code.
  const chosen = new Map<string, number>();
  while (chosen.size < count) {
    const pick = languages[Math.floor(Math.random() * languages.length)];
    if (!chosen.has(pick.code)) {
      chosen.set(pick.code, pick.weight);
    }
  }

  // Serialize as "code;q=weight" pairs joined by ", ".
  return [...chosen.entries()]
    .map(([code, weight]) => `${code};q=${weight}`)
    .join(", ");
}

// Current local date formatted as "yyyy-mm-dd".
export function getCurrentDate() {
  const today = new Date();

  // padStart guarantees two-digit month and day.
  const month = String(today.getMonth() + 1).padStart(2, "0");
  const day = String(today.getDate()).padStart(2, "0");

  return `${today.getFullYear()}-${month}-${day}`;
}

/**
 * Sleep for a random duration between `ms` and `ms2` milliseconds (inclusive).
 *
 * Bug fix: the previous implementation computed
 * `Math.floor(Math.random() * ms) + ms2`, which sleeps for a value in
 * [ms2, ms2 + ms) — not the [ms, ms2] range the comment promised. The delay
 * is now drawn uniformly from [ms, ms2].
 *
 * @param ms  lower bound of the delay in milliseconds
 * @param ms2 upper bound of the delay in milliseconds (expected >= ms)
 */
export async function sleep(ms: number, ms2: number) {
  const delay = ms + Math.floor(Math.random() * (ms2 - ms + 1));
  await new Promise((resolve) => setTimeout(resolve, delay));
}

// ============================== Shared crawler file-handling utilities ==============================
// Base configuration shape shared by all crawlers; persisted as JSON on disk.
export type Config = {
  lastProcessedKeyword?: string; // last keyword that was fully processed (resume point)
  lastProcessedPage?: number; // last page number that was fully processed
  lastEcPage?: number; // last "editor's choice" page that was crawled
  outDir: string; // image output directory, relative to the project root
  keywords: string[]; // keywords to crawl
  concurrency: number; // download concurrency limit
};

/**
 * Generic file-handling helper for crawlers: resolves paths relative to the
 * calling module, loads/persists crawl state as JSON, appends an error log,
 * and supports resuming an interrupted crawl.
 *
 * Construct with the caller's module URL: `new CrawlerFileUtils(import.meta.url)`.
 */
export class CrawlerFileUtils<T extends Config> {
  __filename: string; // absolute file path of the calling module
  __dirname: string; // directory of the calling module
  errorLogFilePath: string; // path of the local error log file
  statusFilePath: string; // path of the config/state JSON file
  config: T;
  outDirPath: string; // absolute output directory for crawled images
  // Pass the calling module's URL: new CrawlerFileUtils(import.meta.url)
  constructor(moduleUrl: string) {
    this.__filename = fileURLToPath(moduleUrl);
    this.__dirname = path.dirname(this.__filename);
    this.statusFilePath = path.resolve(this.__dirname, "runtime-config.json");
    // Fall back to config.json when runtime-config.json does not exist.
    // NOTE(review): the original comment said a copy is made, but nothing is
    // copied — the path is merely redirected, so later writeConfig() calls
    // will overwrite config.json itself. Confirm this is intended.
    if (!fs.existsSync(this.statusFilePath)) {
      this.statusFilePath = path.resolve(this.__dirname, "config.json");
    }
    this.errorLogFilePath = path.resolve(this.__dirname, "error.log");
    this.config = this.readConfig();
    this.outDirPath = path.resolve(process.cwd(), this.config.outDir);
  }

  // Read and parse the state/config JSON file.
  // Throws if the file is missing or contains invalid JSON; the cast to T is
  // unchecked — the file's shape is trusted.
  readConfig(): T {
    const json = JSON.parse(fs.readFileSync(this.statusFilePath, "utf-8")) as T;
    return json;
  }

  // Merge a partial state update into the in-memory config and persist the
  // whole config back to statusFilePath (pretty-printed, 2-space indent).
  writeConfig(status: Partial<T>): void {
    this.config = { ...this.config, ...status };
    fs.writeFileSync(
      this.statusFilePath,
      JSON.stringify(this.config, null, 2),
      "utf-8"
    );
  }

  // Append a timestamped entry to the local error log and echo it to stderr.
  // Prefers error.stack when available; falls back to the raw value.
  logError(error: any): void {
    const errorMsg = `[${new Date().toISOString()}] ${
      error ? error.stack || error : ""
    }\n`;
    fs.appendFileSync(this.errorLogFilePath, errorMsg, "utf-8");
    console.error(error);
  }

  // Resume crawling from the last recorded position. Returns the remaining
  // keywords and the next page numbers to fetch (1-based).
  resumeCrawling() {
    const lastProcessedKeyword = this.config.lastProcessedKeyword;
    const keywords = this.config.keywords;
    // indexOf yields -1 when the saved keyword is no longer in the list, so
    // startIndex falls back to 0 and crawling restarts from the beginning.
    let startIndex = lastProcessedKeyword
      ? keywords.indexOf(lastProcessedKeyword) + 1
      : 0;
    // Next regular page to process (stored values record completed pages).
    const PageToprocess = (this.config.lastProcessedPage || 0) + 1;
    // Next "editor's choice" page to process.
    const ecPageToProcess = (this.config.lastEcPage || 0) + 1;

    if ((startIndex !== 0 && PageToprocess !== 1) || ecPageToProcess !== 1) {
      console.log(`⭐ 发现有上一次存档，断点续爬启动！`);
    }
    const keywordsToProcess = keywords.slice(startIndex);

    return { keywordsToProcess, PageToprocess, ecPageToProcess };
  }
}
