// 压缩带宽持久化到数据库
const util = require('util');
const config = require('config');
const mysql = require('mysql');
const Promise = require('bluebird');
const redis = require('../lib/redis');
const moment = require('moment');
const surume = require('../lib/surume');
const { isValidTimeForCommand } = require('../lib/utility');
// DEBUG=kalifa-client* node scripts/persistence-common-stats.js  '2022-04-18 15:30:00' -t 'compression_flow_stats-2022-06' 'compression_flow_account_stats-2022-06' -a 138489 168478 14048 15690 19942 197813
const debug = require('debug')('kalifa-client:scripts:persistence-common-stats.js');
const validator = require('validator');
const { program, InvalidArgumentError } = require('commander');



// MySQL connection pool for the stats database.
// Review fix: every other field reads config.mysql.upStats.*, but `port`
// originally read config.mysql.upyun.port — a copy/paste slip that would
// connect to the wrong port whenever the two config sections differ.
const db_stats = mysql.createPool({
  connectionLimit: 20,
  host: config.mysql.upStats.host,
  port: config.mysql.upStats.port,
  user: config.mysql.upStats.user,
  password: config.mysql.upStats.password,
  database: config.mysql.upStats.database
});

// CLI definition.
// Usage example is in the description below; arguments are validated eagerly
// by the per-value parsers, which throw InvalidArgumentError on bad input.
program
  .description(`Compressed bandwidth persistence to the database. 
                -e.g. node scripts/persistence-common-stats.js '2022-04-18 15:30:00' -t 'compression_flow_stats-2022-06' 'compression_flow_account_stats-2022-06' -a 138489 168478 14048 15690 19942 197813`)
  // <start_time>: the action below calls .format() on the parsed value, so
  // isValidTimeForCommand presumably returns a moment instance — confirm in lib/utility.
  .argument('<start_time>', 'start time', (v) => isValidTimeForCommand(v, 'YYYY-MM-DD HH:mm:ss', true))
  // -t: variadic table names; each must end with a 'YYYY-MM' suffix (last 7 chars).
  .option('-t, --tables <tables...>', 'tables', (v, previous) => {
    const time = v.slice(-7);
    isValidTimeForCommand(time, 'YYYY-MM', true);
    return previous.concat([v]);
  }, [])
  // -a: variadic account ids; each must be an integer string.
  .option('-a, --account_arr <account_arr...>', 'account arr', (v, previous) => {
    if (!validator.isInt(v)) {
      throw new InvalidArgumentError('Not an integer number');
    }
    return previous.concat([parseInt(v, 10)]);
  }, [])
  .action(async (start_time, options) => {
    debug(`start to run script with parameter. start_time: ${start_time.format('YYYY-MM-DD HH:mm:ss')}, account_arr: ${JSON.stringify(options.account_arr)}, tables: ${JSON.stringify(options.tables)}`);
    await main (start_time, options.account_arr, options.tables);
  });

// Entry point: run the CLI; exit 0 on success, log the error and exit 1 on failure.
(async () => {
  try {
    await program.parseAsync(process.argv);
    console.log('DONE');
    process.exit(0);
  } catch (err) {
    console.error(err);
    process.exit(1);
  }
})();

// Module-level constants used by main()/findAllDomain().
// NOTE(review): these are declared *after* program.parseAsync() is invoked above;
// this works only because the async action yields at its first await before any
// of them are read. Fragile — keep synchronous code above from touching them.
const time = Math.floor(Date.now() / 1000); // script start, unix seconds (redis key suffix)
const now = new Date();
// NOTE(review): shifts "now" forward by 60 days (60 * 24 * 3600 * 1000 ms) before
// taking the month — presumably to target a future period's tables; confirm intent.
const date = now.setTime(now.getTime() + 60 * 24 * 3600 * 1000);
const month = moment(date).format('YYYY-MM');
// Default table names; shadowed by the `tables` parameter inside main() and the
// incr* helpers, so these are only a fallback.
const tables = ['compression_flow_stats-' + month, 'compression_flow_account_stats-' + month];
// NOTE(review): `pubmite` (pubtime?) is never read in this file — dead unless used
// elsewhere. Also `now` was mutated by setTime above, so this formats the shifted date.
const pubmite = moment(now).format('YYYY-MM-DD HH:mm:ss');
// Scan redis for all domains that have stats keys in [start_time, now],
// probing one timestamp per 300-second bucket (keys look like
// `<domain>:totalRsize:<timestamp>`). Node-style: results delivered via callback.
async function findAllDomain(start_time, callback) {
  let result = [];
  const startTime = start_time / 1000;           // moment coerces to ms; -> unix seconds
  const endTime = Math.floor(Date.now() / 1000); // scan up to "now"
  // Review fixes:
  //  - the original body did `i += 300` in addition to `i++` in the loop header,
  //    stepping 301 seconds instead of the intended 300;
  //  - the scan pattern used the module-level `time` constant on every iteration,
  //    leaving the loop variable `i` dead and re-scanning the same key each pass.
  //    Scanning by `i` is presumably the intent — confirm against the key writer.
  for (let i = startTime; i <= endTime; i += 300) {
    const keys = await redis.scan(0, `${i}`);
    keys.forEach((key) => {
      // key format "<domain>:...": keep only the domain prefix
      result = result.concat(key.split(':').slice(0, 1));
    });
  }
  return callback(null, result);
}

/**
 * Upsert per-domain compression flow into the bucket stats table (tables[0]).
 *
 * @param {*} start_time pubtime value (escaped by the mysql driver)
 * @param {bigint} bucket_id bucket id the domain belongs to
 * @param {string} domain domain name
 * @param {number} compression_flow compressed flow to add
 * @param {string[]} tables [bucket_stats_table, account_stats_table]
 * @param {Function} callback node-style callback(err)
 */
function incrBucketStats(start_time, bucket_id, domain, compression_flow, tables, callback) {
  const table_name = tables[0];
  // Review fix: the original wrapped placeholders in double quotes
  // (`domain = "?"`, `pubtime = "?"`). The mysql driver quotes and escapes
  // values itself, so the quoted form stored values with embedded quote
  // characters — placeholders must appear bare.
  const sql = util.format('INSERT INTO `%s` SET bucket_id = ?, domain = ?, pubtime = ?, compression_flow = ? ' +
                          'ON DUPLICATE KEY UPDATE compression_flow = compression_flow + ?', table_name);

  db_stats.query(sql, [bucket_id, domain, start_time, compression_flow, compression_flow], function (err) {
    return callback(err);
  });
}

/**
 * Upsert per-account total compression flow into the account stats table (tables[1]).
 *
 * @param {*} start_time pubtime value (escaped by the mysql driver)
 * @param {bigint} account_id account id
 * @param {number} compression_flow total compressed flow to add
 * @param {string[]} tables [bucket_stats_table, account_stats_table]
 * @param {Function} callback node-style callback(err)
 */
function incrAccountStats(start_time, account_id, compression_flow, tables, callback) {
  const table_name = tables[1];
  // Review fix: `pubtime = "?"` was quoted in the original; the mysql driver
  // quotes/escapes values itself, so the placeholder must appear bare.
  const sql = util.format('INSERT INTO `%s` SET account_id = ?, pubtime = ?, compression_flow = ? ' +
                          'ON DUPLICATE KEY UPDATE compression_flow = compression_flow + ?', table_name);

  db_stats.query(sql, [account_id, start_time, compression_flow, compression_flow], function (err) {
    return callback(err);
  });
}

/**
 * Persist compressed-bandwidth stats: for each account, page through all its
 * buckets, then every domain of each bucket, and upsert the redis-accumulated
 * compression flow into the bucket- and account-level MySQL tables.
 *
 * @param {*} start_time period start (parsed by the CLI argument validator)
 * @param {number[]} account_arr account ids to process
 * @param {string[]} tables [bucket_stats_table, account_stats_table]
 */
async function main(start_time, account_arr, tables) {
  const incrBucketStatsAsync = Promise.promisify(incrBucketStats);
  const incrAccountStatsAsync = Promise.promisify(incrAccountStats);
  const findAllDomainAsync = Promise.promisify(findAllDomain);

  // The domain list depends only on start_time, so scan redis once up front
  // (the original re-scanned inside the per-bucket loop) and use a Set for
  // O(1) membership instead of Array#indexOf per domain.
  const domainSet = new Set(await findAllDomainAsync(start_time));

  for (const account_id of account_arr) {
    // page through all buckets belonging to the account
    let { buckets } = await surume.callAsync('upyun.bucket.find', {since: 0, account_id: account_id});
    while (buckets.length) {
      // Review fix: the original advanced the paging cursor with
      // buckets[0].bucket_id; if pages are sorted ascending that re-fetches the
      // same page forever. Use the maximum id on the page — confirm against
      // surume's result ordering.
      const max_id = Math.max(...buckets.map((b) => b.bucket_id));
      for (const bucket of buckets) {
        // all domains bound to this bucket
        const { bucket_domains } = await surume.callAsync('upyun.bucket.domain.find', {bucket_id: bucket.bucket_id});
        for (const domain of bucket_domains) {
          if (!domainSet.has(domain)) {
            continue;
          }
          try {
            // NOTE(review): the redis key uses the module-level `time` (script
            // start), not a key derived from start_time — confirm this matches
            // the writer side of these keys.
            const bf_cp_bytes = await redis.get(`${domain}:totalRsize:${time}`);
            const compression_flow = Number(bf_cp_bytes);
            await incrBucketStatsAsync(start_time, bucket.bucket_id, domain, compression_flow, tables);
            await incrAccountStatsAsync(start_time, account_id, compression_flow, tables);
          } catch (err) {
            // best-effort per domain: log and continue with the next one
            console.log(err.message);
          }
        }
      }
      const data = await surume.callAsync('upyun.bucket.find', {since: max_id, account_id: account_id});
      buckets = data.buckets;
    }
  }
}
