import ImageBlocks, { ImageBlock } from './ImageBlock'
import tf from './tf'
import scaleModel from './models/scaleModel'
import { getColorU8ArrayFromTensor } from './utils'
import noiseModel from './models/noiseModel'
import RealESRGANAnimModel from './models/RealESRGANAnimModel'
import RealESRGANModel from './models/RealESRGANModel'
import {
  ImageUpscaleParams,
  LoadStatus,
  RespWorkerMessage,
  RunBack,
  SendWorkerMessage,
  UpscaleModel,
} from './types'
import { BaseConvolutionModel } from './models/BaseConvolutionModel'
import { sleep } from '@/utils/common'
import SimpleLoadHandler from './models/loadHandler/SimpleLoadHandler'
/**
 * Registry of every upscale model this worker can run, keyed by model name.
 */
const models = {
  waifu_2x_scale: scaleModel,
  waifu_2x_noise: noiseModel,
  RealESRGANAnimModel,
  RealESRGANModel,
}
/** Flat list view of the registry, used for iteration and status reporting. */
const modelList: { name: UpscaleModel; model: BaseConvolutionModel }[] = (
  Object.keys(models) as (keyof typeof models)[]
).map((key) => ({
  name: key as UpscaleModel,
  model: models[key],
}))
/**
 * Models that must be loaded during init() even when no cached weights exist.
 */
const mustInitLoadModels: UpscaleModel[] = [
  UpscaleModel.waifu_2x_scale,
  UpscaleModel.waifu_2x_noise,
]
/**
 * Post a message back to the main thread.
 * Silently no-ops when `self` is absent (e.g. module evaluated outside a
 * worker context, such as during SSR or tests).
 */
const respWorkMessage = (data: RespWorkerMessage) => {
  if (typeof self === 'undefined') return
  self.postMessage(data)
}

/**
 * Initialize the worker: prepare the TF backend, subscribe to every model
 * loader's status/progress events (forwarded to the main thread), preload
 * models whose weights are already cached plus the always-required models,
 * then signal `ready`.
 */
async function init() {
  await tf.ready()
  const onModelStatusChange = (
    name: UpscaleModel,
    status: LoadStatus,
    progress: number,
    loadBytes: number
  ) => {
    if (status === LoadStatus.error) {
      // Read the loader's recorded error once, so the check and the message
      // cannot observe different values.
      const lastError = (models[name].getLoadHander() as SimpleLoadHandler)
        .lastError
      if (lastError) {
        respWorkMessage({
          type: 'error',
          data: `${lastError}`,
        })
      }
    }
    respWorkMessage({
      type: 'modelStatusChange',
      data: {
        name,
        status,
        progress,
        loadBytes,
      },
    })
  }
  const onModelProgressChange = (
    name: UpscaleModel,
    status: LoadStatus,
    progress: number,
    loadBytes: number
  ) => {
    respWorkMessage({
      type: 'modelLoadProgress',
      data: {
        name,
        status,
        progress,
        loadBytes,
      },
    })
  }
  for (const item of modelList) {
    const loader = item.model.getLoadHander()
    loader.onStatusChange((status) => {
      onModelStatusChange(
        item.name,
        status,
        loader.getLoadProgress(),
        loader.getLoadBytes()
      )
    })
    loader.onProgressChange((progress) => {
      onModelProgressChange(
        item.name,
        loader.getStatus(),
        progress,
        loader.getLoadBytes()
      )
    })
    // Cached weights load at no network cost, so always restore them;
    // otherwise only the mandatory models are loaded eagerly.
    if (await loader.supportCacheLoad()) {
      await loader.load()
      continue
    }
    if (mustInitLoadModels.includes(item.name)) {
      await item.model.load()
    }
  }
  respWorkMessage({
    type: 'ready',
    data: true,
  })
}
if (typeof self !== 'undefined') {
  self.onmessage = async (e: { data: SendWorkerMessage }) => {
    const param = e.data
    switch (param.type) {
      case 'predict':
        if (workConfig.isRun) {
          respWorkMessage({
            type: 'error',
            data: '当前正在放大中，请稍后',
          })
          return
        }
        predict(param.data)
        break
      case 'getBackend':
        await tf.ready()
        tf.tensor1d([1]).dispose()
        respWorkMessage({
          type: 'getBackend',
          data: tf.getBackend() as RunBack,
        })
        break
      case 'getModelList':
        respWorkMessage({
          type: 'getModelList',
          data: modelList.map((item) => {
            const loader = item.model.getLoadHander()
            return {
              name: item.name,
              status: loader.getStatus(),
              loadBytes: loader.getLoadBytes(),
              progress: loader.getLoadProgress(),
            }
          }),
        })
        break
      case 'requestLoadModel':
        models[param.data].load()
        break
      case 'setBackend':
        if (workConfig.isRun) {
          respWorkMessage({
            type: 'error',
            data: '当前正在运行中，请不要切换后端',
          })
          return
        }
        const oldBackend = tf.getBackend()
        const flag = await tf.setBackend(param.data)
        if (!flag) {
          tf.setBackend(oldBackend)
        }
        respWorkMessage({
          type: 'setBackend',
          data: flag,
        })
        break
      case 'stop': {
        workConfig.runVersion++
        break
      }
    }
  }
}

/**
 * Shared run state for the worker.
 * - `isRun`: whether a predict() is in flight; the setter notifies the main
 *   thread of every transition via a `runStatusChange` message.
 * - `runVersion`: bumped on every run start and on 'stop', letting an
 *   in-flight predict() detect that it has been superseded/cancelled.
 *
 * Accessor properties replace the previous `as any as` double cast plus
 * Object.defineProperty, keeping the same observable behavior type-safely.
 */
const workConfig = {
  _isRun: false,
  runVersion: 0,
  get isRun(): boolean {
    return this._isRun
  },
  set isRun(value: boolean) {
    respWorkMessage({
      type: 'runStatusChange',
      data: value,
    })
    this._isRun = value
  },
}

/**
 * Run the upscale pipeline on one image.
 *
 * The image is normalized to [0,1]; when a 4th channel is present its alpha
 * plane is set aside and re-attached (nearest-neighbor resized) at the end.
 * The image is split into blocks of `blockSize`, pushed through every model
 * in `modelQueue` in order, reassembled, and posted back as `predictResult`.
 * Progress, results and errors are all reported via postMessage; the promise
 * itself resolves with no value.
 */
async function predict(params: ImageUpscaleParams) {
  const predictStartTime = Date.now()
  const { imageData, modelQueue } = params

  const shape: [number, number, number] = [
    params.height,
    params.width,
    params.channels,
  ]
  const blockSize = params.blockSize
  workConfig.isRun = true
  // A fresh version per run; 'stop' (or a newer run) bumps the counter so
  // this run can detect cancellation at each block boundary.
  const currentRunVersion = ++workConfig.runVersion
  try {
    let input = tf.tidy(() =>
      tf.tensor3d(imageData, shape).cast('float32').div(255)
    ) as tf.Tensor3D
    let initAlpha = null as tf.Tensor3D | null
    if (shape[2] === 4) {
      // Keep the alpha channel aside; the models only upscale the color data.
      initAlpha = input.slice([0, 0, 3], [shape[0], shape[1], 1])
    }

    let currentQuery = 0
    for (const activeModel of modelQueue as UpscaleModel[]) {
      const model = models[activeModel]
      if (!model) {
        throw new Error('model not found')
      }
      const { width: outWidth, height: outHeight } = model.predictSize(
        input.shape[1],
        input.shape[0]
      )
      const { width: initResetWidth, height: initResetHeight } =
        model.initResize(input.shape[1], input.shape[0])
      input = model.initInputTensor(input)
      const blocksData = (await input.data()) as Float32Array
      const blocks = new ImageBlocks({
        width: initResetWidth,
        height: initResetHeight,
        data: blocksData,
        channel: input.shape[2],
        blockSize,
        padding: model.getInputPadding(),
      })
      await blocks.init()
      const outList: ImageBlock[] = []
      for (let blockIndex = 0; blockIndex < blocks.list.length; blockIndex++) {
        const item = blocks.list[blockIndex]
        const b_input = item.toInputTensor()
        const res = model.predict(b_input)
        // Yield so the worker thread can service queued main-thread messages
        // (e.g. a 'stop' request) between blocks.
        await sleep(30)
        if (workConfig.runVersion !== currentRunVersion) {
          // Cancelled: release every live tensor before bailing out.
          res.dispose()
          b_input.dispose()
          input.dispose()
          // BUGFIX: initAlpha was previously leaked on cancellation.
          initAlpha?.dispose()
          blocks.destroy()
          return
        }
        respWorkMessage({
          type: 'predictProgress',
          data: {
            currentQuery,
            modelQueryCount: modelQueue.length,
            progress: (blockIndex / blocks.list.length) * 100,
          },
        })
        b_input.dispose()
        const output = await ImageBlock.fromTensor(res)
        res.dispose()
        outList.push(output)
      }
      respWorkMessage({
        type: 'predictProgress',
        data: {
          currentQuery,
          modelQueryCount: modelQueue.length,
          progress: 100,
        },
      })
      input.dispose()
      input = ImageBlocks.listToTensor(
        outList,
        outWidth,
        outHeight,
        model.getOutPadding()
      )
      blocks.destroy()
      currentQuery++
    }
    // Re-attach alpha: resize the preserved plane, or synthesize a fully
    // opaque one when the source had no alpha channel.
    const alpha = initAlpha
      ? tf.image.resizeNearestNeighbor(initAlpha, [
          input.shape[0],
          input.shape[1],
        ])
      : tf.fill([input.shape[0], input.shape[1], 1], 1, 'float32')
    initAlpha?.dispose()
    const output = input.concat(alpha, 2) as tf.Tensor3D
    const outArray = await getColorU8ArrayFromTensor(output)
    respWorkMessage({
      type: 'predictResult',
      data: {
        result: outArray,
        width: output.shape[1],
        height: output.shape[0],
        predictTime: Date.now() - predictStartTime,
      },
    })
    tf.dispose([input, output, alpha])
  } catch (e) {
    respWorkMessage({
      type: 'error',
      data: `${e}`,
    })
  } finally {
    workConfig.isRun = false
  }
}
init()
