import ImageRecogManager from '../ApiModel/ImageRecogManager'
import { ScreenShotManager } from './ScreenShotManager'
import rdbManager from "../VecDbModel/rdbManager"
import { ValuesBucket } from '@kit.ArkData';
import emb from "../VecDbModel/embedding"
import { Search1n_1, caps2str } from "../VecDbModel/search"
import PixelMap from "@ohos.multimedia.image"
import { Caption, DBtextEmb, simOrig, strCaptions, textMatchImg } from "../utils/recallInterface"
import { BusinessError } from '@kit.BasicServicesKit';
import image from '@ohos.multimedia.image';
import VLMmanager from "../ApiModel/VLMmanager"
import ImageSimManager from '../ApiModel/ImageSimManager';
import { promptAction } from '@kit.ArkUI';

// Formatting constants for getImageName(): each date/time component is
// left-padded to TIME_LENGTH digits with TIME_PREFIX, e.g. "07".
const TIME_LENGTH = 2;
const TIME_PREFIX = '0';
// Screenshot files are named: REC_<yyyymmdd>_<hhmmss>.jpg
const SCREEN_RECORD_PREFIX = 'REC';
const IMAGE_TYPE = '.jpg'
const CONNECTOR = '_';

/**
 * Orchestrates the screenshot -> OCR -> embedding -> search pipeline:
 * captures the screen, runs OCR / image-similarity / VLM on the capture,
 * stores caption text + embedding vectors in the "ocrimage" RDB table,
 * and answers text/image similarity and substring-match queries over it.
 */
export class ScreenProcessManager {
  // Collaborators: screenshot capture, OCR, image-similarity index, vision-language model.
  ss1 = new ScreenShotManager()
  i1 = new ImageRecogManager()
  is1 = new ImageSimManager()
  vl1 = new VLMmanager()
  dbCreated = false // set by createDb(); embStore() lazily creates the DB when false
  context // ability context supplied by the caller; forwarded to rdbManager.createStore
  lastOCRtext = "" // OCR text of the previous capture; used to skip storing duplicate pages

  constructor(context) {
    this.context = context
  }

  /**
   * Create the RDB store and the "ocrimage" table
   * (id, content = captions JSON, path = image path, vec = text embedding).
   * Flips dbCreated on success; failures are logged and swallowed so a
   * later embStore() call can retry.
   */
  async createDb() {
    let TAG = "_:create_DB"
    console.log(TAG, "entered")
    try {
      await rdbManager.createStore(this.context)
      console.log(TAG, "store created")
      await rdbManager.createTable("ocrimage",
        "(id integer primary key autoincrement,content string,path string,vec string)")
      console.log(TAG, "table created: ocrimage")
      this.dbCreated = true
    } catch (e) {
      console.error(TAG, "error", JSON.stringify(e))
    }
  }

  /** Top-5 nearest rows in "ocrimage" by text-embedding similarity to `text`. */
  async search_textEmb(text): Promise<simOrig<DBtextEmb>[]> {
    return await Search1n_1<DBtextEmb>("ocrimage", 5, text)
  }

  /** Image-similarity search: base64-encode the file at `path` and query the index. */
  async search_imgSim(path): Promise<simOrig<DBtextEmb>[]> {
    let imgstr = await this.ss1.path2Base64(path)
    return await this.is1.search(imgstr)
  }

  /**
   * Exact substring match of `strdata` against all stored OCR captions.
   * For every row whose caption words contain `strdata`, decodes the stored
   * image into a PixelMap (in parallel via Promise.all, order-aligned with
   * textMatchImgs) and returns the matching textMatchImg entries.
   */
  async search_textMatch(strdata) {
    let TAG = "_:_search_textMatch"
    console.log(TAG, "entered")
    let buks = (await rdbManager.query("ocrimage")) as any[]
    console.log(TAG, "query DB: rows=", buks.length, JSON.stringify(buks))
    let textMatchImgs: textMatchImg[] = []
    let paras = []
    let cnt = 0
    for (let buk of buks) {
      let captions: Caption[] = JSON.parse(buk.content)
      let hotCaps = captions.filter((caption) => {
        return caption.words.includes(strdata)
      })
      if (hotCaps.length > 0) {
        console.log(TAG, "found[", cnt++, "]", caps2str(hotCaps))
        // Decode in parallel; paras[i] corresponds to textMatchImgs[i].
        paras.push(this.getPixByPath(buk.path))
        textMatchImgs.push(new textMatchImg(hotCaps))
      }
    }
    console.log(TAG, "before Promise.all paras.len", paras.length, "textMatchImgs.len", textMatchImgs.length)
    let pixs: PixelMap.PixelMap[] = await Promise.all(paras)
    // pixs may be empty (no caption matched) and individual entries may be
    // undefined (getPixByPath swallows decode errors) — guard before dereferencing.
    console.log(TAG, "after Promise.all pixs.len", pixs.length, "onePix",
      pixs.length > 0 ? pixs[pixs.length - 1]?.getPixelBytesNumber() : "none",
      "textMatchImgs.len", textMatchImgs.length)
    for (let i = 0; i < pixs.length; i++) {
      textMatchImgs[i].pix = pixs[i]
      console.log(TAG, "add pix to textMatchImgs[", i, "] Bytes(if_nothing_then_null)",
        textMatchImgs[i].pix?.getPixelBytesNumber(), caps2str(textMatchImgs[i].hotcaps))
    }
    console.log(TAG, "finish,got textMatchImgs,len=", textMatchImgs.length, textMatchImgs)
    return textMatchImgs
  }

  /**
   * Decode the image file at `path` into an editable PixelMap.
   * Returns undefined when decoding fails (the error is logged, not rethrown),
   * so callers must null-check the result.
   */
  async getPixByPath(path) {
    let TAG = "_:_getPixByPath"
    try {
      let imageSource: image.ImageSource = image.createImageSource(path);
      let decodingOptions: image.DecodingOptions = {
        editable: true,
        desiredPixelFormat: 3, // numeric PixelMapFormat — TODO confirm which format 3 maps to in the image kit
      }
      let pixelMap: image.PixelMap = await imageSource.createPixelMap(decodingOptions)
      console.log(TAG, "Succeeded in creating PixelMap")
      return pixelMap
    } catch (err) {
      console.error(TAG, "Failed to create PixelMap", JSON.stringify(err))
      return undefined // explicit: decode failure yields no PixelMap
    }
  }

  /**
   * Take a screenshot, persist it to `path`, and return the file's
   * base64 string representation.
   */
  async shotStoreImgStr(path): Promise<string> {
    let TAG = "_:shotStoreImgStr"
    console.log(TAG, "entered", path)
    let pix = await this.ss1.shotAndStore(path)
    console.log(TAG, "pix got", pix.getPixelBytesNumber())
    let imgstr = await this.ss1.path2Base64(path)
    console.log(TAG, "imgStr", imgstr.length, imgstr.slice(0, 100))
    return imgstr
  }

  /**
   * Take a screenshot, store it, run OCR (and optionally add it to the
   * image-similarity index in parallel), and — when `embstore` is true and
   * the OCR text differs from the previous capture — persist the captions
   * plus a text embedding to the DB.
   *
   * @param path     where the screenshot file is written
   * @param embstore store caption embedding in the RDB (skipped for duplicate pages)
   * @param addSim   also register the image with the similarity index
   * @returns { pix, caps, path } for the fresh capture
   */
  async shotStoreOCRAS(path, embstore = true, addSim = true) {
    let TAG = "_:shotStoreOCRAS addSim= " + addSim
    let pix = await this.ss1.shotAndStore(path)
    let imgstr = await this.ss1.path2Base64(path)
    let para = []
    let StrCaptions: strCaptions
    let addSimResult: boolean = false
    if (addSim) {
      // Run similarity-index insertion and OCR concurrently.
      para.push(this.is1.add(imgstr, path))
      para.push(this.i1.OCR(imgstr))
      let paraResult = await Promise.all(para)
      addSimResult = paraResult[0]
      StrCaptions = paraResult[1]
    } else {
      StrCaptions = await this.i1.OCR(imgstr)
    }
    console.log(TAG, "promiseall finish ", "addSimResult", addSimResult, "StrCaptions", JSON.stringify(StrCaptions))
    let imgObj1 = { pix: pix, caps: StrCaptions.captions, path: path }
    let embstoreExec = false
    console.log(TAG, "[last   Page]", this.lastOCRtext)
    console.log(TAG, "[currentPage] compareSame=[", StrCaptions.str == this.lastOCRtext, "]", StrCaptions.str)
    // Only embed when the page content actually changed since the last shot.
    if (embstore && StrCaptions.str != this.lastOCRtext) {
      embstoreExec = true
    }
    this.lastOCRtext = StrCaptions.str
    if (embstoreExec) {
      await this.embStore(StrCaptions, path)
    }
    return imgObj1
  }

  // async shotStoreAddSim(path) {
  //   let TAG = "_:_shotStoreAddSim"
  //   let imgstr=await this.shotStoreImgStr(path)
  //   let reply = await this.is1.add(imgstr,path)
  //   console.log(TAG, "reply got from ImgSimSearch",reply)
  //   return reply
  // }

  /**
   * Take a screenshot and ask the vision-language model about it.
   * Default prompt asks (in Chinese) for a description of the screen content.
   */
  async shotVLM(path, prompt = "请描述画面内容") {
    let TAG = "_:_shotVLM"
    let imgstr = await this.shotStoreImgStr(path)
    let reply = await this.vl1.VLM(imgstr, prompt)
    console.log(TAG, "reply got from VLM", reply)
    // promptAction.showToast({ message:  reply })
    return reply
  }

  /** Run the VLM on an existing image file (no new screenshot is taken). */
  async VLMfromPath(path, prompt) {
    let TAG = "_:_VLMfromPath"
    let imgstr = await this.ss1.path2Base64(path)
    let reply = await this.vl1.VLM(imgstr, prompt)
    console.log(TAG, "reply got from VLM", reply)
    // promptAction.showToast({ message:  reply })
    return reply
  }

  /**
   * Embed the accumulated OCR text and insert (captions JSON, path, vector)
   * into the "ocrimage" table, creating the DB on first use.
   */
  async embStore(StrCaptions: strCaptions, path) {
    let TAG = "_:_embStore"
    if (!this.dbCreated) {
      await this.createDb()
    }
    let vecs = await emb([StrCaptions.str])
    let buk: DBtextEmb = {
      // NOTE(review): stripping escaped quotes keeps DB content clean, but if a
      // caption word legitimately contains a quote this can produce JSON that
      // search_textMatch's JSON.parse cannot read — confirm upstream sanitizing.
      "content": JSON.stringify(StrCaptions.captions).replace(/\\"/g, ""),
      "path": path,
      "vec": vecs[0] as string
    }
    await rdbManager.insert("ocrimage", buk)
    console.log(TAG, "insert finish with vec", vecs[0])
  }

  /**
   * Build a timestamped screenshot file name: REC_<yyyymmdd>_<hhmmss>.jpg
   * (month/day/hour/minute/second zero-padded to two digits).
   */
  getImageName(): string {
    const date = new Date()
    const month = `${date.getMonth() + 1}`.padStart(TIME_LENGTH, TIME_PREFIX)
    const day = date.getDate().toString().padStart(TIME_LENGTH, TIME_PREFIX)
    // padStart is a no-op for a 4-digit year; kept for symmetry with the other fields.
    const year = date.getFullYear().toString().padStart(TIME_LENGTH, TIME_PREFIX)
    const hour = date.getHours().toString().padStart(TIME_LENGTH, TIME_PREFIX)
    const minute = date.getMinutes().toString().padStart(TIME_LENGTH, TIME_PREFIX)
    const second = date.getSeconds().toString().padStart(TIME_LENGTH, TIME_PREFIX)
    const timeStamp = `${year}${month}${day}${CONNECTOR}${hour}${minute}${second}`
    return `${SCREEN_RECORD_PREFIX}${CONNECTOR}${timeStamp}${IMAGE_TYPE}`
  }
}
