import {
  createScopedThreejs
} from '../thress'  // third-party library for parsing/rendering 3D models (scoped three.js)
import {
  registerGLTFLoader
} from '../gltf-loader'
import cloneGltf from '../gltf-clone'
import flip from './flip.js'  // flips the pixel data read from the canvas along the y axis
const NEAR = 0.001 // near clipping plane for the VK projection matrix (see render())
const FAR = 1000 // far clipping plane for the VK projection matrix
let DEBUG_SIZE = false // debug: draw anchor extents as planes (support is incomplete)
const app = getApp()
Page({

  /**
   * Initial page data.
   */
  data: {
    modelType: false, // true while a target object is recognized (drives check-in)
    identifyImg: [], // images to register as AR markers (parsed from options.img in onLoad)
    cameraPosition: 0, // camera position value written to the VK session config (see switchCamera)
  },
  //点击打卡，获得canvas数据图片
  saveImg: function () {
    let that = this
    if (!this.data.modelType) {//没有识别不能打卡
      return
    } else {
      wx.showLoading({
        title: '正在识别...',
      })
      let self = this
      var gl = this.canvas.getContext("webgl", {//获取canvas
        preserveDrawingBuffer: true
      });
      const {
        drawingBufferWidth: width,
        drawingBufferHeight: height
      } = gl;
      const pixels = new Uint8Array(width * height * 4);
      gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
      flip(pixels, width, height, 4);//旋转获取的图片数据
      wx.canvasPutImageData({
        canvasId: "myCanvas",
        data: new Uint8ClampedArray(typedArrayToBuffer(pixels)),
        x: 0,
        y: 0,
        width: width,
        height: height,
        success(res) {
          console.log('canvasPutImage success', res);
          save();
        },
        fail(err) {
          console.log('canvasPutImage err', err);
        }
      }, self);
      function save() {
        wx.canvasToTempFilePath({
          x: 0,
          y: 0,
          width: width,
          height: height,
          destWidth: width,
          destHeight: height,
          canvasId: 'myCanvas',
          success(res) {
            console.log(res.tempFilePath);
            if (res.tempFilePath) {
              setTimeout(() => {
                that.uploads(res.tempFilePath)
              }, 500)
            }
          },
          fail(res) {
            console.log(res);
          }
        }, self)
      }
      function typedArrayToBuffer(array) {
        return array.buffer.slice(array.byteOffset, array.byteLength + array.byteOffset)
      }
    }
  },

  uploads(image) {
    let that = this
    wx.uploadFile({
      filePath: image,
      name: 'image',
      url: app.globalData.reqUrl + '/web/index.php?r=api/video/upload-img&_mall_id=3',
      success(res) {
        wx.hideLoading()
        wx.showToast({
          title: '打卡成功',
          icon: 'success',
          duration: 500
        })
        wx.redirectTo({//获得图片跳转
          url: '/pages/user-center/clockPages/pushClock/index?imgUrl=' + JSON.parse(res.data).data + '&id=' + that.data.id,
        })
        console.log('上传文件', res);
      },
      fail(err) {
        console.log(err);
      }
    })
  },
  /**
   * 生命周期函数--监听页面加载
   */
  onLoad(options) {
    console.log(JSON.parse(options.img));
    this.setData({
      identifyImg: JSON.parse(options.img),
      id: options.id
    })
    wx.createSelectorQuery()
      .select('#webgls')
      .node()
      .exec(res => {
        this.canvas = res[0].node
        const version = wx.getSystemInfoSync().SDKVersion
        console.log("当前基础库版本：", version);
        if (this.compareVersion(version, '2.22.0') >= 0) {
          this.initVK()
        } else {
          // 如果希望用户在最新版本的客户端上体验您的小程序，可以这样子提示
          wx.showModal({
            title: '提示',
            content: '当前微信版本过低，无法使用该功能，请升级到最新微信版本后重试。'
          })
        }
      })
  },
  //切换前置摄像头
  switchCamera(event) {
    console.log(this.session.config);
    if (this.session.config) {
      const config = this.session.config
      let pos = Number(event.currentTarget.dataset.value)
      config.cameraPosition = pos
      this.session.config = config
      this.setData({
        cameraPosition: event.currentTarget.dataset.value
      })
    }
  },
  //版本检测
  compareVersion(v1, v2) {
    v1 = v1.split('.')
    v2 = v2.split('.')
    const len = Math.max(v1.length, v2.length)
    while (v1.length < len) {
      v1.push('0')
    }
    while (v2.length < len) {
      v2.push('0')
    }
    for (let i = 0; i < len; i++) {
      const num1 = parseInt(v1[i])
      const num2 = parseInt(v2[i])
      if (num1 > num2) {
        return 1
      } else if (num1 < num2) {
        return -1
      }
    }
    return 0
  },
  // Create and start the VK (vision kit) AR session: plane + marker
  // tracking, anchor lifecycle handlers, and a frame loop capped at 30fps.
  initVK() {
    // Initialize three.js (camera, scene, lights, renderer).
    this.initTHREE()
    const THREE = this.THREE
    // Custom init hook: compiles the YUV camera-feed shader (see init()).
    if (this.init) this.init()
    const session = this.session = wx.createVKSession({
      track: {
        plane: {
          mode: 3
        },
        marker: true,
      },
      version: 'v1',
      gl: this.gl
    })
    session.start(err => {
      if (err) return console.error('VK error: ', err)
      const canvas = this.canvas
      // Resize the canvas backing store and mirror the CSS size into data.
      const calcSize = (width, height, pixelRatio) => {
        console.log(`canvas size: width = ${width} , height = ${height}`)
        // NOTE(review): backing store is half of size*pixelRatio —
        // presumably a performance trade-off; confirm it is intentional.
        this.canvas.width = width * pixelRatio / 2
        this.canvas.height = height * pixelRatio / 2
        this.setData({
          width,
          height,
        })
      }
      session.on('resize', () => {
        const info = wx.getSystemInfoSync()
        calcSize(info.windowWidth, info.windowHeight * 0.6, info.pixelRatio)
      })
      const loader = new THREE.GLTFLoader()
      // URL of the 3D model shown on recognized anchors.
      loader.load('https://dldir1.qq.com/weixin/miniprogram/RobotExpressive_aa2603d917384b68bb4a086f32dabe83.glb', gltf => {
        this.model = {
          scene: gltf.scene,
          animations: gltf.animations,
        }
      })
      this.clock = new THREE.Clock()
      // Build a semi-transparent plane used to visualize anchor extents
      // when DEBUG_SIZE is enabled.
      const createPlane = size => {
        const geometry = new THREE.PlaneGeometry(size.width, size.height)
        const material = new THREE.MeshBasicMaterial({
          color: 0xffffff,
          side: THREE.DoubleSide,
          transparent: true,
          opacity: 0.5
        })
        const mesh = new THREE.Mesh(geometry, material)
        mesh.rotateX(Math.PI / 2)
        const cnt = new THREE.Object3D()
        cnt.add(mesh)
        return cnt
      }
      // Drive an object's transform directly from a VK anchor matrix.
      const updateMatrix = (object, m) => {
        object.matrixAutoUpdate = false
        object.matrix.fromArray(m)
      }
      this.addMarker()
      session.on('addAnchors', anchors => { // fired when recognition succeeds
        this.setData({
          modelType: true
        })
        anchors.forEach(anchor => {
          const size = anchor.size
          let object
          if (size && DEBUG_SIZE) {
            object = createPlane(size)
          } else {
            // The glb model loads asynchronously; skip anchors that arrive
            // before it is ready.
            if (!this.model) {
              console.warn('this.model 还没加载完成 ！！！！！')
              return
            }
            object = new THREE.Object3D()
            const model = this.getRobot()
            console.log('247model', model);
            model.rotateX(-Math.PI / 2)
            object.add(model)
          }
          object._id = anchor.id
          object._size = size
          updateMatrix(object, anchor.transform)
          this.planeBox.add(object)
        })
      })
      session.on('updateAnchors', anchors => { // fired continuously while targets stay tracked
        this.setData({
          modelType: true
        })
        // Index the incoming anchors by id for O(1) lookup below.
        const map = anchors.reduce((temp, item) => {
          temp[item.id] = item
          return temp
        }, {})
        this.planeBox.children.forEach(object => {
          if (object._id && map[object._id]) {
            const anchor = map[object._id]
            const size = anchor.size
            // Rebuild the debug plane when the anchor extent changed.
            if (size && DEBUG_SIZE && object._size && (size.width !== object._size.width || size.height !== object._size.height)) {
              this.planeBox.remove(object)
              object = createPlane(size)
              this.planeBox.add(object)
            }
            object._id = anchor.id
            object._size = size
            updateMatrix(object, anchor.transform)
          }
        })
      })
      session.on('removeAnchors', anchors => { // fired when the recognized object leaves the screen
        this.setData({
          modelType: false
        })
        const map = anchors.reduce((temp, item) => {
          temp[item.id] = item
          return temp
        }, {})
        this.planeBox.children.forEach(object => {
          if (object._id && map[object._id]) this.planeBox.remove(object)
        })
      })
      // Container for all anchored objects.
      const planeBox = this.planeBox = new THREE.Object3D()
      this.scene.add(planeBox)
      // Cap the render loop frame rate.
      let fps = 30
      let fpsInterval = 1000 / fps
      let last = Date.now()
      // Per-frame callback: render only when a full frame interval elapsed.
      const onFrame = timestamp => {
        let now = Date.now()
        const mill = now - last
        // Enough time has passed for the next frame.
        if (mill > fpsInterval) {
          last = now - (mill % fpsInterval); // re-align to the frame grid
          const frame = session.getVKFrame(canvas.width, canvas.height)
          if (frame) {
            this.render(frame)
          }
        }
        session.requestAnimationFrame(onFrame)
      }
      session.requestAnimationFrame(onFrame)
    })
  },
  initTHREE() {
    const THREE = this.THREE = createScopedThreejs(this.canvas)
    registerGLTFLoader(THREE)
    // 相机
    this.camera = new THREE.Camera()
    // 场景
    const scene = this.scene = new THREE.Scene()
    // 光源
    const light1 = new THREE.HemisphereLight(0xffffff, 0x444444) // 半球光
    light1.position.set(0, 0.2, 0)
    scene.add(light1)
    const light2 = new THREE.DirectionalLight(0xffffff) // 平行光
    light2.position.set(0, 0.2, 0.1)
    scene.add(light2)
    // 渲染层
    const renderer = this.renderer = new THREE.WebGLRenderer({
      antialias: false,//如果不设false会导致iOS不生成图片，原因不明确
      alpha: true
    })
    renderer.gammaOutput = true
    renderer.gammaFactor = 2.2
  },
  updateAnimation() {
    const dt = this.clock.getDelta()
    if (this.mixers) this.mixers.forEach(mixer => mixer.update(dt))
  },
  copyRobot() {
    const THREE = this.THREE
    const {
      scene,
      animations
    } = cloneGltf(this.model, THREE)
    scene.scale.set(0.05, 0.05, 0.05)
    // 动画混合器
    const mixer = new THREE.AnimationMixer(scene)
    for (let i = 0; i < animations.length; i++) {
      const clip = animations[i]
      if (clip.name === 'Dance') {
        const action = mixer.clipAction(clip)
        action.play()
      }
    }
    this.mixers = this.mixers || []
    this.mixers.push(mixer)
    scene._mixer = mixer
    return scene
  },
  getRobot() {
    const THREE = this.THREE
    const model = new THREE.Object3D()
    model.add(this.copyRobot())
    this._insertModels = this._insertModels || []
    this._insertModels.push(model)
    if (this._insertModels.length > 5) {
      const needRemove = this._insertModels.splice(0, this._insertModels.length - 5)
      needRemove.forEach(item => {
        if (item._mixer) {
          const mixer = item._mixer
          this.mixers.splice(this.mixers.indexOf(mixer), 1)
          mixer.uncacheRoot(mixer.getRoot())
        }
        if (item.parent) item.parent.remove(item)
      })
    }
    return model
  },
  // Custom init hook invoked from initVK(): compiles the camera-feed
  // shader and quad VAO (see initGL()).
  init() {
    this.initGL()
  },
  // Per-frame render: draw the camera feed first, sync the three.js camera
  // with the VK frame's view/projection matrices, then render the scene on
  // top of the feed.
  render(frame) {
    this.renderGL(frame)
    const camera = frame.camera
    // Drive the three.js camera directly from the VK matrices.
    if (camera) {
      this.camera.matrixAutoUpdate = false
      this.camera.matrixWorldInverse.fromArray(camera.viewMatrix)
      this.camera.matrixWorld.getInverse(this.camera.matrixWorldInverse)
      const projectionMatrix = camera.getProjectionMatrix(NEAR, FAR)
      this.camera.projectionMatrix.fromArray(projectionMatrix)
      this.camera.projectionMatrixInverse.getInverse(this.camera.projectionMatrix)
    }
    // Advance animations before drawing.
    this.updateAnimation()
    // Don't clear color — the camera feed was already drawn by renderGL().
    this.renderer.autoClearColor = false
    this.renderer.render(this.scene, this.camera)
    this.renderer.state.setCullFace(this.THREE.CullFaceNone)
  },
  //添加识别物品方法
  addMarker() {
    let that = this
    if (this.markerId) return
    const fs = wx.getFileSystemManager()
    // 此处如果为jpeg,则后缀名也需要改成对应后缀
    //这里写demo代码写的不要介意，自己修改可以，将所识别的网络图片放在这里就行（可多张）
    const download = () => {
      this.markerId = []
      this.data.identifyImg.forEach((item, index) => {
        const filePath = `${wx.env.USER_DATA_PATH}/marker-ar${index}.jpeg`
        wx.downloadFile({
          // 此处设置为识别的3d对象的map地址
          url: item.image,
          success(res) {
            fs.saveFile({
              filePath,
              tempFilePath: res.tempFilePath,
              success(res1) { //更改过本地的图片以后再进行识别
                console.log('downFile', res1);
                const markerId = that.session.addMarker(filePath)
                that.markerId.push(markerId)
              },
            })
          }
        })
      })
      console.log('添加的识别模型', this.markerId);
    }
    download()
  },
  removeMarker() {
    console.log('执行removeMarker');
    if (this.markerId.length != 0) {
      this.markerId.forEach(item => {
        this.session.removeMarker(item)
      })
      this.markerId = []
      this.setData({
        identifyImg: []
      })
    }
  },

  /**
   * Lifecycle: page unload. Removes the AR markers, disposes every GL /
   * three.js resource this page created, then drops all references so
   * they can be garbage collected.
   */
  onUnload() {
    console.log("页面detached")
    this.removeMarker()  // automatically clear markers when leaving the page
    if (this._texture) {
      this._texture.dispose()
      this._texture = null
    }
    if (this.renderer) {
      this.renderer.dispose()
      this.renderer = null
    }
    if (this.scene) {
      this.scene.dispose()
      this.scene = null
    }
    if (this.camera) this.camera = null
    if (this.model) this.model = null
    if (this._insertModel) this._insertModel = null
    if (this._insertModels) this._insertModels = null
    if (this.planeBox) this.planeBox = null
    // Release every animation mixer's cached bindings.
    if (this.mixers) {
      this.mixers.forEach(mixer => mixer.uncacheRoot(mixer.getRoot()))
      this.mixers = null
    }
    if (this.clock) this.clock = null

    if (this.THREE) this.THREE = null
    // Raw GL objects carry a back-reference to their context (see
    // initShader()) so they can be deleted here.
    if (this._tempTexture && this._tempTexture.gl) {
      this._tempTexture.gl.deleteTexture(this._tempTexture)
      this._tempTexture = null
    }
    if (this._fb && this._fb.gl) {
      this._fb.gl.deleteFramebuffer(this._fb)
      this._fb = null
    }
    if (this._program && this._program.gl) {
      this._program.gl.deleteProgram(this._program)
      this._program = null
    }
    if (this.canvas) this.canvas = null
    if (this.gl) this.gl = null
    if (this.session) this.session = null
    if (this.anchor2DList) this.anchor2DList = []
  },

  /**
   * Page event handler: user pull-down refresh (intentionally a no-op).
   */
  onPullDownRefresh() {

  },
  // Compile and link the YUV -> RGB shader used to draw the camera feed as
  // a full-screen quad. The previously bound program is saved and restored
  // so three.js GL state is not disturbed. Texture units 5 and 6 are
  // reserved for the Y and UV planes (see renderGL()).
  initShader() {
    const gl = this.gl = this.renderer.getContext()
    const currentProgram = gl.getParameter(gl.CURRENT_PROGRAM)
    // Vertex shader: pass-through quad transformed by the display matrix.
    const vs = `
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform mat3 displayTransform;
varying vec2 v_texCoord;
void main() {
  vec3 p = displayTransform * vec3(a_position, 0);
  gl_Position = vec4(p, 1);
  v_texCoord = a_texCoord;
}
`
    // Fragment shader: sample the Y and UV planes and convert to RGB.
    const fs = `
precision highp float;

uniform sampler2D y_texture;
uniform sampler2D uv_texture;
varying vec2 v_texCoord;
void main() {
  vec4 y_color = texture2D(y_texture, v_texCoord);
  vec4 uv_color = texture2D(uv_texture, v_texCoord);

  float Y, U, V;
  float R ,G, B;
  Y = y_color.r;
  U = uv_color.r - 0.5;
  V = uv_color.a - 0.5;
  
  R = Y + 1.402 * V;
  G = Y - 0.344 * U - 0.714 * V;
  B = Y + 1.772 * U;
  
  gl_FragColor = vec4(R, G, B, 1.0);
}
`
    const vertShader = gl.createShader(gl.VERTEX_SHADER)
    gl.shaderSource(vertShader, vs)
    gl.compileShader(vertShader)

    const fragShader = gl.createShader(gl.FRAGMENT_SHADER)
    gl.shaderSource(fragShader, fs)
    gl.compileShader(fragShader)

    const program = this._program = gl.createProgram()
    this._program.gl = gl // back-reference used for cleanup in onUnload()
    gl.attachShader(program, vertShader)
    gl.attachShader(program, fragShader)
    // Attached shaders may be flagged for deletion; they are actually
    // freed once the program is linked.
    gl.deleteShader(vertShader)
    gl.deleteShader(fragShader)
    gl.linkProgram(program)
    gl.useProgram(program)

    // Bind the samplers to fixed texture units: 5 for Y, 6 for UV.
    const uniformYTexture = gl.getUniformLocation(program, 'y_texture')
    gl.uniform1i(uniformYTexture, 5)
    const uniformUVTexture = gl.getUniformLocation(program, 'uv_texture')
    gl.uniform1i(uniformUVTexture, 6)

    this._dt = gl.getUniformLocation(program, 'displayTransform')
    gl.useProgram(currentProgram) // restore the previously bound program
  },
  // Build a VAO holding the full-screen quad (positions + texcoords) for
  // the camera-feed pass, using the OES_vertex_array_object extension.
  // The current VAO binding is saved and restored to leave three.js state
  // untouched.
  initVAO() {
    const gl = this.renderer.getContext()
    const ext = gl.getExtension('OES_vertex_array_object')
    this.ext = ext

    const currentVAO = gl.getParameter(gl.VERTEX_ARRAY_BINDING)
    const vao = ext.createVertexArrayOES()

    ext.bindVertexArrayOES(vao)

    // Quad vertex positions — a triangle strip covering clip space.
    const posAttr = gl.getAttribLocation(this._program, 'a_position')
    const pos = gl.createBuffer()
    gl.bindBuffer(gl.ARRAY_BUFFER, pos)
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, -1, 1, 1, -1, -1, -1]), gl.STATIC_DRAW)
    gl.vertexAttribPointer(posAttr, 2, gl.FLOAT, false, 0, 0)
    gl.enableVertexAttribArray(posAttr)
    vao.posBuffer = pos

    // Matching texture coordinates for each quad corner.
    const texcoordAttr = gl.getAttribLocation(this._program, 'a_texCoord')
    const texcoord = gl.createBuffer()
    gl.bindBuffer(gl.ARRAY_BUFFER, texcoord)
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, 0, 1, 1, 0, 0, 0]), gl.STATIC_DRAW)
    gl.vertexAttribPointer(texcoordAttr, 2, gl.FLOAT, false, 0, 0)
    gl.enableVertexAttribArray(texcoordAttr)
    vao.texcoordBuffer = texcoord

    ext.bindVertexArrayOES(currentVAO) // restore the previous VAO binding
    this._vao = vao
  },
  // One-time GL setup for the camera-feed pass: shader program + quad VAO.
  initGL() {
    this.initShader()
    this.initVAO()
  },
  // Draw the camera frame (YUV textures provided by VK) as a full-screen
  // quad. Every piece of GL state that is touched — program, active
  // texture unit, VAO, and the textures bound on units 5/6 — is saved and
  // restored so the subsequent three.js render is unaffected.
  renderGL(frame) {
    const gl = this.renderer.getContext()
    gl.disable(gl.DEPTH_TEST)
    const {
      yTexture,
      uvTexture
    } = frame.getCameraTexture(gl, 'yuv')
    const displayTransform = frame.getDisplayTransform()
    if (yTexture && uvTexture) {
      // Snapshot the GL state we are about to change.
      const currentProgram = gl.getParameter(gl.CURRENT_PROGRAM)
      const currentActiveTexture = gl.getParameter(gl.ACTIVE_TEXTURE)
      const currentVAO = gl.getParameter(gl.VERTEX_ARRAY_BINDING)
      gl.useProgram(this._program)
      this.ext.bindVertexArrayOES(this._vao)

      gl.uniformMatrix3fv(this._dt, false, displayTransform)
      gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1)

      // Y plane on unit 5, UV plane on unit 6 (matches initShader()).
      gl.activeTexture(gl.TEXTURE0 + 5)
      const bindingTexture5 = gl.getParameter(gl.TEXTURE_BINDING_2D)
      gl.bindTexture(gl.TEXTURE_2D, yTexture)

      gl.activeTexture(gl.TEXTURE0 + 6)
      const bindingTexture6 = gl.getParameter(gl.TEXTURE_BINDING_2D)
      gl.bindTexture(gl.TEXTURE_2D, uvTexture)

      gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4)

      // Restore the previous texture bindings, program and VAO.
      gl.bindTexture(gl.TEXTURE_2D, bindingTexture6)
      gl.activeTexture(gl.TEXTURE0 + 5)
      gl.bindTexture(gl.TEXTURE_2D, bindingTexture5)

      gl.useProgram(currentProgram)
      gl.activeTexture(currentActiveTexture)
      this.ext.bindVertexArrayOES(currentVAO)
    }
  },
})